Merge branch 'next' of github.com:eyaltoledano/claude-task-master into add-complexity-score-to-task
@@ -8,6 +8,9 @@
 * It imports functionality from the modules directory and provides a CLI.
 */

import dotenv from 'dotenv';
dotenv.config();

// Add at the very beginning of the file
if (process.env.DEBUG === '1') {
	console.error('DEBUG - dev.js received args:', process.argv.slice(2));
464 scripts/init.js
@@ -15,7 +15,6 @@

import fs from 'fs';
import path from 'path';
import { execSync } from 'child_process';
import readline from 'readline';
import { fileURLToPath } from 'url';
import { dirname } from 'path';
@@ -24,6 +23,8 @@ import figlet from 'figlet';
import boxen from 'boxen';
import gradient from 'gradient-string';
import { isSilentMode } from './modules/utils.js';
import { convertAllCursorRulesToRooRules } from './modules/rule-transformer.js';
import { execSync } from 'child_process';

const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
@@ -179,9 +180,6 @@ function copyTemplateFile(templateName, targetPath, replacements = {}) {

// Map template names to their actual source paths
switch (templateName) {
case 'dev.js':
sourcePath = path.join(__dirname, 'dev.js');
break;
case 'scripts_README.md':
sourcePath = path.join(__dirname, '..', 'assets', 'scripts_README.md');
break;
@@ -227,6 +225,27 @@ function copyTemplateFile(templateName, targetPath, replacements = {}) {
case 'windsurfrules':
sourcePath = path.join(__dirname, '..', 'assets', '.windsurfrules');
break;
case '.roomodes':
sourcePath = path.join(__dirname, '..', 'assets', 'roocode', '.roomodes');
break;
case 'architect-rules':
case 'ask-rules':
case 'boomerang-rules':
case 'code-rules':
case 'debug-rules':
case 'test-rules':
// Extract the mode name from the template name (e.g., 'architect' from 'architect-rules')
const mode = templateName.split('-')[0];
sourcePath = path.join(
__dirname,
'..',
'assets',
'roocode',
'.roo',
`rules-${mode}`,
templateName
);
break;
default:
// For other files like env.example, gitignore, etc. that don't have direct equivalents
sourcePath = path.join(__dirname, '..', 'assets', templateName);
@@ -297,61 +316,8 @@ function copyTemplateFile(templateName, targetPath, replacements = {}) {
return;
}

// Handle package.json - merge dependencies
if (filename === 'package.json') {
log('info', `${targetPath} already exists, merging dependencies...`);
try {
const existingPackageJson = JSON.parse(
fs.readFileSync(targetPath, 'utf8')
);
const newPackageJson = JSON.parse(content);

// Merge dependencies, preferring existing versions in case of conflicts
existingPackageJson.dependencies = {
...newPackageJson.dependencies,
...existingPackageJson.dependencies
};

// Add our scripts if they don't already exist
existingPackageJson.scripts = {
...existingPackageJson.scripts,
...Object.fromEntries(
Object.entries(newPackageJson.scripts).filter(
([key]) => !existingPackageJson.scripts[key]
)
)
};

// Preserve existing type if present
if (!existingPackageJson.type && newPackageJson.type) {
existingPackageJson.type = newPackageJson.type;
}

fs.writeFileSync(
targetPath,
JSON.stringify(existingPackageJson, null, 2)
);
log(
'success',
`Updated ${targetPath} with required dependencies and scripts`
);
} catch (error) {
log('error', `Failed to merge package.json: ${error.message}`);
// Fallback to writing a backup of the existing file and creating a new one
const backupPath = `${targetPath}.backup-${Date.now()}`;
fs.copyFileSync(targetPath, backupPath);
log('info', `Created backup of existing package.json at ${backupPath}`);
fs.writeFileSync(targetPath, content);
log(
'warn',
`Replaced ${targetPath} with new content (due to JSON parsing error)`
);
}
return;
}

// Handle README.md - offer to preserve or create a different file
if (filename === 'README.md') {
if (filename === 'README-task-master.md') {
log('info', `${targetPath} already exists`);
// Create a separate README file specifically for this project
const taskMasterReadmePath = path.join(
@@ -361,16 +327,13 @@ function copyTemplateFile(templateName, targetPath, replacements = {}) {
fs.writeFileSync(taskMasterReadmePath, content);
log(
'success',
`Created ${taskMasterReadmePath} (preserved original README.md)`
`Created ${taskMasterReadmePath} (preserved original README-task-master.md)`
);
return;
}

// For other files, warn and prompt before overwriting
log(
'warn',
`${targetPath} already exists. Skipping file creation to avoid overwriting existing content.`
);
log('warn', `${targetPath} already exists, skipping.`);
return;
}
@@ -379,7 +342,7 @@ function copyTemplateFile(templateName, targetPath, replacements = {}) {
log('info', `Created file: ${targetPath}`);
}

// Main function to initialize a new project (Now relies solely on passed options)
// Main function to initialize a new project (No longer needs isInteractive logic)
async function initializeProject(options = {}) {
// Receives options as argument
// Only display banner if not in silent mode
@@ -392,12 +355,11 @@ async function initializeProject(options = {}) {
console.log('===== DEBUG: INITIALIZE PROJECT OPTIONS RECEIVED =====');
console.log('Full options object:', JSON.stringify(options));
console.log('options.yes:', options.yes);
console.log('options.name:', options.name);
console.log('==================================================');
}

// Determine if we should skip prompts based on the passed options
const skipPrompts = options.yes || (options.name && options.description);

if (!isSilentMode()) {
console.log('Skip prompts determined:', skipPrompts);
}
@@ -411,47 +373,26 @@ async function initializeProject(options = {}) {
const projectName = options.name || 'task-master-project';
const projectDescription =
options.description || 'A project managed with Task Master AI';
const projectVersion = options.version || '0.1.0'; // Default from commands.js or here
const authorName = options.author || 'Vibe coder'; // Default if not provided
const projectVersion = options.version || '0.1.0';
const authorName = options.author || 'Vibe coder';
const dryRun = options.dryRun || false;
const skipInstall = options.skipInstall || false;
const addAliases = options.aliases || false;

if (dryRun) {
log('info', 'DRY RUN MODE: No files will be modified');
log(
'info',
`Would initialize project: ${projectName} (${projectVersion})`
);
log('info', `Description: ${projectDescription}`);
log('info', `Author: ${authorName || 'Not specified'}`);
log('info', 'Would initialize Task Master project');
log('info', 'Would create/update necessary project files');
if (addAliases) {
log('info', 'Would add shell aliases for task-master');
}
if (!skipInstall) {
log('info', 'Would install dependencies');
}
return {
projectName,
projectDescription,
projectVersion,
authorName,
dryRun: true
};
}

// Create structure using determined values
createProjectStructure(
projectName,
projectDescription,
projectVersion,
authorName,
skipInstall,
addAliases
);
createProjectStructure(addAliases, dryRun);
} else {
// Prompting logic (only runs if skipPrompts is false)
// Interactive logic
log('info', 'Required options not provided, proceeding with prompts.');
const rl = readline.createInterface({
input: process.stdin,
@@ -459,41 +400,17 @@ async function initializeProject(options = {}) {
|
||||
});
|
||||
|
||||
try {
|
||||
// Prompt user for input...
|
||||
const projectName = await promptQuestion(
|
||||
rl,
|
||||
chalk.cyan('Enter project name: ')
|
||||
);
|
||||
const projectDescription = await promptQuestion(
|
||||
rl,
|
||||
chalk.cyan('Enter project description: ')
|
||||
);
|
||||
const projectVersionInput = await promptQuestion(
|
||||
rl,
|
||||
chalk.cyan('Enter project version (default: 1.0.0): ')
|
||||
); // Use a default for prompt
|
||||
const authorName = await promptQuestion(
|
||||
rl,
|
||||
chalk.cyan('Enter your name: ')
|
||||
);
|
||||
// Only prompt for shell aliases
|
||||
const addAliasesInput = await promptQuestion(
|
||||
rl,
|
||||
chalk.cyan('Add shell aliases for task-master? (Y/n): ')
|
||||
chalk.cyan(
|
||||
'Add shell aliases for task-master? This lets you type "tm" instead of "task-master" (Y/n): '
|
||||
)
|
||||
);
|
||||
const addAliasesPrompted = addAliasesInput.trim().toLowerCase() !== 'n';
|
||||
const projectVersion = projectVersionInput.trim()
|
||||
? projectVersionInput
|
||||
: '1.0.0';
|
||||
|
||||
// Confirm settings...
|
||||
console.log('\nProject settings:');
|
||||
console.log(chalk.blue('Name:'), chalk.white(projectName));
|
||||
console.log(chalk.blue('Description:'), chalk.white(projectDescription));
|
||||
console.log(chalk.blue('Version:'), chalk.white(projectVersion));
|
||||
console.log(
|
||||
chalk.blue('Author:'),
|
||||
chalk.white(authorName || 'Not specified')
|
||||
);
|
||||
console.log('\nTask Master Project settings:');
|
||||
console.log(
|
||||
chalk.blue(
|
||||
'Add shell aliases (so you can use "tm" instead of "task-master"):'
|
||||
@@ -510,51 +427,30 @@ async function initializeProject(options = {}) {
|
||||
|
||||
if (!shouldContinue) {
|
||||
log('info', 'Project initialization cancelled by user');
|
||||
process.exit(0); // Exit if cancelled
|
||||
return; // Added return for clarity
|
||||
process.exit(0);
|
||||
return;
|
||||
}
|
||||
|
||||
// Still respect dryRun/skipInstall if passed initially even when prompting
|
||||
const dryRun = options.dryRun || false;
|
||||
const skipInstall = options.skipInstall || false;
|
||||
|
||||
if (dryRun) {
|
||||
log('info', 'DRY RUN MODE: No files will be modified');
|
||||
log(
|
||||
'info',
|
||||
`Would initialize project: ${projectName} (${projectVersion})`
|
||||
);
|
||||
log('info', `Description: ${projectDescription}`);
|
||||
log('info', `Author: ${authorName || 'Not specified'}`);
|
||||
log('info', 'Would initialize Task Master project');
|
||||
log('info', 'Would create/update necessary project files');
|
||||
if (addAliasesPrompted) {
|
||||
log('info', 'Would add shell aliases for task-master');
|
||||
}
|
||||
if (!skipInstall) {
|
||||
log('info', 'Would install dependencies');
|
||||
}
|
||||
return {
|
||||
projectName,
|
||||
projectDescription,
|
||||
projectVersion,
|
||||
authorName,
|
||||
dryRun: true
|
||||
};
|
||||
}
|
||||
|
||||
// Create structure using prompted values, respecting initial options where relevant
|
||||
createProjectStructure(
|
||||
projectName,
|
||||
projectDescription,
|
||||
projectVersion,
|
||||
authorName,
|
||||
skipInstall, // Use value from initial options
|
||||
addAliasesPrompted // Use value from prompt
|
||||
);
|
||||
// Create structure using only necessary values
|
||||
createProjectStructure(addAliasesPrompted, dryRun);
|
||||
} catch (error) {
|
||||
rl.close();
|
||||
log('error', `Error during prompting: ${error.message}`); // Use log function
|
||||
process.exit(1); // Exit on error during prompts
|
||||
log('error', `Error during initialization process: ${error.message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -569,128 +465,35 @@ function promptQuestion(rl, question) {
|
||||
}
|
||||
|
||||
// Function to create the project structure
|
||||
function createProjectStructure(
|
||||
projectName,
|
||||
projectDescription,
|
||||
projectVersion,
|
||||
authorName,
|
||||
skipInstall,
|
||||
addAliases
|
||||
) {
|
||||
function createProjectStructure(addAliases, dryRun) {
|
||||
const targetDir = process.cwd();
|
||||
log('info', `Initializing project in ${targetDir}`);
|
||||
|
||||
// Create directories
|
||||
ensureDirectoryExists(path.join(targetDir, '.cursor', 'rules'));
|
||||
|
||||
// Create Roo directories
|
||||
ensureDirectoryExists(path.join(targetDir, '.roo'));
|
||||
ensureDirectoryExists(path.join(targetDir, '.roo', 'rules'));
|
||||
for (const mode of [
|
||||
'architect',
|
||||
'ask',
|
||||
'boomerang',
|
||||
'code',
|
||||
'debug',
|
||||
'test'
|
||||
]) {
|
||||
ensureDirectoryExists(path.join(targetDir, '.roo', `rules-${mode}`));
|
||||
}
|
||||
|
||||
ensureDirectoryExists(path.join(targetDir, 'scripts'));
|
||||
ensureDirectoryExists(path.join(targetDir, 'tasks'));
|
||||
|
||||
// Define our package.json content
|
||||
const packageJson = {
|
||||
name: projectName.toLowerCase().replace(/\s+/g, '-'),
|
||||
version: projectVersion,
|
||||
description: projectDescription,
|
||||
author: authorName,
|
||||
type: 'module',
|
||||
scripts: {
|
||||
dev: 'node scripts/dev.js',
|
||||
list: 'node scripts/dev.js list',
|
||||
generate: 'node scripts/dev.js generate',
|
||||
'parse-prd': 'node scripts/dev.js parse-prd'
|
||||
},
|
||||
dependencies: {
|
||||
'@anthropic-ai/sdk': '^0.39.0',
|
||||
boxen: '^8.0.1',
|
||||
chalk: '^4.1.2',
|
||||
commander: '^11.1.0',
|
||||
'cli-table3': '^0.6.5',
|
||||
cors: '^2.8.5',
|
||||
dotenv: '^16.3.1',
|
||||
express: '^4.21.2',
|
||||
fastmcp: '^1.20.5',
|
||||
figlet: '^1.8.0',
|
||||
'fuse.js': '^7.0.0',
|
||||
'gradient-string': '^3.0.0',
|
||||
helmet: '^8.1.0',
|
||||
inquirer: '^12.5.0',
|
||||
jsonwebtoken: '^9.0.2',
|
||||
'lru-cache': '^10.2.0',
|
||||
openai: '^4.89.0',
|
||||
ora: '^8.2.0'
|
||||
}
|
||||
};
|
||||
|
||||
// Check if package.json exists and merge if it does
|
||||
const packageJsonPath = path.join(targetDir, 'package.json');
|
||||
if (fs.existsSync(packageJsonPath)) {
|
||||
log('info', 'package.json already exists, merging content...');
|
||||
try {
|
||||
const existingPackageJson = JSON.parse(
|
||||
fs.readFileSync(packageJsonPath, 'utf8')
|
||||
);
|
||||
|
||||
// Preserve existing fields but add our required ones
|
||||
const mergedPackageJson = {
|
||||
...existingPackageJson,
|
||||
scripts: {
|
||||
...existingPackageJson.scripts,
|
||||
...Object.fromEntries(
|
||||
Object.entries(packageJson.scripts).filter(
|
||||
([key]) =>
|
||||
!existingPackageJson.scripts ||
|
||||
!existingPackageJson.scripts[key]
|
||||
)
|
||||
)
|
||||
},
|
||||
dependencies: {
|
||||
...(existingPackageJson.dependencies || {}),
|
||||
...Object.fromEntries(
|
||||
Object.entries(packageJson.dependencies).filter(
|
||||
([key]) =>
|
||||
!existingPackageJson.dependencies ||
|
||||
!existingPackageJson.dependencies[key]
|
||||
)
|
||||
)
|
||||
}
|
||||
};
|
||||
|
||||
// Ensure type is set if not already present
|
||||
if (!mergedPackageJson.type && packageJson.type) {
|
||||
mergedPackageJson.type = packageJson.type;
|
||||
}
|
||||
|
||||
fs.writeFileSync(
|
||||
packageJsonPath,
|
||||
JSON.stringify(mergedPackageJson, null, 2)
|
||||
);
|
||||
log('success', 'Updated package.json with required fields');
|
||||
} catch (error) {
|
||||
log('error', `Failed to merge package.json: ${error.message}`);
|
||||
// Create a backup before potentially modifying
|
||||
const backupPath = `${packageJsonPath}.backup-${Date.now()}`;
|
||||
fs.copyFileSync(packageJsonPath, backupPath);
|
||||
log('info', `Created backup of existing package.json at ${backupPath}`);
|
||||
fs.writeFileSync(packageJsonPath, JSON.stringify(packageJson, null, 2));
|
||||
log(
|
||||
'warn',
|
||||
'Created new package.json (backup of original file was created)'
|
||||
);
|
||||
}
|
||||
} else {
|
||||
// If package.json doesn't exist, create it
|
||||
fs.writeFileSync(packageJsonPath, JSON.stringify(packageJson, null, 2));
|
||||
log('success', 'Created package.json');
|
||||
}
|
||||
|
||||
// Setup MCP configuration for integration with Cursor
|
||||
setupMCPConfiguration(targetDir, packageJson.name);
|
||||
setupMCPConfiguration(targetDir);
|
||||
|
||||
// Copy template files with replacements
|
||||
const replacements = {
|
||||
projectName,
|
||||
projectDescription,
|
||||
projectVersion,
|
||||
authorName,
|
||||
year: new Date().getFullYear()
|
||||
};
|
||||
|
||||
@@ -701,6 +504,15 @@ function createProjectStructure(
|
||||
replacements
|
||||
);
|
||||
|
||||
// Copy .taskmasterconfig with project name
|
||||
copyTemplateFile(
|
||||
'.taskmasterconfig',
|
||||
path.join(targetDir, '.taskmasterconfig'),
|
||||
{
|
||||
...replacements
|
||||
}
|
||||
);
|
||||
|
||||
// Copy .gitignore
|
||||
copyTemplateFile('gitignore', path.join(targetDir, '.gitignore'));
|
||||
|
||||
@@ -728,17 +540,24 @@ function createProjectStructure(
|
||||
path.join(targetDir, '.cursor', 'rules', 'self_improve.mdc')
|
||||
);
|
||||
|
||||
// Generate Roo rules from Cursor rules
|
||||
log('info', 'Generating Roo rules from Cursor rules...');
|
||||
convertAllCursorRulesToRooRules(targetDir);
|
||||
|
||||
// Copy .windsurfrules
|
||||
copyTemplateFile('windsurfrules', path.join(targetDir, '.windsurfrules'));
|
||||
|
||||
// Copy scripts/dev.js
|
||||
copyTemplateFile('dev.js', path.join(targetDir, 'scripts', 'dev.js'));
|
||||
// Copy .roomodes for Roo Code integration
|
||||
copyTemplateFile('.roomodes', path.join(targetDir, '.roomodes'));
|
||||
|
||||
// Copy scripts/README.md
|
||||
copyTemplateFile(
|
||||
'scripts_README.md',
|
||||
path.join(targetDir, 'scripts', 'README.md')
|
||||
);
|
||||
// Copy Roo rule files for each mode
|
||||
const rooModes = ['architect', 'ask', 'boomerang', 'code', 'debug', 'test'];
|
||||
for (const mode of rooModes) {
|
||||
copyTemplateFile(
|
||||
`${mode}-rules`,
|
||||
path.join(targetDir, '.roo', `rules-${mode}`, `${mode}-rules`)
|
||||
);
|
||||
}
|
||||
|
||||
// Copy example_prd.txt
|
||||
copyTemplateFile(
|
||||
@@ -749,7 +568,7 @@ function createProjectStructure(
|
||||
// Create main README.md
|
||||
copyTemplateFile(
|
||||
'README-task-master.md',
|
||||
path.join(targetDir, 'README.md'),
|
||||
path.join(targetDir, 'README-task-master.md'),
|
||||
replacements
|
||||
);
|
||||
|
||||
@@ -765,7 +584,18 @@ function createProjectStructure(
|
||||
}
|
||||
|
||||
// Run npm install automatically
|
||||
if (!isSilentMode()) {
|
||||
const npmInstallOptions = {
|
||||
cwd: targetDir,
|
||||
// Default to inherit for interactive CLI, change if silent
|
||||
stdio: 'inherit'
|
||||
};
|
||||
|
||||
if (isSilentMode()) {
|
||||
// If silent (MCP mode), suppress npm install output
|
||||
npmInstallOptions.stdio = 'ignore';
|
||||
log('info', 'Running npm install silently...'); // Log our own message
|
||||
} else {
|
||||
// Interactive mode, show the boxen message
|
||||
console.log(
|
||||
boxen(chalk.cyan('Installing dependencies...'), {
|
||||
padding: 0.5,
|
||||
@@ -776,17 +606,40 @@ function createProjectStructure(
|
||||
);
|
||||
}
|
||||
|
||||
try {
|
||||
if (!skipInstall) {
|
||||
execSync('npm install', { stdio: 'inherit', cwd: targetDir });
|
||||
log('success', 'Dependencies installed successfully!');
|
||||
} else {
|
||||
log('info', 'Dependencies installation skipped');
|
||||
// === Add Model Configuration Step ===
|
||||
if (!isSilentMode() && !dryRun) {
|
||||
console.log(
|
||||
boxen(chalk.cyan('Configuring AI Models...'), {
|
||||
padding: 0.5,
|
||||
margin: { top: 1, bottom: 0.5 },
|
||||
borderStyle: 'round',
|
||||
borderColor: 'blue'
|
||||
})
|
||||
);
|
||||
log(
|
||||
'info',
|
||||
'Running interactive model setup. Please select your preferred AI models.'
|
||||
);
|
||||
try {
|
||||
execSync('npx task-master models --setup', {
|
||||
stdio: 'inherit',
|
||||
cwd: targetDir
|
||||
});
|
||||
log('success', 'AI Models configured.');
|
||||
} catch (error) {
|
||||
log('error', 'Failed to configure AI models:', error.message);
|
||||
log('warn', 'You may need to run "task-master models --setup" manually.');
|
||||
}
|
||||
} catch (error) {
|
||||
log('error', 'Failed to install dependencies:', error.message);
|
||||
log('error', 'Please run npm install manually');
|
||||
} else if (isSilentMode() && !dryRun) {
|
||||
log('info', 'Skipping interactive model setup in silent (MCP) mode.');
|
||||
log(
|
||||
'warn',
|
||||
'Please configure AI models using "task-master models --set-..." or the "models" MCP tool.'
|
||||
);
|
||||
} else if (dryRun) {
|
||||
log('info', 'DRY RUN: Skipping interactive model setup.');
|
||||
}
|
||||
// ====================================
|
||||
|
||||
// Display success message
|
||||
if (!isSilentMode()) {
|
||||
@@ -807,52 +660,63 @@ function createProjectStructure(
|
||||
);
|
||||
}
|
||||
|
||||
// Add shell aliases if requested
|
||||
if (addAliases) {
|
||||
addShellAliases();
|
||||
}
|
||||
|
||||
// Display next steps in a nice box
|
||||
if (!isSilentMode()) {
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.cyan.bold('Things you can now do:') +
|
||||
chalk.cyan.bold('Things you should do next:') +
|
||||
'\n\n' +
|
||||
chalk.white('1. ') +
|
||||
chalk.yellow(
|
||||
'Rename .env.example to .env and add your ANTHROPIC_API_KEY and PERPLEXITY_API_KEY'
|
||||
'Configure AI models (if needed) and add API keys to `.env`'
|
||||
) +
|
||||
'\n' +
|
||||
chalk.white(' ├─ ') +
|
||||
chalk.dim('Models: Use `task-master models` commands') +
|
||||
'\n' +
|
||||
chalk.white(' └─ ') +
|
||||
chalk.dim(
|
||||
'Keys: Add provider API keys to .env (or inside the MCP config file i.e. .cursor/mcp.json)'
|
||||
) +
|
||||
'\n' +
|
||||
chalk.white('2. ') +
|
||||
chalk.yellow(
|
||||
'Discuss your idea with AI, and once ready ask for a PRD using the example_prd.txt file, and save what you get to scripts/PRD.txt'
|
||||
'Discuss your idea with AI and ask for a PRD using example_prd.txt, and save it to scripts/PRD.txt'
|
||||
) +
|
||||
'\n' +
|
||||
chalk.white('3. ') +
|
||||
chalk.yellow(
|
||||
'Ask Cursor Agent to parse your PRD.txt and generate tasks'
|
||||
'Ask Cursor Agent (or run CLI) to parse your PRD and generate initial tasks:'
|
||||
) +
|
||||
'\n' +
|
||||
chalk.white(' └─ ') +
|
||||
chalk.dim('You can also run ') +
|
||||
chalk.cyan('task-master parse-prd <your-prd-file.txt>') +
|
||||
chalk.dim('MCP Tool: ') +
|
||||
chalk.cyan('parse_prd') +
|
||||
chalk.dim(' | CLI: ') +
|
||||
chalk.cyan('task-master parse-prd scripts/prd.txt') +
|
||||
'\n' +
|
||||
chalk.white('4. ') +
|
||||
chalk.yellow('Ask Cursor to analyze the complexity of your tasks') +
|
||||
chalk.yellow(
|
||||
'Ask Cursor to analyze the complexity of the tasks in your PRD using research'
|
||||
) +
|
||||
'\n' +
|
||||
chalk.white(' └─ ') +
|
||||
chalk.dim('MCP Tool: ') +
|
||||
chalk.cyan('analyze_project_complexity') +
|
||||
chalk.dim(' | CLI: ') +
|
||||
chalk.cyan('task-master analyze-complexity') +
|
||||
'\n' +
|
||||
chalk.white('5. ') +
|
||||
chalk.yellow(
|
||||
'Ask Cursor which task is next to determine where to start'
|
||||
'Ask Cursor to expand all of your tasks using the complexity analysis'
|
||||
) +
|
||||
'\n' +
|
||||
chalk.white('6. ') +
|
||||
chalk.yellow(
|
||||
'Ask Cursor to expand any complex tasks that are too large or complex.'
|
||||
) +
|
||||
chalk.yellow('Ask Cursor to begin working on the next task') +
|
||||
'\n' +
|
||||
chalk.white('7. ') +
|
||||
chalk.yellow(
|
||||
'Ask Cursor to set the status of a task, or multiple tasks. Use the task id from the task lists.'
|
||||
'Ask Cursor to set the status of one or many tasks/subtasks at a time. Use the task id from the task lists.'
|
||||
) +
|
||||
'\n' +
|
||||
chalk.white('8. ') +
|
||||
@@ -865,6 +729,10 @@ function createProjectStructure(
|
||||
'\n\n' +
|
||||
chalk.dim(
|
||||
'* Review the README.md file to learn how to use other commands via Cursor Agent.'
|
||||
) +
|
||||
'\n' +
|
||||
chalk.dim(
|
||||
'* Use the task-master command without arguments to see all available commands.'
|
||||
),
|
||||
{
|
||||
padding: 1,
|
||||
@@ -880,7 +748,7 @@ function createProjectStructure(
}

// Function to setup MCP configuration for Cursor integration
function setupMCPConfiguration(targetDir, projectName) {
function setupMCPConfiguration(targetDir) {
const mcpDirPath = path.join(targetDir, '.cursor');
const mcpJsonPath = path.join(mcpDirPath, 'mcp.json');

@@ -899,9 +767,9 @@ function setupMCPConfiguration(targetDir, projectName) {
PERPLEXITY_API_KEY: 'YOUR_PERPLEXITY_API_KEY',
MODEL: 'claude-3-7-sonnet-20250219',
PERPLEXITY_MODEL: 'sonar-pro',
MAX_TOKENS: 64000,
TEMPERATURE: 0.2,
DEFAULT_SUBTASKS: 5,
MAX_TOKENS: '64000',
TEMPERATURE: '0.2',
DEFAULT_SUBTASKS: '5',
DEFAULT_PRIORITY: 'medium'
}
}
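For reference, a minimal usage sketch of the refactored non-interactive path (not part of this diff; it assumes `initializeProject` is exported from `scripts/init.js`, and the option names mirror the ones handled above):

```js
// Hypothetical example (assumes scripts/init.js exports initializeProject).
import { initializeProject } from './scripts/init.js';

// `yes: true` (or providing both name and description) sets skipPrompts,
// so no readline prompting happens; `dryRun: true` only logs what would be
// created and returns a summary object instead of writing files.
const summary = await initializeProject({
	yes: true,
	name: 'demo-project',
	description: 'A project managed with Task Master AI',
	version: '0.1.0',
	author: 'Vibe coder',
	dryRun: true,
	skipInstall: true,
	aliases: false
});

console.log(summary);
// => { projectName, projectDescription, projectVersion, authorName, dryRun: true }
```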
494 scripts/modules/ai-services-unified.js (new file)
@@ -0,0 +1,494 @@
/**
* ai-services-unified.js
* Centralized AI service layer using provider modules and config-manager.
*/

// Vercel AI SDK functions are NOT called directly anymore.
// import { generateText, streamText, generateObject } from 'ai';

// --- Core Dependencies ---
import {
getMainProvider,
getMainModelId,
getResearchProvider,
getResearchModelId,
getFallbackProvider,
getFallbackModelId,
getParametersForRole
} from './config-manager.js';
import { log, resolveEnvVariable } from './utils.js';

import * as anthropic from '../../src/ai-providers/anthropic.js';
import * as perplexity from '../../src/ai-providers/perplexity.js';
import * as google from '../../src/ai-providers/google.js';
import * as openai from '../../src/ai-providers/openai.js';
import * as xai from '../../src/ai-providers/xai.js';
import * as openrouter from '../../src/ai-providers/openrouter.js';
// TODO: Import other provider modules when implemented (ollama, etc.)
|
||||
// --- Provider Function Map ---
|
||||
// Maps provider names (lowercase) to their respective service functions
|
||||
const PROVIDER_FUNCTIONS = {
|
||||
anthropic: {
|
||||
generateText: anthropic.generateAnthropicText,
|
||||
streamText: anthropic.streamAnthropicText,
|
||||
generateObject: anthropic.generateAnthropicObject
|
||||
},
|
||||
perplexity: {
|
||||
generateText: perplexity.generatePerplexityText,
|
||||
streamText: perplexity.streamPerplexityText,
|
||||
generateObject: perplexity.generatePerplexityObject
|
||||
},
|
||||
google: {
|
||||
// Add Google entry
|
||||
generateText: google.generateGoogleText,
|
||||
streamText: google.streamGoogleText,
|
||||
generateObject: google.generateGoogleObject
|
||||
},
|
||||
openai: {
|
||||
// ADD: OpenAI entry
|
||||
generateText: openai.generateOpenAIText,
|
||||
streamText: openai.streamOpenAIText,
|
||||
generateObject: openai.generateOpenAIObject
|
||||
},
|
||||
xai: {
|
||||
// ADD: xAI entry
|
||||
generateText: xai.generateXaiText,
|
||||
streamText: xai.streamXaiText,
|
||||
generateObject: xai.generateXaiObject // Note: Object generation might be unsupported
|
||||
},
|
||||
openrouter: {
|
||||
// ADD: OpenRouter entry
|
||||
generateText: openrouter.generateOpenRouterText,
|
||||
streamText: openrouter.streamOpenRouterText,
|
||||
generateObject: openrouter.generateOpenRouterObject
|
||||
}
|
||||
// TODO: Add entries for ollama, etc. when implemented
|
||||
};
|
||||
|
||||
// --- Configuration for Retries ---
|
||||
const MAX_RETRIES = 2;
|
||||
const INITIAL_RETRY_DELAY_MS = 1000;
|
||||
|
||||
// Helper function to check if an error is retryable
|
||||
function isRetryableError(error) {
|
||||
const errorMessage = error.message?.toLowerCase() || '';
|
||||
return (
|
||||
errorMessage.includes('rate limit') ||
|
||||
errorMessage.includes('overloaded') ||
|
||||
errorMessage.includes('service temporarily unavailable') ||
|
||||
errorMessage.includes('timeout') ||
|
||||
errorMessage.includes('network error') ||
|
||||
error.status === 429 ||
|
||||
error.status >= 500
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts a user-friendly error message from a potentially complex AI error object.
|
||||
* Prioritizes nested messages and falls back to the top-level message.
|
||||
* @param {Error | object | any} error - The error object.
|
||||
* @returns {string} A concise error message.
|
||||
*/
|
||||
function _extractErrorMessage(error) {
|
||||
try {
|
||||
// Attempt 1: Look for Vercel SDK specific nested structure (common)
|
||||
if (error?.data?.error?.message) {
|
||||
return error.data.error.message;
|
||||
}
|
||||
|
||||
// Attempt 2: Look for nested error message directly in the error object
|
||||
if (error?.error?.message) {
|
||||
return error.error.message;
|
||||
}
|
||||
|
||||
// Attempt 3: Look for nested error message in response body if it's JSON string
|
||||
if (typeof error?.responseBody === 'string') {
|
||||
try {
|
||||
const body = JSON.parse(error.responseBody);
|
||||
if (body?.error?.message) {
|
||||
return body.error.message;
|
||||
}
|
||||
} catch (parseError) {
|
||||
// Ignore if responseBody is not valid JSON
|
||||
}
|
||||
}
|
||||
|
||||
// Attempt 4: Use the top-level message if it exists
|
||||
if (typeof error?.message === 'string' && error.message) {
|
||||
return error.message;
|
||||
}
|
||||
|
||||
// Attempt 5: Handle simple string errors
|
||||
if (typeof error === 'string') {
|
||||
return error;
|
||||
}
|
||||
|
||||
// Fallback
|
||||
return 'An unknown AI service error occurred.';
|
||||
} catch (e) {
|
||||
// Safety net
|
||||
return 'Failed to extract error message.';
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal helper to resolve the API key for a given provider.
|
||||
* @param {string} providerName - The name of the provider (lowercase).
|
||||
* @param {object|null} session - Optional MCP session object.
|
||||
* @returns {string|null} The API key or null if not found/needed.
|
||||
* @throws {Error} If a required API key is missing.
|
||||
*/
|
||||
function _resolveApiKey(providerName, session) {
|
||||
const keyMap = {
|
||||
openai: 'OPENAI_API_KEY',
|
||||
anthropic: 'ANTHROPIC_API_KEY',
|
||||
google: 'GOOGLE_API_KEY',
|
||||
perplexity: 'PERPLEXITY_API_KEY',
|
||||
mistral: 'MISTRAL_API_KEY',
|
||||
azure: 'AZURE_OPENAI_API_KEY',
|
||||
openrouter: 'OPENROUTER_API_KEY',
|
||||
xai: 'XAI_API_KEY'
|
||||
};
|
||||
|
||||
// Double check this -- I have had to use an api key for ollama in the past
|
||||
// if (providerName === 'ollama') {
|
||||
// return null; // Ollama typically doesn't require an API key for basic setup
|
||||
// }
|
||||
|
||||
const envVarName = keyMap[providerName];
|
||||
if (!envVarName) {
|
||||
throw new Error(
|
||||
`Unknown provider '${providerName}' for API key resolution.`
|
||||
);
|
||||
}
|
||||
|
||||
const apiKey = resolveEnvVariable(envVarName, session);
|
||||
if (!apiKey) {
|
||||
throw new Error(
|
||||
`Required API key ${envVarName} for provider '${providerName}' is not set in environment or session.`
|
||||
);
|
||||
}
|
||||
return apiKey;
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal helper to attempt a provider-specific AI API call with retries.
|
||||
*
|
||||
* @param {function} providerApiFn - The specific provider function to call (e.g., generateAnthropicText).
|
||||
* @param {object} callParams - Parameters object for the provider function.
|
||||
* @param {string} providerName - Name of the provider (for logging).
|
||||
* @param {string} modelId - Specific model ID (for logging).
|
||||
* @param {string} attemptRole - The role being attempted (for logging).
|
||||
* @returns {Promise<object>} The result from the successful API call.
|
||||
* @throws {Error} If the call fails after all retries.
|
||||
*/
|
||||
async function _attemptProviderCallWithRetries(
|
||||
providerApiFn,
|
||||
callParams,
|
||||
providerName,
|
||||
modelId,
|
||||
attemptRole
|
||||
) {
|
||||
let retries = 0;
|
||||
const fnName = providerApiFn.name;
|
||||
|
||||
while (retries <= MAX_RETRIES) {
|
||||
try {
|
||||
log(
|
||||
'info',
|
||||
`Attempt ${retries + 1}/${MAX_RETRIES + 1} calling ${fnName} (Provider: ${providerName}, Model: ${modelId}, Role: ${attemptRole})`
|
||||
);
|
||||
|
||||
// Call the specific provider function directly
|
||||
const result = await providerApiFn(callParams);
|
||||
|
||||
log(
|
||||
'info',
|
||||
`${fnName} succeeded for role ${attemptRole} (Provider: ${providerName}) on attempt ${retries + 1}`
|
||||
);
|
||||
return result;
|
||||
} catch (error) {
|
||||
log(
|
||||
'warn',
|
||||
`Attempt ${retries + 1} failed for role ${attemptRole} (${fnName} / ${providerName}): ${error.message}`
|
||||
);
|
||||
|
||||
if (isRetryableError(error) && retries < MAX_RETRIES) {
|
||||
retries++;
|
||||
const delay = INITIAL_RETRY_DELAY_MS * Math.pow(2, retries - 1);
|
||||
log(
|
||||
'info',
|
||||
`Retryable error detected. Retrying in ${delay / 1000}s...`
|
||||
);
|
||||
await new Promise((resolve) => setTimeout(resolve, delay));
|
||||
} else {
|
||||
log(
|
||||
'error',
|
||||
`Non-retryable error or max retries reached for role ${attemptRole} (${fnName} / ${providerName}).`
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Should not be reached due to throw in the else block
|
||||
throw new Error(
|
||||
`Exhausted all retries for role ${attemptRole} (${fnName} / ${providerName})`
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Base logic for unified service functions.
|
||||
* @param {string} serviceType - Type of service ('generateText', 'streamText', 'generateObject').
|
||||
* @param {object} params - Original parameters passed to the service function.
|
||||
* @returns {Promise<any>} Result from the underlying provider call.
|
||||
*/
|
||||
async function _unifiedServiceRunner(serviceType, params) {
|
||||
const {
|
||||
role: initialRole,
|
||||
session,
|
||||
systemPrompt,
|
||||
prompt,
|
||||
schema,
|
||||
objectName,
|
||||
...restApiParams
|
||||
} = params;
|
||||
log('info', `${serviceType}Service called`, { role: initialRole });
|
||||
|
||||
let sequence;
|
||||
if (initialRole === 'main') {
|
||||
sequence = ['main', 'fallback', 'research'];
|
||||
} else if (initialRole === 'fallback') {
|
||||
sequence = ['fallback', 'research'];
|
||||
} else if (initialRole === 'research') {
|
||||
sequence = ['research', 'fallback'];
|
||||
} else {
|
||||
log(
|
||||
'warn',
|
||||
`Unknown initial role: ${initialRole}. Defaulting to main -> fallback -> research sequence.`
|
||||
);
|
||||
sequence = ['main', 'fallback', 'research'];
|
||||
}
|
||||
|
||||
let lastError = null;
|
||||
let lastCleanErrorMessage =
|
||||
'AI service call failed for all configured roles.';
|
||||
|
||||
for (const currentRole of sequence) {
|
||||
let providerName, modelId, apiKey, roleParams, providerFnSet, providerApiFn;
|
||||
|
||||
try {
|
||||
log('info', `New AI service call with role: ${currentRole}`);
|
||||
|
||||
// 1. Get Config: Provider, Model, Parameters for the current role
|
||||
// Call individual getters based on the current role
|
||||
if (currentRole === 'main') {
|
||||
providerName = getMainProvider();
|
||||
modelId = getMainModelId();
|
||||
} else if (currentRole === 'research') {
|
||||
providerName = getResearchProvider();
|
||||
modelId = getResearchModelId();
|
||||
} else if (currentRole === 'fallback') {
|
||||
providerName = getFallbackProvider();
|
||||
modelId = getFallbackModelId();
|
||||
} else {
|
||||
log(
|
||||
'error',
|
||||
`Unknown role encountered in _unifiedServiceRunner: ${currentRole}`
|
||||
);
|
||||
lastError =
|
||||
lastError || new Error(`Unknown AI role specified: ${currentRole}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!providerName || !modelId) {
|
||||
log(
|
||||
'warn',
|
||||
`Skipping role '${currentRole}': Provider or Model ID not configured.`
|
||||
);
|
||||
lastError =
|
||||
lastError ||
|
||||
new Error(
|
||||
`Configuration missing for role '${currentRole}'. Provider: ${providerName}, Model: ${modelId}`
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
roleParams = getParametersForRole(currentRole);
|
||||
|
||||
// 2. Get Provider Function Set
|
||||
providerFnSet = PROVIDER_FUNCTIONS[providerName?.toLowerCase()];
|
||||
if (!providerFnSet) {
|
||||
log(
|
||||
'warn',
|
||||
`Skipping role '${currentRole}': Provider '${providerName}' not supported or map entry missing.`
|
||||
);
|
||||
lastError =
|
||||
lastError ||
|
||||
new Error(`Unsupported provider configured: ${providerName}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Use the original service type to get the function
|
||||
providerApiFn = providerFnSet[serviceType];
|
||||
if (typeof providerApiFn !== 'function') {
|
||||
log(
|
||||
'warn',
|
||||
`Skipping role '${currentRole}': Service type '${serviceType}' not implemented for provider '${providerName}'.`
|
||||
);
|
||||
lastError =
|
||||
lastError ||
|
||||
new Error(
|
||||
`Service '${serviceType}' not implemented for provider ${providerName}`
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// 3. Resolve API Key (will throw if required and missing)
|
||||
apiKey = _resolveApiKey(providerName?.toLowerCase(), session);
|
||||
|
||||
// 4. Construct Messages Array
|
||||
const messages = [];
|
||||
if (systemPrompt) {
|
||||
messages.push({ role: 'system', content: systemPrompt });
|
||||
}
|
||||
|
||||
// IN THE FUTURE WHEN DOING CONTEXT IMPROVEMENTS
|
||||
// {
|
||||
// type: 'text',
|
||||
// text: 'Large cached context here like a tasks json',
|
||||
// providerOptions: {
|
||||
// anthropic: { cacheControl: { type: 'ephemeral' } }
|
||||
// }
|
||||
// }
|
||||
|
||||
// Example
|
||||
// if (params.context) { // context is a json string of a tasks object or some other stu
|
||||
// messages.push({
|
||||
// type: 'text',
|
||||
// text: params.context,
|
||||
// providerOptions: { anthropic: { cacheControl: { type: 'ephemeral' } } }
|
||||
// });
|
||||
// }
|
||||
|
||||
if (prompt) {
|
||||
// Ensure prompt exists before adding
|
||||
messages.push({ role: 'user', content: prompt });
|
||||
} else {
|
||||
// Throw an error if the prompt is missing, as it's essential
|
||||
throw new Error('User prompt content is missing.');
|
||||
}
|
||||
|
||||
// 5. Prepare call parameters (using messages array)
|
||||
const callParams = {
|
||||
apiKey,
|
||||
modelId,
|
||||
maxTokens: roleParams.maxTokens,
|
||||
temperature: roleParams.temperature,
|
||||
messages,
|
||||
...(serviceType === 'generateObject' && { schema, objectName }),
|
||||
...restApiParams
|
||||
};
|
||||
|
||||
// 6. Attempt the call with retries
|
||||
const result = await _attemptProviderCallWithRetries(
|
||||
providerApiFn,
|
||||
callParams,
|
||||
providerName,
|
||||
modelId,
|
||||
currentRole
|
||||
);
|
||||
|
||||
log('info', `${serviceType}Service succeeded using role: ${currentRole}`);
|
||||
|
||||
return result;
|
||||
} catch (error) {
|
||||
const cleanMessage = _extractErrorMessage(error);
|
||||
log(
|
||||
'error',
|
||||
`Service call failed for role ${currentRole} (Provider: ${providerName || 'unknown'}, Model: ${modelId || 'unknown'}): ${cleanMessage}`
|
||||
);
|
||||
lastError = error;
|
||||
lastCleanErrorMessage = cleanMessage;
|
||||
|
||||
if (serviceType === 'generateObject') {
|
||||
const lowerCaseMessage = cleanMessage.toLowerCase();
|
||||
if (
|
||||
lowerCaseMessage.includes(
|
||||
'no endpoints found that support tool use'
|
||||
) ||
|
||||
lowerCaseMessage.includes('does not support tool_use') ||
|
||||
lowerCaseMessage.includes('tool use is not supported') ||
|
||||
lowerCaseMessage.includes('tools are not supported') ||
|
||||
lowerCaseMessage.includes('function calling is not supported')
|
||||
) {
|
||||
const specificErrorMsg = `Model '${modelId || 'unknown'}' via provider '${providerName || 'unknown'}' does not support the 'tool use' required by generateObjectService. Please configure a model that supports tool/function calling for the '${currentRole}' role, or use generateTextService if structured output is not strictly required.`;
|
||||
log('error', `[Tool Support Error] ${specificErrorMsg}`);
|
||||
throw new Error(specificErrorMsg);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If loop completes, all roles failed
|
||||
log('error', `All roles in the sequence [${sequence.join(', ')}] failed.`);
|
||||
// Throw a new error with the cleaner message from the last failure
|
||||
throw new Error(lastCleanErrorMessage);
|
||||
}
|
||||
|
||||
/**
|
||||
* Unified service function for generating text.
|
||||
* Handles client retrieval, retries, and fallback sequence.
|
||||
*
|
||||
* @param {object} params - Parameters for the service call.
|
||||
* @param {string} params.role - The initial client role ('main', 'research', 'fallback').
|
||||
* @param {object} [params.session=null] - Optional MCP session object.
|
||||
* @param {string} params.prompt - The prompt for the AI.
|
||||
* @param {string} [params.systemPrompt] - Optional system prompt.
|
||||
* // Other specific generateText params can be included here.
|
||||
* @returns {Promise<string>} The generated text content.
|
||||
*/
|
||||
async function generateTextService(params) {
|
||||
return _unifiedServiceRunner('generateText', params);
|
||||
}
|
||||
|
||||
/**
|
||||
* Unified service function for streaming text.
|
||||
* Handles client retrieval, retries, and fallback sequence.
|
||||
*
|
||||
* @param {object} params - Parameters for the service call.
|
||||
* @param {string} params.role - The initial client role ('main', 'research', 'fallback').
|
||||
* @param {object} [params.session=null] - Optional MCP session object.
|
||||
* @param {string} params.prompt - The prompt for the AI.
|
||||
* @param {string} [params.systemPrompt] - Optional system prompt.
|
||||
* // Other specific streamText params can be included here.
|
||||
* @returns {Promise<ReadableStream<string>>} A readable stream of text deltas.
|
||||
*/
|
||||
async function streamTextService(params) {
|
||||
return _unifiedServiceRunner('streamText', params);
|
||||
}
|
||||
|
||||
/**
|
||||
* Unified service function for generating structured objects.
|
||||
* Handles client retrieval, retries, and fallback sequence.
|
||||
*
|
||||
* @param {object} params - Parameters for the service call.
|
||||
* @param {string} params.role - The initial client role ('main', 'research', 'fallback').
|
||||
* @param {object} [params.session=null] - Optional MCP session object.
|
||||
* @param {import('zod').ZodSchema} params.schema - The Zod schema for the expected object.
|
||||
* @param {string} params.prompt - The prompt for the AI.
|
||||
* @param {string} [params.systemPrompt] - Optional system prompt.
|
||||
* @param {string} [params.objectName='generated_object'] - Name for object/tool.
|
||||
* @param {number} [params.maxRetries=3] - Max retries for object generation.
|
||||
* @returns {Promise<object>} The generated object matching the schema.
|
||||
*/
|
||||
async function generateObjectService(params) {
const defaults = {
objectName: 'generated_object',
maxRetries: 3
};
const combinedParams = { ...defaults, ...params };
return _unifiedServiceRunner('generateObject', combinedParams);
}

export { generateTextService, streamTextService, generateObjectService };
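For context, a minimal sketch of how the new unified layer might be called (not part of this diff; the import path and returned value follow the JSDoc above, and the prompt text is made up):

```js
// Hypothetical example of calling the unified text service.
import { generateTextService } from './scripts/modules/ai-services-unified.js';

// Starts with the 'main' role, then falls back to 'fallback' and 'research'
// providers; transient errors (rate limits, 5xx, timeouts) are retried with
// exponential backoff before moving on to the next role.
const text = await generateTextService({
	role: 'main',
	session: null, // optional MCP session used for API key resolution
	systemPrompt: 'You are a concise assistant.',
	prompt: 'Summarize the goal of this project in one sentence.'
});

console.log(text);
```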
File diff suppressed because it is too large
716 scripts/modules/config-manager.js (new file)
@@ -0,0 +1,716 @@
import fs from 'fs';
import path from 'path';
import chalk from 'chalk';
import { fileURLToPath } from 'url';
import { log, resolveEnvVariable, findProjectRoot } from './utils.js';

// Calculate __dirname in ESM
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

// Load supported models from JSON file using the calculated __dirname
let MODEL_MAP;
try {
const supportedModelsRaw = fs.readFileSync(
path.join(__dirname, 'supported-models.json'),
'utf-8'
);
MODEL_MAP = JSON.parse(supportedModelsRaw);
} catch (error) {
console.error(
chalk.red(
'FATAL ERROR: Could not load supported-models.json. Please ensure the file exists and is valid JSON.'
),
error
);
MODEL_MAP = {}; // Default to empty map on error to avoid crashing, though functionality will be limited
process.exit(1); // Exit if models can't be loaded
}
|
||||
const CONFIG_FILE_NAME = '.taskmasterconfig';
|
||||
|
||||
// Define valid providers dynamically from the loaded MODEL_MAP
|
||||
const VALID_PROVIDERS = Object.keys(MODEL_MAP || {});
|
||||
|
||||
// Default configuration values (used if .taskmasterconfig is missing or incomplete)
|
||||
const DEFAULTS = {
|
||||
models: {
|
||||
main: {
|
||||
provider: 'anthropic',
|
||||
modelId: 'claude-3-7-sonnet-20250219',
|
||||
maxTokens: 64000,
|
||||
temperature: 0.2
|
||||
},
|
||||
research: {
|
||||
provider: 'perplexity',
|
||||
modelId: 'sonar-pro',
|
||||
maxTokens: 8700,
|
||||
temperature: 0.1
|
||||
},
|
||||
fallback: {
|
||||
// No default fallback provider/model initially
|
||||
provider: 'anthropic',
|
||||
modelId: 'claude-3-5-sonnet',
|
||||
maxTokens: 64000, // Default parameters if fallback IS configured
|
||||
temperature: 0.2
|
||||
}
|
||||
},
|
||||
global: {
|
||||
logLevel: 'info',
|
||||
debug: false,
|
||||
defaultSubtasks: 5,
|
||||
defaultPriority: 'medium',
|
||||
projectName: 'Task Master',
|
||||
ollamaBaseUrl: 'http://localhost:11434/api'
|
||||
}
|
||||
};
|
||||
|
||||
// --- Internal Config Loading ---
|
||||
let loadedConfig = null;
|
||||
let loadedConfigRoot = null; // Track which root loaded the config
|
||||
|
||||
// Custom Error for configuration issues
|
||||
class ConfigurationError extends Error {
|
||||
constructor(message) {
|
||||
super(message);
|
||||
this.name = 'ConfigurationError';
|
||||
}
|
||||
}
|
||||
|
||||
function _loadAndValidateConfig(explicitRoot = null) {
|
||||
const defaults = DEFAULTS; // Use the defined defaults
|
||||
let rootToUse = explicitRoot;
|
||||
let configSource = explicitRoot
|
||||
? `explicit root (${explicitRoot})`
|
||||
: 'defaults (no root provided yet)';
|
||||
|
||||
// ---> If no explicit root, TRY to find it <---
|
||||
if (!rootToUse) {
|
||||
rootToUse = findProjectRoot();
|
||||
if (rootToUse) {
|
||||
configSource = `found root (${rootToUse})`;
|
||||
} else {
|
||||
// No root found, return defaults immediately
|
||||
return defaults;
|
||||
}
|
||||
}
|
||||
// ---> End find project root logic <---
|
||||
|
||||
// --- Proceed with loading from the determined rootToUse ---
|
||||
const configPath = path.join(rootToUse, CONFIG_FILE_NAME);
|
||||
let config = { ...defaults }; // Start with a deep copy of defaults
|
||||
let configExists = false;
|
||||
|
||||
if (fs.existsSync(configPath)) {
|
||||
configExists = true;
|
||||
try {
|
||||
const rawData = fs.readFileSync(configPath, 'utf-8');
|
||||
const parsedConfig = JSON.parse(rawData);
|
||||
|
||||
// Deep merge parsed config onto defaults
|
||||
config = {
|
||||
models: {
|
||||
main: { ...defaults.models.main, ...parsedConfig?.models?.main },
|
||||
research: {
|
||||
...defaults.models.research,
|
||||
...parsedConfig?.models?.research
|
||||
},
|
||||
fallback:
|
||||
parsedConfig?.models?.fallback?.provider &&
|
||||
parsedConfig?.models?.fallback?.modelId
|
||||
? { ...defaults.models.fallback, ...parsedConfig.models.fallback }
|
||||
: { ...defaults.models.fallback }
|
||||
},
|
||||
global: { ...defaults.global, ...parsedConfig?.global }
|
||||
};
|
||||
configSource = `file (${configPath})`; // Update source info
|
||||
|
||||
// --- Validation (Warn if file content is invalid) ---
|
||||
// Use log.warn for consistency
|
||||
if (!validateProvider(config.models.main.provider)) {
|
||||
console.warn(
|
||||
chalk.yellow(
|
||||
`Warning: Invalid main provider "${config.models.main.provider}" in ${configPath}. Falling back to default.`
|
||||
)
|
||||
);
|
||||
config.models.main = { ...defaults.models.main };
|
||||
}
|
||||
if (!validateProvider(config.models.research.provider)) {
|
||||
console.warn(
|
||||
chalk.yellow(
|
||||
`Warning: Invalid research provider "${config.models.research.provider}" in ${configPath}. Falling back to default.`
|
||||
)
|
||||
);
|
||||
config.models.research = { ...defaults.models.research };
|
||||
}
|
||||
if (
|
||||
config.models.fallback?.provider &&
|
||||
!validateProvider(config.models.fallback.provider)
|
||||
) {
|
||||
console.warn(
|
||||
chalk.yellow(
|
||||
`Warning: Invalid fallback provider "${config.models.fallback.provider}" in ${configPath}. Fallback model configuration will be ignored.`
|
||||
)
|
||||
);
|
||||
config.models.fallback.provider = undefined;
|
||||
config.models.fallback.modelId = undefined;
|
||||
}
|
||||
} catch (error) {
|
||||
// Use console.error for actual errors during parsing
|
||||
console.error(
|
||||
chalk.red(
|
||||
`Error reading or parsing ${configPath}: ${error.message}. Using default configuration.`
|
||||
)
|
||||
);
|
||||
config = { ...defaults }; // Reset to defaults on parse error
|
||||
configSource = `defaults (parse error at ${configPath})`;
|
||||
}
|
||||
} else {
|
||||
// Config file doesn't exist at the determined rootToUse.
|
||||
if (explicitRoot) {
|
||||
// Only warn if an explicit root was *expected*.
|
||||
console.warn(
|
||||
chalk.yellow(
|
||||
`Warning: ${CONFIG_FILE_NAME} not found at provided project root (${explicitRoot}). Using default configuration. Run 'task-master models --setup' to configure.`
|
||||
)
|
||||
);
|
||||
} else {
|
||||
console.warn(
|
||||
chalk.yellow(
|
||||
`Warning: ${CONFIG_FILE_NAME} not found at derived root (${rootToUse}). Using defaults.`
|
||||
)
|
||||
);
|
||||
}
|
||||
// Keep config as defaults
|
||||
config = { ...defaults };
|
||||
configSource = `defaults (file not found at ${configPath})`;
|
||||
}
|
||||
|
||||
return config;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the current configuration, loading it if necessary.
|
||||
* Handles MCP initialization context gracefully.
|
||||
* @param {string|null} explicitRoot - Optional explicit path to the project root.
|
||||
* @param {boolean} forceReload - Force reloading the config file.
|
||||
* @returns {object} The loaded configuration object.
|
||||
*/
|
||||
function getConfig(explicitRoot = null, forceReload = false) {
|
||||
// Determine if a reload is necessary
|
||||
const needsLoad =
|
||||
!loadedConfig ||
|
||||
forceReload ||
|
||||
(explicitRoot && explicitRoot !== loadedConfigRoot);
|
||||
|
||||
if (needsLoad) {
|
||||
const newConfig = _loadAndValidateConfig(explicitRoot); // _load handles null explicitRoot
|
||||
|
||||
// Only update the global cache if loading was forced or if an explicit root
|
||||
// was provided (meaning we attempted to load a specific project's config).
|
||||
// We avoid caching the initial default load triggered without an explicitRoot.
|
||||
if (forceReload || explicitRoot) {
|
||||
loadedConfig = newConfig;
|
||||
loadedConfigRoot = explicitRoot; // Store the root used for this loaded config
|
||||
}
|
||||
return newConfig; // Return the newly loaded/default config
|
||||
}
|
||||
|
||||
// If no load was needed, return the cached config
|
||||
return loadedConfig;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates if a provider name is in the list of supported providers.
|
||||
* @param {string} providerName The name of the provider.
|
||||
* @returns {boolean} True if the provider is valid, false otherwise.
|
||||
*/
|
||||
function validateProvider(providerName) {
|
||||
return VALID_PROVIDERS.includes(providerName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Optional: Validates if a modelId is known for a given provider based on MODEL_MAP.
|
||||
* This is a non-strict validation; an unknown model might still be valid.
|
||||
* @param {string} providerName The name of the provider.
|
||||
* @param {string} modelId The model ID.
|
||||
* @returns {boolean} True if the modelId is in the map for the provider, false otherwise.
|
||||
*/
|
||||
function validateProviderModelCombination(providerName, modelId) {
|
||||
// If provider isn't even in our map, we can't validate the model
|
||||
if (!MODEL_MAP[providerName]) {
|
||||
return true; // Allow unknown providers or those without specific model lists
|
||||
}
|
||||
// If the provider is known, check if the model is in its list OR if the list is empty (meaning accept any)
|
||||
return (
|
||||
MODEL_MAP[providerName].length === 0 ||
|
||||
// Use .some() to check the 'id' property of objects in the array
|
||||
MODEL_MAP[providerName].some((modelObj) => modelObj.id === modelId)
|
||||
);
|
||||
}
|
||||
|
||||
// --- Role-Specific Getters ---

function getModelConfigForRole(role, explicitRoot = null) {
	const config = getConfig(explicitRoot);
	const roleConfig = config?.models?.[role];
	if (!roleConfig) {
		log(
			'warn',
			`No model configuration found for role: ${role}. Returning default.`
		);
		return DEFAULTS.models[role] || {};
	}
	return roleConfig;
}

function getMainProvider(explicitRoot = null) {
	return getModelConfigForRole('main', explicitRoot).provider;
}

function getMainModelId(explicitRoot = null) {
	return getModelConfigForRole('main', explicitRoot).modelId;
}

function getMainMaxTokens(explicitRoot = null) {
	// Directly return value from config (which includes defaults)
	return getModelConfigForRole('main', explicitRoot).maxTokens;
}

function getMainTemperature(explicitRoot = null) {
	// Directly return value from config
	return getModelConfigForRole('main', explicitRoot).temperature;
}

function getResearchProvider(explicitRoot = null) {
	return getModelConfigForRole('research', explicitRoot).provider;
}

function getResearchModelId(explicitRoot = null) {
	return getModelConfigForRole('research', explicitRoot).modelId;
}

function getResearchMaxTokens(explicitRoot = null) {
	// Directly return value from config
	return getModelConfigForRole('research', explicitRoot).maxTokens;
}

function getResearchTemperature(explicitRoot = null) {
	// Directly return value from config
	return getModelConfigForRole('research', explicitRoot).temperature;
}

function getFallbackProvider(explicitRoot = null) {
	// Directly return value from config (will be undefined if not set)
	return getModelConfigForRole('fallback', explicitRoot).provider;
}

function getFallbackModelId(explicitRoot = null) {
	// Directly return value from config
	return getModelConfigForRole('fallback', explicitRoot).modelId;
}

function getFallbackMaxTokens(explicitRoot = null) {
	// Directly return value from config
	return getModelConfigForRole('fallback', explicitRoot).maxTokens;
}

function getFallbackTemperature(explicitRoot = null) {
	// Directly return value from config
	return getModelConfigForRole('fallback', explicitRoot).temperature;
}

// --- Global Settings Getters ---

function getGlobalConfig(explicitRoot = null) {
	const config = getConfig(explicitRoot);
	// Ensure global defaults are applied if global section is missing
	return { ...DEFAULTS.global, ...(config?.global || {}) };
}

function getLogLevel(explicitRoot = null) {
	// Directly return value from config
	return getGlobalConfig(explicitRoot).logLevel.toLowerCase();
}

function getDebugFlag(explicitRoot = null) {
	// Directly return value from config, ensure boolean
	return getGlobalConfig(explicitRoot).debug === true;
}

function getDefaultSubtasks(explicitRoot = null) {
	// Directly return value from config, ensure integer
	const val = getGlobalConfig(explicitRoot).defaultSubtasks;
	const parsedVal = parseInt(val, 10);
	return isNaN(parsedVal) ? DEFAULTS.global.defaultSubtasks : parsedVal;
}

function getDefaultPriority(explicitRoot = null) {
	// Directly return value from config
	return getGlobalConfig(explicitRoot).defaultPriority;
}

function getProjectName(explicitRoot = null) {
	// Directly return value from config
	return getGlobalConfig(explicitRoot).projectName;
}

function getOllamaBaseUrl(explicitRoot = null) {
	// Directly return value from config
	return getGlobalConfig(explicitRoot).ollamaBaseUrl;
}

/**
 * Gets model parameters (maxTokens, temperature) for a specific role,
 * considering model-specific overrides from supported-models.json.
 * @param {string} role - The role ('main', 'research', 'fallback').
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {{maxTokens: number, temperature: number}}
 */
function getParametersForRole(role, explicitRoot = null) {
	const roleConfig = getModelConfigForRole(role, explicitRoot);
	const roleMaxTokens = roleConfig.maxTokens;
	const roleTemperature = roleConfig.temperature;
	const modelId = roleConfig.modelId;
	const providerName = roleConfig.provider;

	let effectiveMaxTokens = roleMaxTokens; // Start with the role's default

	try {
		// Find the model definition in MODEL_MAP
		const providerModels = MODEL_MAP[providerName];
		if (providerModels && Array.isArray(providerModels)) {
			const modelDefinition = providerModels.find((m) => m.id === modelId);

			// Check if a model-specific max_tokens is defined and valid
			if (
				modelDefinition &&
				typeof modelDefinition.max_tokens === 'number' &&
				modelDefinition.max_tokens > 0
			) {
				const modelSpecificMaxTokens = modelDefinition.max_tokens;
				// Use the minimum of the role default and the model specific limit
				effectiveMaxTokens = Math.min(roleMaxTokens, modelSpecificMaxTokens);
				log(
					'debug',
					`Applying model-specific max_tokens (${modelSpecificMaxTokens}) for ${modelId}. Effective limit: ${effectiveMaxTokens}`
				);
			} else {
				log(
					'debug',
					`No valid model-specific max_tokens override found for ${modelId}. Using role default: ${roleMaxTokens}`
				);
			}
		} else {
			log(
				'debug',
				`No model definitions found for provider ${providerName} in MODEL_MAP. Using role default maxTokens: ${roleMaxTokens}`
			);
		}
	} catch (lookupError) {
		log(
			'warn',
			`Error looking up model-specific max_tokens for ${modelId}: ${lookupError.message}. Using role default: ${roleMaxTokens}`
		);
		// Fallback to role default on error
		effectiveMaxTokens = roleMaxTokens;
	}

	return {
		maxTokens: effectiveMaxTokens,
		temperature: roleTemperature
	};
}

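A minimal sketch (illustrative, not part of the commit) of the effective-limit rule this function applies; the numbers below are assumptions, not real configuration:

// The effective limit is the smaller of the role's configured maxTokens and the
// model's max_tokens entry from supported-models.json.
const roleMaxTokens = 100000; // assumed role default from .taskmasterconfig
const modelSpecificMaxTokens = 64000; // assumed entry from supported-models.json
const effectiveMaxTokens = Math.min(roleMaxTokens, modelSpecificMaxTokens); // 64000
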
/**
 * Checks if the API key for a given provider is set in the environment.
 * Checks process.env first, then session.env if session is provided.
 * @param {string} providerName - The name of the provider (e.g., 'openai', 'anthropic').
 * @param {object|null} [session=null] - The MCP session object (optional).
 * @returns {boolean} True if the API key is set, false otherwise.
 */
function isApiKeySet(providerName, session = null) {
	// Define the expected environment variable name for each provider
	if (providerName?.toLowerCase() === 'ollama') {
		return true; // Indicate key status is effectively "OK"
	}

	const keyMap = {
		openai: 'OPENAI_API_KEY',
		anthropic: 'ANTHROPIC_API_KEY',
		google: 'GOOGLE_API_KEY',
		perplexity: 'PERPLEXITY_API_KEY',
		mistral: 'MISTRAL_API_KEY',
		azure: 'AZURE_OPENAI_API_KEY',
		openrouter: 'OPENROUTER_API_KEY',
		xai: 'XAI_API_KEY'
		// Add other providers as needed
	};

	const providerKey = providerName?.toLowerCase();
	if (!providerKey || !keyMap[providerKey]) {
		log('warn', `Unknown provider name: ${providerName} in isApiKeySet check.`);
		return false;
	}

	const envVarName = keyMap[providerKey];
	const apiKeyValue = resolveEnvVariable(envVarName, session);

	// Check if the key exists, is not empty, and is not a placeholder
	return (
		apiKeyValue &&
		apiKeyValue.trim() !== '' &&
		!/YOUR_.*_API_KEY_HERE/.test(apiKeyValue) && // General placeholder check
		!apiKeyValue.includes('KEY_HERE')
	); // Another common placeholder pattern
}

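An illustrative sketch (not part of the commit) of the placeholder handling, assuming the key is resolved from process.env; the values are assumptions:

// Assumed values for illustration only.
process.env.OPENAI_API_KEY = 'YOUR_OPENAI_API_KEY_HERE';
isApiKeySet('openai'); // false - placeholder values are rejected
process.env.OPENAI_API_KEY = 'sk-some-real-key';
isApiKeySet('openai'); // true - non-empty, non-placeholder key
isApiKeySet('ollama'); // true - Ollama needs no API key
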
/**
 * Checks the API key status within .cursor/mcp.json for a given provider.
 * Reads the mcp.json file, finds the taskmaster-ai server config, and checks the relevant env var.
 * @param {string} providerName The name of the provider.
 * @param {string|null} projectRoot - Optional explicit path to the project root.
 * @returns {boolean} True if the key exists and is not a placeholder, false otherwise.
 */
function getMcpApiKeyStatus(providerName, projectRoot = null) {
	const rootDir = projectRoot || findProjectRoot(); // Use existing root finding
	if (!rootDir) {
		console.warn(
			chalk.yellow('Warning: Could not find project root to check mcp.json.')
		);
		return false; // Cannot check without root
	}
	const mcpConfigPath = path.join(rootDir, '.cursor', 'mcp.json');

	if (!fs.existsSync(mcpConfigPath)) {
		// console.warn(chalk.yellow('Warning: .cursor/mcp.json not found.'));
		return false; // File doesn't exist
	}

	try {
		const mcpConfigRaw = fs.readFileSync(mcpConfigPath, 'utf-8');
		const mcpConfig = JSON.parse(mcpConfigRaw);

		const mcpEnv = mcpConfig?.mcpServers?.['taskmaster-ai']?.env;
		if (!mcpEnv) {
			// console.warn(chalk.yellow('Warning: Could not find taskmaster-ai env in mcp.json.'));
			return false; // Structure missing
		}

		let apiKeyToCheck = null;
		let placeholderValue = null;

		switch (providerName) {
			case 'anthropic':
				apiKeyToCheck = mcpEnv.ANTHROPIC_API_KEY;
				placeholderValue = 'YOUR_ANTHROPIC_API_KEY_HERE';
				break;
			case 'openai':
				apiKeyToCheck = mcpEnv.OPENAI_API_KEY;
				placeholderValue = 'YOUR_OPENAI_API_KEY_HERE'; // Assuming placeholder matches OPENAI
				break;
			case 'openrouter':
				apiKeyToCheck = mcpEnv.OPENROUTER_API_KEY;
				placeholderValue = 'YOUR_OPENROUTER_API_KEY_HERE';
				break;
			case 'google':
				apiKeyToCheck = mcpEnv.GOOGLE_API_KEY;
				placeholderValue = 'YOUR_GOOGLE_API_KEY_HERE';
				break;
			case 'perplexity':
				apiKeyToCheck = mcpEnv.PERPLEXITY_API_KEY;
				placeholderValue = 'YOUR_PERPLEXITY_API_KEY_HERE';
				break;
			case 'xai':
				apiKeyToCheck = mcpEnv.XAI_API_KEY;
				placeholderValue = 'YOUR_XAI_API_KEY_HERE';
				break;
			case 'ollama':
				return true; // No key needed
			case 'mistral':
				apiKeyToCheck = mcpEnv.MISTRAL_API_KEY;
				placeholderValue = 'YOUR_MISTRAL_API_KEY_HERE';
				break;
			case 'azure':
				apiKeyToCheck = mcpEnv.AZURE_OPENAI_API_KEY;
				placeholderValue = 'YOUR_AZURE_OPENAI_API_KEY_HERE';
				break;
			default:
				return false; // Unknown provider
		}

		return !!apiKeyToCheck && !/KEY_HERE$/.test(apiKeyToCheck);
	} catch (error) {
		console.error(
			chalk.red(`Error reading or parsing .cursor/mcp.json: ${error.message}`)
		);
		return false;
	}
}

/**
 * Gets a list of available models based on the MODEL_MAP.
 * @returns {Array<{id: string, name: string, provider: string, swe_score: number|null, cost_per_1m_tokens: {input: number|null, output: number|null}|null, allowed_roles: string[]}>}
 */
function getAvailableModels() {
	const available = [];
	for (const [provider, models] of Object.entries(MODEL_MAP)) {
		if (models.length > 0) {
			models.forEach((modelObj) => {
				// Basic name generation - can be improved
				const modelId = modelObj.id;
				const sweScore = modelObj.swe_score;
				const cost = modelObj.cost_per_1m_tokens;
				const allowedRoles = modelObj.allowed_roles || ['main', 'fallback'];
				const nameParts = modelId
					.split('-')
					.map((p) => p.charAt(0).toUpperCase() + p.slice(1));
				// Handle specific known names better if needed
				let name = nameParts.join(' ');
				if (modelId === 'claude-3.5-sonnet-20240620')
					name = 'Claude 3.5 Sonnet';
				if (modelId === 'claude-3-7-sonnet-20250219')
					name = 'Claude 3.7 Sonnet';
				if (modelId === 'gpt-4o') name = 'GPT-4o';
				if (modelId === 'gpt-4-turbo') name = 'GPT-4 Turbo';
				if (modelId === 'sonar-pro') name = 'Perplexity Sonar Pro';
				if (modelId === 'sonar-mini') name = 'Perplexity Sonar Mini';

				available.push({
					id: modelId,
					name: name,
					provider: provider,
					swe_score: sweScore,
					cost_per_1m_tokens: cost,
					allowed_roles: allowedRoles
				});
			});
		} else {
			// For providers with empty lists (like ollama), maybe add a placeholder or skip
			available.push({
				id: `[${provider}-any]`,
				name: `Any (${provider})`,
				provider: provider
			});
		}
	}
	return available;
}

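A small illustrative sketch (not part of the commit) of consuming the returned list, for example to offer only research-capable models; filtering on allowed_roles is an assumption about how callers might use the data:

// Keep only models that may serve the 'research' role.
const researchModels = getAvailableModels().filter(
	(m) => Array.isArray(m.allowed_roles) && m.allowed_roles.includes('research')
);
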
/**
 * Writes the configuration object to the file.
 * @param {Object} config The configuration object to write.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {boolean} True if successful, false otherwise.
 */
function writeConfig(config, explicitRoot = null) {
	// ---> Determine root path reliably <---
	let rootPath = explicitRoot;
	if (explicitRoot === null || explicitRoot === undefined) {
		// Logic matching _loadAndValidateConfig
		const foundRoot = findProjectRoot(); // *** Explicitly call findProjectRoot ***
		if (!foundRoot) {
			console.error(
				chalk.red(
					'Error: Could not determine project root. Configuration not saved.'
				)
			);
			return false;
		}
		rootPath = foundRoot;
	}
	// ---> End determine root path logic <---

	const configPath =
		path.basename(rootPath) === CONFIG_FILE_NAME
			? rootPath
			: path.join(rootPath, CONFIG_FILE_NAME);

	try {
		fs.writeFileSync(configPath, JSON.stringify(config, null, 2));
		loadedConfig = config; // Update the cache after successful write
		return true;
	} catch (error) {
		console.error(
			chalk.red(
				`Error writing configuration to ${configPath}: ${error.message}`
			)
		);
		return false;
	}
}

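A read-modify-write round trip is the expected usage; this sketch is illustrative only and the project root path is an assumption:

// Illustrative only; '/path/to/project' is an assumed project root.
const config = getConfig('/path/to/project');
config.global = { ...(config.global || {}), logLevel: 'debug' };
if (!writeConfig(config, '/path/to/project')) {
	console.error('Failed to persist .taskmasterconfig changes');
}
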
/**
 * Checks if the .taskmasterconfig file exists at the project root
 * @param {string|null} explicitRoot - Optional explicit path to the project root
 * @returns {boolean} True if the file exists, false otherwise
 */
function isConfigFilePresent(explicitRoot = null) {
	// ---> Determine root path reliably <---
	let rootPath = explicitRoot;
	if (explicitRoot === null || explicitRoot === undefined) {
		// Logic matching _loadAndValidateConfig
		const foundRoot = findProjectRoot(); // *** Explicitly call findProjectRoot ***
		if (!foundRoot) {
			return false; // Cannot check if root doesn't exist
		}
		rootPath = foundRoot;
	}
	// ---> End determine root path logic <---

	const configPath = path.join(rootPath, CONFIG_FILE_NAME);
	return fs.existsSync(configPath);
}

/**
 * Gets a list of all provider names defined in the MODEL_MAP.
 * @returns {string[]} An array of provider names.
 */
function getAllProviders() {
	return Object.keys(MODEL_MAP || {});
}

export {
	// Core config access
	getConfig,
	writeConfig,
	ConfigurationError, // Export custom error type
	isConfigFilePresent, // Add the new function export

	// Validation
	validateProvider,
	validateProviderModelCombination,
	VALID_PROVIDERS,
	MODEL_MAP,
	getAvailableModels,

	// Role-specific getters (No env var overrides)
	getMainProvider,
	getMainModelId,
	getMainMaxTokens,
	getMainTemperature,
	getResearchProvider,
	getResearchModelId,
	getResearchMaxTokens,
	getResearchTemperature,
	getFallbackProvider,
	getFallbackModelId,
	getFallbackMaxTokens,
	getFallbackTemperature,

	// Global setting getters (No env var overrides)
	getLogLevel,
	getDebugFlag,
	getDefaultSubtasks,
	getDefaultPriority,
	getProjectName,
	getOllamaBaseUrl,
	getParametersForRole,

	// API Key Checkers (still relevant)
	isApiKeySet,
	getMcpApiKeyStatus,

	// ADD: Function to get all provider names
	getAllProviders
};

@@ -6,7 +6,6 @@
|
||||
import path from 'path';
|
||||
import chalk from 'chalk';
|
||||
import boxen from 'boxen';
|
||||
import { Anthropic } from '@anthropic-ai/sdk';
|
||||
|
||||
import {
|
||||
log,
|
||||
@@ -22,11 +21,6 @@ import { displayBanner } from './ui.js';
|
||||
|
||||
import { generateTaskFiles } from './task-manager.js';
|
||||
|
||||
// Initialize Anthropic client
|
||||
const anthropic = new Anthropic({
|
||||
apiKey: process.env.ANTHROPIC_API_KEY
|
||||
});
|
||||
|
||||
/**
|
||||
* Add a dependency to a task
|
||||
* @param {string} tasksPath - Path to the tasks.json file
|
||||
@@ -185,18 +179,20 @@ async function addDependency(tasksPath, taskId, dependencyId) {
|
||||
);
|
||||
|
||||
// Display a more visually appealing success message
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.green(`Successfully added dependency:\n\n`) +
|
||||
`Task ${chalk.bold(formattedTaskId)} now depends on ${chalk.bold(formattedDependencyId)}`,
|
||||
{
|
||||
padding: 1,
|
||||
borderColor: 'green',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1 }
|
||||
}
|
||||
)
|
||||
);
|
||||
if (!isSilentMode()) {
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.green(`Successfully added dependency:\n\n`) +
|
||||
`Task ${chalk.bold(formattedTaskId)} now depends on ${chalk.bold(formattedDependencyId)}`,
|
||||
{
|
||||
padding: 1,
|
||||
borderColor: 'green',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1 }
|
||||
}
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
// Generate updated task files
|
||||
await generateTaskFiles(tasksPath, 'tasks');
|
||||
@@ -359,11 +355,13 @@ function isCircularDependency(tasks, taskId, chain = []) {
|
||||
|
||||
// Find the task or subtask
|
||||
let task = null;
|
||||
let parentIdForSubtask = null;
|
||||
|
||||
// Check if this is a subtask reference (e.g., "1.2")
|
||||
if (taskIdStr.includes('.')) {
|
||||
const [parentId, subtaskId] = taskIdStr.split('.').map(Number);
|
||||
const parentTask = tasks.find((t) => t.id === parentId);
|
||||
parentIdForSubtask = parentId; // Store parent ID if it's a subtask
|
||||
|
||||
if (parentTask && parentTask.subtasks) {
|
||||
task = parentTask.subtasks.find((st) => st.id === subtaskId);
|
||||
@@ -383,10 +381,18 @@ function isCircularDependency(tasks, taskId, chain = []) {
|
||||
}
|
||||
|
||||
// Check each dependency recursively
|
||||
const newChain = [...chain, taskId];
|
||||
return task.dependencies.some((depId) =>
|
||||
isCircularDependency(tasks, depId, newChain)
|
||||
);
|
||||
const newChain = [...chain, taskIdStr]; // Use taskIdStr for consistency
|
||||
return task.dependencies.some((depId) => {
|
||||
let normalizedDepId = String(depId);
|
||||
// Normalize relative subtask dependencies
|
||||
if (typeof depId === 'number' && parentIdForSubtask !== null) {
|
||||
// If the current task is a subtask AND the dependency is a number,
|
||||
// assume it refers to a sibling subtask.
|
||||
normalizedDepId = `${parentIdForSubtask}.${depId}`;
|
||||
}
|
||||
// Pass the normalized ID to the recursive call
|
||||
return isCircularDependency(tasks, normalizedDepId, newChain);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -585,118 +591,43 @@ async function validateDependenciesCommand(tasksPath, options = {}) {
|
||||
`Analyzing dependencies for ${taskCount} tasks and ${subtaskCount} subtasks...`
|
||||
);
|
||||
|
||||
// Track validation statistics
|
||||
const stats = {
|
||||
nonExistentDependenciesRemoved: 0,
|
||||
selfDependenciesRemoved: 0,
|
||||
tasksFixed: 0,
|
||||
subtasksFixed: 0
|
||||
};
|
||||
|
||||
// Create a custom logger instead of reassigning the imported log function
|
||||
const warnings = [];
|
||||
const customLogger = function (level, ...args) {
|
||||
if (level === 'warn') {
|
||||
warnings.push(args.join(' '));
|
||||
|
||||
// Count the type of fix based on the warning message
|
||||
const msg = args.join(' ');
|
||||
if (msg.includes('self-dependency')) {
|
||||
stats.selfDependenciesRemoved++;
|
||||
} else if (msg.includes('invalid')) {
|
||||
stats.nonExistentDependenciesRemoved++;
|
||||
}
|
||||
|
||||
// Count if it's a task or subtask being fixed
|
||||
if (msg.includes('from subtask')) {
|
||||
stats.subtasksFixed++;
|
||||
} else if (msg.includes('from task')) {
|
||||
stats.tasksFixed++;
|
||||
}
|
||||
}
|
||||
// Call the original log function
|
||||
return log(level, ...args);
|
||||
};
|
||||
|
||||
// Run validation with custom logger
|
||||
try {
|
||||
// Temporarily save validateTaskDependencies function with normal log
|
||||
const originalValidateTaskDependencies = validateTaskDependencies;
|
||||
// Directly call the validation function
|
||||
const validationResult = validateTaskDependencies(data.tasks);
|
||||
|
||||
// Create patched version that uses customLogger
|
||||
const patchedValidateTaskDependencies = (tasks, tasksPath) => {
|
||||
// Temporarily redirect log calls in this scope
|
||||
const originalLog = log;
|
||||
const logProxy = function (...args) {
|
||||
return customLogger(...args);
|
||||
};
|
||||
if (!validationResult.valid) {
|
||||
log(
|
||||
'error',
|
||||
`Dependency validation failed. Found ${validationResult.issues.length} issue(s):`
|
||||
);
|
||||
validationResult.issues.forEach((issue) => {
|
||||
let errorMsg = ` [${issue.type.toUpperCase()}] Task ${issue.taskId}: ${issue.message}`;
|
||||
if (issue.dependencyId) {
|
||||
errorMsg += ` (Dependency: ${issue.dependencyId})`;
|
||||
}
|
||||
log('error', errorMsg); // Log each issue as an error
|
||||
});
|
||||
|
||||
// Call the original function in a context where log calls are intercepted
|
||||
const result = (() => {
|
||||
// Use Function.prototype.bind to create a new function that has logProxy available
|
||||
// Pass isCircularDependency explicitly to make it available
|
||||
return Function(
|
||||
'tasks',
|
||||
'tasksPath',
|
||||
'log',
|
||||
'customLogger',
|
||||
'isCircularDependency',
|
||||
'taskExists',
|
||||
`return (${originalValidateTaskDependencies.toString()})(tasks, tasksPath);`
|
||||
)(
|
||||
tasks,
|
||||
tasksPath,
|
||||
logProxy,
|
||||
customLogger,
|
||||
isCircularDependency,
|
||||
taskExists
|
||||
);
|
||||
})();
|
||||
// Optionally exit if validation fails, depending on desired behavior
|
||||
// process.exit(1); // Uncomment if validation failure should stop the process
|
||||
|
||||
return result;
|
||||
};
|
||||
|
||||
const changesDetected = patchedValidateTaskDependencies(
|
||||
data.tasks,
|
||||
tasksPath
|
||||
);
|
||||
|
||||
// Create a detailed report
|
||||
if (changesDetected) {
|
||||
log('success', 'Invalid dependencies were removed from tasks.json');
|
||||
|
||||
// Show detailed stats in a nice box - only if not in silent mode
|
||||
// Display summary box even on failure, showing issues found
|
||||
if (!isSilentMode()) {
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.green(`Dependency Validation Results:\n\n`) +
|
||||
chalk.red(`Dependency Validation FAILED\n\n`) +
|
||||
`${chalk.cyan('Tasks checked:')} ${taskCount}\n` +
|
||||
`${chalk.cyan('Subtasks checked:')} ${subtaskCount}\n` +
|
||||
`${chalk.cyan('Non-existent dependencies removed:')} ${stats.nonExistentDependenciesRemoved}\n` +
|
||||
`${chalk.cyan('Self-dependencies removed:')} ${stats.selfDependenciesRemoved}\n` +
|
||||
`${chalk.cyan('Tasks fixed:')} ${stats.tasksFixed}\n` +
|
||||
`${chalk.cyan('Subtasks fixed:')} ${stats.subtasksFixed}`,
|
||||
`${chalk.red('Issues found:')} ${validationResult.issues.length}`, // Display count from result
|
||||
{
|
||||
padding: 1,
|
||||
borderColor: 'green',
|
||||
borderColor: 'red',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1, bottom: 1 }
|
||||
}
|
||||
)
|
||||
);
|
||||
|
||||
// Show all warnings in a collapsible list if there are many
|
||||
if (warnings.length > 0) {
|
||||
console.log(chalk.yellow('\nDetailed fixes:'));
|
||||
warnings.forEach((warning) => {
|
||||
console.log(` ${warning}`);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Regenerate task files to reflect the changes
|
||||
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
log('info', 'Task files regenerated to reflect dependency changes');
|
||||
} else {
|
||||
log(
|
||||
'success',
|
||||
|
||||
@@ -6,6 +6,5 @@
// Export all modules
export * from './utils.js';
export * from './ui.js';
export * from './ai-services.js';
export * from './task-manager.js';
export * from './commands.js';

314
scripts/modules/rule-transformer.js
Normal file
@@ -0,0 +1,314 @@
|
||||
/**
|
||||
* Rule Transformer Module
|
||||
* Handles conversion of Cursor rules to Roo rules
|
||||
*
|
||||
* This module procedurally generates .roo/rules files from .cursor/rules files,
|
||||
* eliminating the need to maintain both sets of files manually.
|
||||
*/
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import { log } from './utils.js';
|
||||
|
||||
// Configuration for term conversions - centralized for easier future updates
|
||||
const conversionConfig = {
|
||||
// Product and brand name replacements
|
||||
brandTerms: [
|
||||
{ from: /cursor\.so/g, to: 'roocode.com' },
|
||||
{ from: /\[cursor\.so\]/g, to: '[roocode.com]' },
|
||||
{ from: /href="https:\/\/cursor\.so/g, to: 'href="https://roocode.com' },
|
||||
{ from: /\(https:\/\/cursor\.so/g, to: '(https://roocode.com' },
|
||||
{
|
||||
from: /\bcursor\b/gi,
|
||||
to: (match) => (match === 'Cursor' ? 'Roo Code' : 'roo')
|
||||
},
|
||||
{ from: /Cursor/g, to: 'Roo Code' }
|
||||
],
|
||||
|
||||
// File extension replacements
|
||||
fileExtensions: [{ from: /\.mdc\b/g, to: '.md' }],
|
||||
|
||||
// Documentation URL replacements
|
||||
docUrls: [
|
||||
{
|
||||
from: /https:\/\/docs\.cursor\.com\/[^\s)'"]+/g,
|
||||
to: (match) => match.replace('docs.cursor.com', 'docs.roocode.com')
|
||||
},
|
||||
{ from: /https:\/\/docs\.roo\.com\//g, to: 'https://docs.roocode.com/' }
|
||||
],
|
||||
|
||||
// Tool references - direct replacements
|
||||
toolNames: {
|
||||
search: 'search_files',
|
||||
read_file: 'read_file',
|
||||
edit_file: 'apply_diff',
|
||||
create_file: 'write_to_file',
|
||||
run_command: 'execute_command',
|
||||
terminal_command: 'execute_command',
|
||||
use_mcp: 'use_mcp_tool',
|
||||
switch_mode: 'switch_mode'
|
||||
},
|
||||
|
||||
// Tool references in context - more specific replacements
|
||||
toolContexts: [
|
||||
{ from: /\bsearch tool\b/g, to: 'search_files tool' },
|
||||
{ from: /\bedit_file tool\b/g, to: 'apply_diff tool' },
|
||||
{ from: /\buse the search\b/g, to: 'use the search_files' },
|
||||
{ from: /\bThe edit_file\b/g, to: 'The apply_diff' },
|
||||
{ from: /\brun_command executes\b/g, to: 'execute_command executes' },
|
||||
{ from: /\buse_mcp connects\b/g, to: 'use_mcp_tool connects' },
|
||||
// Additional contextual patterns for flexibility
|
||||
{ from: /\bCursor search\b/g, to: 'Roo Code search_files' },
|
||||
{ from: /\bCursor edit\b/g, to: 'Roo Code apply_diff' },
|
||||
{ from: /\bCursor create\b/g, to: 'Roo Code write_to_file' },
|
||||
{ from: /\bCursor run\b/g, to: 'Roo Code execute_command' }
|
||||
],
|
||||
|
||||
// Tool group and category names
|
||||
toolGroups: [
|
||||
{ from: /\bSearch tools\b/g, to: 'Read Group tools' },
|
||||
{ from: /\bEdit tools\b/g, to: 'Edit Group tools' },
|
||||
{ from: /\bRun tools\b/g, to: 'Command Group tools' },
|
||||
{ from: /\bMCP servers\b/g, to: 'MCP Group tools' },
|
||||
{ from: /\bSearch Group\b/g, to: 'Read Group' },
|
||||
{ from: /\bEdit Group\b/g, to: 'Edit Group' },
|
||||
{ from: /\bRun Group\b/g, to: 'Command Group' }
|
||||
],
|
||||
|
||||
// File references in markdown links
|
||||
fileReferences: {
|
||||
pathPattern: /\[(.+?)\]\(mdc:\.cursor\/rules\/(.+?)\.mdc\)/g,
|
||||
replacement: (match, text, filePath) => {
|
||||
// Get the base filename
|
||||
const baseName = path.basename(filePath, '.mdc');
|
||||
|
||||
// Get the new filename (either from mapping or by replacing extension)
|
||||
const newFileName = fileMap[`${baseName}.mdc`] || `${baseName}.md`;
|
||||
|
||||
// Return the updated link
|
||||
return `[${text}](mdc:.roo/rules/${newFileName})`;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// File name mapping (specific files with naming changes)
|
||||
const fileMap = {
|
||||
'cursor_rules.mdc': 'roo_rules.md',
|
||||
'dev_workflow.mdc': 'dev_workflow.md',
|
||||
'self_improve.mdc': 'self_improve.md',
|
||||
'taskmaster.mdc': 'taskmaster.md'
|
||||
// Add other mappings as needed
|
||||
};
|
||||
|
||||
/**
|
||||
* Replace basic Cursor terms with Roo equivalents
|
||||
*/
|
||||
function replaceBasicTerms(content) {
|
||||
let result = content;
|
||||
|
||||
// Apply brand term replacements
|
||||
conversionConfig.brandTerms.forEach((pattern) => {
|
||||
if (typeof pattern.to === 'function') {
|
||||
result = result.replace(pattern.from, pattern.to);
|
||||
} else {
|
||||
result = result.replace(pattern.from, pattern.to);
|
||||
}
|
||||
});
|
||||
|
||||
// Apply file extension replacements
|
||||
conversionConfig.fileExtensions.forEach((pattern) => {
|
||||
result = result.replace(pattern.from, pattern.to);
|
||||
});
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Replace Cursor tool references with Roo tool equivalents
|
||||
*/
|
||||
function replaceToolReferences(content) {
|
||||
let result = content;
|
||||
|
||||
// Basic pattern for direct tool name replacements
|
||||
const toolNames = conversionConfig.toolNames;
|
||||
const toolReferencePattern = new RegExp(
|
||||
`\\b(${Object.keys(toolNames).join('|')})\\b`,
|
||||
'g'
|
||||
);
|
||||
|
||||
// Apply direct tool name replacements
|
||||
result = result.replace(toolReferencePattern, (match, toolName) => {
|
||||
return toolNames[toolName] || toolName;
|
||||
});
|
||||
|
||||
// Apply contextual tool replacements
|
||||
conversionConfig.toolContexts.forEach((pattern) => {
|
||||
result = result.replace(pattern.from, pattern.to);
|
||||
});
|
||||
|
||||
// Apply tool group replacements
|
||||
conversionConfig.toolGroups.forEach((pattern) => {
|
||||
result = result.replace(pattern.from, pattern.to);
|
||||
});
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Update documentation URLs to point to Roo documentation
|
||||
*/
|
||||
function updateDocReferences(content) {
|
||||
let result = content;
|
||||
|
||||
// Apply documentation URL replacements
|
||||
conversionConfig.docUrls.forEach((pattern) => {
|
||||
if (typeof pattern.to === 'function') {
|
||||
result = result.replace(pattern.from, pattern.to);
|
||||
} else {
|
||||
result = result.replace(pattern.from, pattern.to);
|
||||
}
|
||||
});
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Update file references in markdown links
|
||||
*/
|
||||
function updateFileReferences(content) {
|
||||
const { pathPattern, replacement } = conversionConfig.fileReferences;
|
||||
return content.replace(pathPattern, replacement);
|
||||
}
|
||||
|
||||
/**
|
||||
* Main transformation function that applies all conversions
|
||||
*/
|
||||
function transformCursorToRooRules(content) {
|
||||
// Apply all transformations in appropriate order
|
||||
let result = content;
|
||||
result = replaceBasicTerms(result);
|
||||
result = replaceToolReferences(result);
|
||||
result = updateDocReferences(result);
|
||||
result = updateFileReferences(result);
|
||||
|
||||
// Super aggressive failsafe pass to catch any variations we might have missed
|
||||
// This ensures critical transformations are applied even in contexts we didn't anticipate
|
||||
|
||||
// 1. Handle cursor.so in any possible context
|
||||
result = result.replace(/cursor\.so/gi, 'roocode.com');
|
||||
// Edge case: URL with different formatting
|
||||
result = result.replace(/cursor\s*\.\s*so/gi, 'roocode.com');
|
||||
result = result.replace(/https?:\/\/cursor\.so/gi, 'https://roocode.com');
|
||||
result = result.replace(
|
||||
/https?:\/\/www\.cursor\.so/gi,
|
||||
'https://www.roocode.com'
|
||||
);
|
||||
|
||||
// 2. Handle tool references - even partial ones
|
||||
result = result.replace(/\bedit_file\b/gi, 'apply_diff');
|
||||
result = result.replace(/\bsearch tool\b/gi, 'search_files tool');
|
||||
result = result.replace(/\bSearch Tool\b/g, 'Search_Files Tool');
|
||||
|
||||
// 3. Handle basic terms (with case handling)
|
||||
result = result.replace(/\bcursor\b/gi, (match) =>
|
||||
match.charAt(0) === 'C' ? 'Roo Code' : 'roo'
|
||||
);
|
||||
result = result.replace(/Cursor/g, 'Roo Code');
|
||||
result = result.replace(/CURSOR/g, 'ROO CODE');
|
||||
|
||||
// 4. Handle file extensions
|
||||
result = result.replace(/\.mdc\b/g, '.md');
|
||||
|
||||
// 5. Handle any missed URL patterns
|
||||
result = result.replace(/docs\.cursor\.com/gi, 'docs.roocode.com');
|
||||
result = result.replace(/docs\.roo\.com/gi, 'docs.roocode.com');
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert a single Cursor rule file to Roo rule format
|
||||
*/
|
||||
function convertCursorRuleToRooRule(sourcePath, targetPath) {
|
||||
try {
|
||||
log(
|
||||
'info',
|
||||
`Converting Cursor rule ${path.basename(sourcePath)} to Roo rule ${path.basename(targetPath)}`
|
||||
);
|
||||
|
||||
// Read source content
|
||||
const content = fs.readFileSync(sourcePath, 'utf8');
|
||||
|
||||
// Transform content
|
||||
const transformedContent = transformCursorToRooRules(content);
|
||||
|
||||
// Ensure target directory exists
|
||||
const targetDir = path.dirname(targetPath);
|
||||
if (!fs.existsSync(targetDir)) {
|
||||
fs.mkdirSync(targetDir, { recursive: true });
|
||||
}
|
||||
|
||||
// Write transformed content
|
||||
fs.writeFileSync(targetPath, transformedContent);
|
||||
log(
|
||||
'success',
|
||||
`Successfully converted ${path.basename(sourcePath)} to ${path.basename(targetPath)}`
|
||||
);
|
||||
|
||||
return true;
|
||||
} catch (error) {
|
||||
log(
|
||||
'error',
|
||||
`Failed to convert rule file ${path.basename(sourcePath)}: ${error.message}`
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Process all Cursor rules and convert to Roo rules
|
||||
*/
|
||||
function convertAllCursorRulesToRooRules(projectDir) {
|
||||
const cursorRulesDir = path.join(projectDir, '.cursor', 'rules');
|
||||
const rooRulesDir = path.join(projectDir, '.roo', 'rules');
|
||||
|
||||
if (!fs.existsSync(cursorRulesDir)) {
|
||||
log('warn', `Cursor rules directory not found: ${cursorRulesDir}`);
|
||||
return { success: 0, failed: 0 };
|
||||
}
|
||||
|
||||
// Ensure Roo rules directory exists
|
||||
if (!fs.existsSync(rooRulesDir)) {
|
||||
fs.mkdirSync(rooRulesDir, { recursive: true });
|
||||
log('info', `Created Roo rules directory: ${rooRulesDir}`);
|
||||
}
|
||||
|
||||
// Count successful and failed conversions
|
||||
let success = 0;
|
||||
let failed = 0;
|
||||
|
||||
// Process each file in the Cursor rules directory
|
||||
fs.readdirSync(cursorRulesDir).forEach((file) => {
|
||||
if (file.endsWith('.mdc')) {
|
||||
const sourcePath = path.join(cursorRulesDir, file);
|
||||
|
||||
// Determine target file name (either from mapping or by replacing extension)
|
||||
const targetFilename = fileMap[file] || file.replace('.mdc', '.md');
|
||||
const targetPath = path.join(rooRulesDir, targetFilename);
|
||||
|
||||
// Convert the file
|
||||
if (convertCursorRuleToRooRule(sourcePath, targetPath)) {
|
||||
success++;
|
||||
} else {
|
||||
failed++;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
log(
|
||||
'info',
|
||||
`Rule conversion complete: ${success} successful, ${failed} failed`
|
||||
);
|
||||
return { success, failed };
|
||||
}
|
||||
|
||||
export { convertAllCursorRulesToRooRules, convertCursorRuleToRooRule };
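A minimal usage sketch (illustrative, not part of this file) of running the converter over a project, for example during initialization; the directory value is an assumption:

// Converts every .cursor/rules/*.mdc file into .roo/rules/*.md for the given project.
const { success, failed } = convertAllCursorRulesToRooRules('/path/to/project');
if (failed > 0) {
	console.warn(`${failed} Cursor rule file(s) could not be converted to Roo rules`);
}
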
|
||||
438
scripts/modules/supported-models.json
Normal file
@@ -0,0 +1,438 @@
|
||||
{
|
||||
"anthropic": [
|
||||
{
|
||||
"id": "claude-3-7-sonnet-20250219",
|
||||
"swe_score": 0.623,
|
||||
"cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 120000
|
||||
},
|
||||
{
|
||||
"id": "claude-3-5-sonnet-20241022",
|
||||
"swe_score": 0.49,
|
||||
"cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 64000
|
||||
},
|
||||
{
|
||||
"id": "claude-3-5-haiku-20241022",
|
||||
"swe_score": 0.406,
|
||||
"cost_per_1m_tokens": { "input": 0.8, "output": 4.0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 64000
|
||||
},
|
||||
{
|
||||
"id": "claude-3-opus-20240229",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 15, "output": 75 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 64000
|
||||
}
|
||||
],
|
||||
"openai": [
|
||||
{
|
||||
"id": "gpt-4o",
|
||||
"swe_score": 0.332,
|
||||
"cost_per_1m_tokens": { "input": 2.5, "output": 10.0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 16384
|
||||
},
|
||||
{
|
||||
"id": "o1",
|
||||
"swe_score": 0.489,
|
||||
"cost_per_1m_tokens": { "input": 15.0, "output": 60.0 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "o3",
|
||||
"swe_score": 0.5,
|
||||
"cost_per_1m_tokens": { "input": 10.0, "output": 40.0 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "o3-mini",
|
||||
"swe_score": 0.493,
|
||||
"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 100000
|
||||
},
|
||||
{
|
||||
"id": "o4-mini",
|
||||
"swe_score": 0.45,
|
||||
"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "o1-mini",
|
||||
"swe_score": 0.4,
|
||||
"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "o1-pro",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 150.0, "output": 600.0 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "gpt-4-5-preview",
|
||||
"swe_score": 0.38,
|
||||
"cost_per_1m_tokens": { "input": 75.0, "output": 150.0 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "gpt-4-1-mini",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.4, "output": 1.6 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "gpt-4-1-nano",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.1, "output": 0.4 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "gpt-4o-mini",
|
||||
"swe_score": 0.3,
|
||||
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "gpt-4o-search-preview",
|
||||
"swe_score": 0.33,
|
||||
"cost_per_1m_tokens": { "input": 2.5, "output": 10.0 },
|
||||
"allowed_roles": ["main", "fallback", "research"]
|
||||
},
|
||||
{
|
||||
"id": "gpt-4o-mini-search-preview",
|
||||
"swe_score": 0.3,
|
||||
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
|
||||
"allowed_roles": ["main", "fallback", "research"]
|
||||
}
|
||||
],
|
||||
"google": [
|
||||
{
|
||||
"id": "gemini-2.5-pro-exp-03-25",
|
||||
"swe_score": 0.638,
|
||||
"cost_per_1m_tokens": null,
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "gemini-2.5-flash-preview-04-17",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": null,
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "gemini-2.0-flash",
|
||||
"swe_score": 0.754,
|
||||
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "gemini-2.0-flash-thinking-experimental",
|
||||
"swe_score": 0.754,
|
||||
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "gemini-2.0-pro",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": null,
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
}
|
||||
],
|
||||
"perplexity": [
|
||||
{
|
||||
"id": "sonar-pro",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 3, "output": 15 },
|
||||
"allowed_roles": ["research"],
|
||||
"max_tokens": 8700
|
||||
},
|
||||
{
|
||||
"id": "sonar",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 1, "output": 1 },
|
||||
"allowed_roles": ["research"],
|
||||
"max_tokens": 8700
|
||||
},
|
||||
{
|
||||
"id": "deep-research",
|
||||
"swe_score": 0.211,
|
||||
"cost_per_1m_tokens": { "input": 2, "output": 8 },
|
||||
"allowed_roles": ["research"],
|
||||
"max_tokens": 8700
|
||||
},
|
||||
{
|
||||
"id": "sonar-reasoning-pro",
|
||||
"swe_score": 0.211,
|
||||
"cost_per_1m_tokens": { "input": 2, "output": 8 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 8700
|
||||
},
|
||||
{
|
||||
"id": "sonar-reasoning",
|
||||
"swe_score": 0.211,
|
||||
"cost_per_1m_tokens": { "input": 1, "output": 5 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 8700
|
||||
}
|
||||
],
|
||||
"xai": [
|
||||
{
|
||||
"id": "grok-3",
|
||||
"name": "Grok 3",
|
||||
"swe_score": null,
|
||||
"cost_per_1m_tokens": { "input": 3, "output": 15 },
|
||||
"allowed_roles": ["main", "fallback", "research"],
|
||||
"max_tokens": 131072
|
||||
},
|
||||
{
|
||||
"id": "grok-3-mini",
|
||||
"name": "Grok 3 Mini",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.3, "output": 0.5 },
|
||||
"allowed_roles": ["main", "fallback", "research"],
|
||||
"max_tokens": 131072
|
||||
},
|
||||
{
|
||||
"id": "grok-3-fast",
|
||||
"name": "Grok 3 Fast",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 5, "output": 25 },
|
||||
"allowed_roles": ["main", "fallback", "research"],
|
||||
"max_tokens": 131072
|
||||
},
|
||||
{
|
||||
"id": "grok-3-mini-fast",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.6, "output": 4 },
|
||||
"allowed_roles": ["main", "fallback", "research"],
|
||||
"max_tokens": 131072
|
||||
}
|
||||
],
|
||||
"ollama": [
|
||||
{
|
||||
"id": "gemma3:27b",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0, "output": 0 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "gemma3:12b",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0, "output": 0 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "qwq",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0, "output": 0 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "deepseek-r1",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0, "output": 0 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "mistral-small3.1",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0, "output": 0 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "llama3.3",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0, "output": 0 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
},
|
||||
{
|
||||
"id": "phi4",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0, "output": 0 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
}
|
||||
],
|
||||
"openrouter": [
|
||||
{
|
||||
"id": "google/gemini-2.0-flash-001",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.1, "output": 0.4 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 1048576
|
||||
},
|
||||
{
|
||||
"id": "google/gemini-2.5-pro-exp-03-25",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0, "output": 0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 1000000
|
||||
},
|
||||
{
|
||||
"id": "deepseek/deepseek-chat-v3-0324:free",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0, "output": 0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 163840
|
||||
},
|
||||
{
|
||||
"id": "deepseek/deepseek-chat-v3-0324",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.27, "output": 1.1 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 64000
|
||||
},
|
||||
{
|
||||
"id": "deepseek/deepseek-r1:free",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0, "output": 0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 163840
|
||||
},
|
||||
|
||||
{
|
||||
"id": "microsoft/mai-ds-r1:free",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0, "output": 0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 163840
|
||||
},
|
||||
{
|
||||
"id": "google/gemini-2.5-pro-preview-03-25",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 1.25, "output": 10 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 65535
|
||||
},
|
||||
{
|
||||
"id": "google/gemini-2.5-flash-preview",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 65535
|
||||
},
|
||||
{
|
||||
"id": "google/gemini-2.5-flash-preview:thinking",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.15, "output": 3.5 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 65535
|
||||
},
|
||||
{
|
||||
"id": "openai/o3",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 10, "output": 40 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 200000
|
||||
},
|
||||
{
|
||||
"id": "openai/o4-mini",
|
||||
"swe_score": 0.45,
|
||||
"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 100000
|
||||
},
|
||||
{
|
||||
"id": "openai/o4-mini-high",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 100000
|
||||
},
|
||||
{
|
||||
"id": "openai/o1-pro",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 150, "output": 600 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 100000
|
||||
},
|
||||
{
|
||||
"id": "meta-llama/llama-3.3-70b-instruct",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 120, "output": 600 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 1048576
|
||||
},
|
||||
{
|
||||
"id": "google/gemma-3-12b-it:free",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0, "output": 0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 131072
|
||||
},
|
||||
{
|
||||
"id": "google/gemma-3-12b-it",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 50, "output": 100 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 131072
|
||||
},
|
||||
{
|
||||
"id": "google/gemma-3-27b-it:free",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0, "output": 0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 96000
|
||||
},
|
||||
{
|
||||
"id": "google/gemma-3-27b-it",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 100, "output": 200 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 131072
|
||||
},
|
||||
{
|
||||
"id": "qwen/qwq-32b:free",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0, "output": 0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 40000
|
||||
},
|
||||
{
|
||||
"id": "qwen/qwq-32b",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 150, "output": 200 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 131072
|
||||
},
|
||||
{
|
||||
"id": "qwen/qwen-max",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 1.6, "output": 6.4 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 32768
|
||||
},
|
||||
{
|
||||
"id": "qwen/qwen-turbo",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.05, "output": 0.2 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 1000000
|
||||
},
|
||||
{
|
||||
"id": "mistralai/mistral-small-3.1-24b-instruct:free",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0, "output": 0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 96000
|
||||
},
|
||||
{
|
||||
"id": "mistralai/mistral-small-3.1-24b-instruct",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.1, "output": 0.3 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 128000
|
||||
},
|
||||
{
|
||||
"id": "thudm/glm-4-32b:free",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0, "output": 0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 32768
|
||||
}
|
||||
]
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -1,32 +0,0 @@
|
||||
async function updateSubtaskById(tasksPath, subtaskId, prompt, useResearch = false) {
|
||||
let loadingIndicator = null;
|
||||
try {
|
||||
log('info', `Updating subtask ${subtaskId} with prompt: "${prompt}"`);
|
||||
|
||||
// Validate subtask ID format
|
||||
if (!subtaskId || typeof subtaskId !== 'string' || !subtaskId.includes('.')) {
|
||||
throw new Error(`Invalid subtask ID format: ${subtaskId}. Subtask ID must be in format "parentId.subtaskId"`);
|
||||
}
|
||||
|
||||
// Validate prompt
|
||||
if (!prompt || typeof prompt !== 'string' || prompt.trim() === '') {
|
||||
throw new Error('Prompt cannot be empty. Please provide context for the subtask update.');
|
||||
}
|
||||
|
||||
// Prepare for fallback handling
|
||||
let claudeOverloaded = false;
|
||||
|
||||
// Validate tasks file exists
|
||||
if (!fs.existsSync(tasksPath)) {
|
||||
throw new Error(`Tasks file not found at path: ${tasksPath}`);
|
||||
}
|
||||
|
||||
// Read the tasks file
|
||||
const data = readJSON(tasksPath);
|
||||
// ... rest of the function
|
||||
} catch (error) {
|
||||
// Handle errors
|
||||
console.error(`Error updating subtask: ${error.message}`);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
153
scripts/modules/task-manager/add-subtask.js
Normal file
@@ -0,0 +1,153 @@
|
||||
import path from 'path';
|
||||
|
||||
import { log, readJSON, writeJSON } from '../utils.js';
|
||||
import { isTaskDependentOn } from '../task-manager.js';
|
||||
import generateTaskFiles from './generate-task-files.js';
|
||||
|
||||
/**
|
||||
* Add a subtask to a parent task
|
||||
* @param {string} tasksPath - Path to the tasks.json file
|
||||
* @param {number|string} parentId - ID of the parent task
|
||||
* @param {number|string|null} existingTaskId - ID of an existing task to convert to subtask (optional)
|
||||
* @param {Object} newSubtaskData - Data for creating a new subtask (used if existingTaskId is null)
|
||||
* @param {boolean} generateFiles - Whether to regenerate task files after adding the subtask
|
||||
* @returns {Object} The newly created or converted subtask
|
||||
*/
|
||||
async function addSubtask(
|
||||
tasksPath,
|
||||
parentId,
|
||||
existingTaskId = null,
|
||||
newSubtaskData = null,
|
||||
generateFiles = true
|
||||
) {
|
||||
try {
|
||||
log('info', `Adding subtask to parent task ${parentId}...`);
|
||||
|
||||
// Read the existing tasks
|
||||
const data = readJSON(tasksPath);
|
||||
if (!data || !data.tasks) {
|
||||
throw new Error(`Invalid or missing tasks file at ${tasksPath}`);
|
||||
}
|
||||
|
||||
// Convert parent ID to number
|
||||
const parentIdNum = parseInt(parentId, 10);
|
||||
|
||||
// Find the parent task
|
||||
const parentTask = data.tasks.find((t) => t.id === parentIdNum);
|
||||
if (!parentTask) {
|
||||
throw new Error(`Parent task with ID ${parentIdNum} not found`);
|
||||
}
|
||||
|
||||
// Initialize subtasks array if it doesn't exist
|
||||
if (!parentTask.subtasks) {
|
||||
parentTask.subtasks = [];
|
||||
}
|
||||
|
||||
let newSubtask;
|
||||
|
||||
// Case 1: Convert an existing task to a subtask
|
||||
if (existingTaskId !== null) {
|
||||
const existingTaskIdNum = parseInt(existingTaskId, 10);
|
||||
|
||||
// Find the existing task
|
||||
const existingTaskIndex = data.tasks.findIndex(
|
||||
(t) => t.id === existingTaskIdNum
|
||||
);
|
||||
if (existingTaskIndex === -1) {
|
||||
throw new Error(`Task with ID ${existingTaskIdNum} not found`);
|
||||
}
|
||||
|
||||
const existingTask = data.tasks[existingTaskIndex];
|
||||
|
||||
// Check if task is already a subtask
|
||||
if (existingTask.parentTaskId) {
|
||||
throw new Error(
|
||||
`Task ${existingTaskIdNum} is already a subtask of task ${existingTask.parentTaskId}`
|
||||
);
|
||||
}
|
||||
|
||||
// Check for circular dependency
|
||||
if (existingTaskIdNum === parentIdNum) {
|
||||
throw new Error(`Cannot make a task a subtask of itself`);
|
||||
}
|
||||
|
||||
// Check if parent task is a subtask of the task we're converting
|
||||
// This would create a circular dependency
|
||||
if (isTaskDependentOn(data.tasks, parentTask, existingTaskIdNum)) {
|
||||
throw new Error(
|
||||
`Cannot create circular dependency: task ${parentIdNum} is already a subtask or dependent of task ${existingTaskIdNum}`
|
||||
);
|
||||
}
|
||||
|
||||
// Find the highest subtask ID to determine the next ID
|
||||
const highestSubtaskId =
|
||||
parentTask.subtasks.length > 0
|
||||
? Math.max(...parentTask.subtasks.map((st) => st.id))
|
||||
: 0;
|
||||
const newSubtaskId = highestSubtaskId + 1;
|
||||
|
||||
// Clone the existing task to be converted to a subtask
|
||||
newSubtask = {
|
||||
...existingTask,
|
||||
id: newSubtaskId,
|
||||
parentTaskId: parentIdNum
|
||||
};
|
||||
|
||||
// Add to parent's subtasks
|
||||
parentTask.subtasks.push(newSubtask);
|
||||
|
||||
// Remove the task from the main tasks array
|
||||
data.tasks.splice(existingTaskIndex, 1);
|
||||
|
||||
log(
|
||||
'info',
|
||||
`Converted task ${existingTaskIdNum} to subtask ${parentIdNum}.${newSubtaskId}`
|
||||
);
|
||||
}
|
||||
// Case 2: Create a new subtask
|
||||
else if (newSubtaskData) {
|
||||
// Find the highest subtask ID to determine the next ID
|
||||
const highestSubtaskId =
|
||||
parentTask.subtasks.length > 0
|
||||
? Math.max(...parentTask.subtasks.map((st) => st.id))
|
||||
: 0;
|
||||
const newSubtaskId = highestSubtaskId + 1;
|
||||
|
||||
// Create the new subtask object
|
||||
newSubtask = {
|
||||
id: newSubtaskId,
|
||||
title: newSubtaskData.title,
|
||||
description: newSubtaskData.description || '',
|
||||
details: newSubtaskData.details || '',
|
||||
status: newSubtaskData.status || 'pending',
|
||||
dependencies: newSubtaskData.dependencies || [],
|
||||
parentTaskId: parentIdNum
|
||||
};
|
||||
|
||||
// Add to parent's subtasks
|
||||
parentTask.subtasks.push(newSubtask);
|
||||
|
||||
log('info', `Created new subtask ${parentIdNum}.${newSubtaskId}`);
|
||||
} else {
|
||||
throw new Error(
|
||||
'Either existingTaskId or newSubtaskData must be provided'
|
||||
);
|
||||
}
|
||||
|
||||
// Write the updated tasks back to the file
|
||||
writeJSON(tasksPath, data);
|
||||
|
||||
// Generate task files if requested
|
||||
if (generateFiles) {
|
||||
log('info', 'Regenerating task files...');
|
||||
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
}
|
||||
|
||||
return newSubtask;
|
||||
} catch (error) {
|
||||
log('error', `Error adding subtask: ${error.message}`);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
export default addSubtask;
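For illustration only (not part of this file), a sketch of creating a fresh subtask under an existing parent task from an async context; the path, ID and titles are assumptions:

// Creates subtask <parentId>.<n> with the given fields; skips regenerating task files.
const subtask = await addSubtask(
	'tasks/tasks.json', // tasksPath
	5, // parentId
	null, // existingTaskId - not converting an existing task
	{ title: 'Write unit tests', description: 'Cover the new config getters' },
	false // generateFiles
);
console.log(`Created subtask ${subtask.parentTaskId}.${subtask.id}`);
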
|
||||
363
scripts/modules/task-manager/add-task.js
Normal file
@@ -0,0 +1,363 @@
import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';
import { z } from 'zod';

import {
	displayBanner,
	getStatusWithColor,
	startLoadingIndicator,
	stopLoadingIndicator
} from '../ui.js';
import { log, readJSON, writeJSON, truncate } from '../utils.js';
import { generateObjectService } from '../ai-services-unified.js';
import { getDefaultPriority } from '../config-manager.js';
import generateTaskFiles from './generate-task-files.js';

// Define Zod schema for the expected AI output object
const AiTaskDataSchema = z.object({
	title: z.string().describe('Clear, concise title for the task'),
	description: z
		.string()
		.describe('A one or two sentence description of the task'),
	details: z
		.string()
		.describe('In-depth implementation details, considerations, and guidance'),
	testStrategy: z
		.string()
		.describe('Detailed approach for verifying task completion')
});

/**
 * Add a new task using AI
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {string} prompt - Description of the task to add (required for AI-driven creation)
 * @param {Array} dependencies - Task dependencies
 * @param {string} priority - Task priority
 * @param {function} reportProgress - Function to report progress to MCP server (optional)
 * @param {Object} mcpLog - MCP logger object (optional)
 * @param {Object} session - Session object from MCP server (optional)
 * @param {string} outputFormat - Output format (text or json)
 * @param {Object} customEnv - Custom environment variables (optional) - Note: AI params override deprecated
 * @param {Object} manualTaskData - Manual task data (optional, for direct task creation without AI)
 * @param {boolean} useResearch - Whether to use the research model (passed to unified service)
 * @returns {number} The new task ID
 */
async function addTask(
	tasksPath,
	prompt,
	dependencies = [],
	priority = getDefaultPriority(), // Keep getter for default priority
	{ reportProgress, mcpLog, session } = {},
	outputFormat = 'text',
	// customEnv = null, // Removed as AI param overrides are deprecated
	manualTaskData = null,
	useResearch = false // <-- Add useResearch parameter
) {
	let loadingIndicator = null;

	// Create custom reporter that checks for MCP log
	const report = (message, level = 'info') => {
		if (mcpLog) {
			mcpLog[level](message);
		} else if (outputFormat === 'text') {
			log(level, message);
		}
	};

	try {
		// Only display banner and UI elements for text output (CLI)
		if (outputFormat === 'text') {
			displayBanner();

			console.log(
				boxen(chalk.white.bold(`Creating New Task`), {
					padding: 1,
					borderColor: 'blue',
					borderStyle: 'round',
					margin: { top: 1, bottom: 1 }
				})
			);
		}

		// Read the existing tasks
		const data = readJSON(tasksPath);
		if (!data || !data.tasks) {
			report('Invalid or missing tasks.json.', 'error');
			throw new Error('Invalid or missing tasks.json.');
		}

		// Find the highest task ID to determine the next ID
		const highestId =
			data.tasks.length > 0 ? Math.max(...data.tasks.map((t) => t.id)) : 0;
		const newTaskId = highestId + 1;

		// Only show UI box for CLI mode
		if (outputFormat === 'text') {
			console.log(
				boxen(chalk.white.bold(`Creating New Task #${newTaskId}`), {
					padding: 1,
					borderColor: 'blue',
					borderStyle: 'round',
					margin: { top: 1, bottom: 1 }
				})
			);
		}

		// Validate dependencies before proceeding
		const invalidDeps = dependencies.filter((depId) => {
			// Ensure depId is parsed as a number for comparison
			const numDepId = parseInt(depId, 10);
			return isNaN(numDepId) || !data.tasks.some((t) => t.id === numDepId);
		});

		if (invalidDeps.length > 0) {
			report(
				`The following dependencies do not exist or are invalid: ${invalidDeps.join(', ')}`,
				'warn'
			);
			report('Removing invalid dependencies...', 'info');
			dependencies = dependencies.filter(
				(depId) => !invalidDeps.includes(depId)
			);
		}
		// Ensure dependencies are numbers
		const numericDependencies = dependencies.map((dep) => parseInt(dep, 10));

		let taskData;

		// Check if manual task data is provided
		if (manualTaskData) {
			report('Using manually provided task data', 'info');
			taskData = manualTaskData;
			report('DEBUG: Taking MANUAL task data path.', 'debug');

			// Basic validation for manual data
			if (
				!taskData.title ||
				typeof taskData.title !== 'string' ||
				!taskData.description ||
				typeof taskData.description !== 'string'
			) {
				throw new Error(
					'Manual task data must include at least a title and description.'
				);
			}
		} else {
			report('DEBUG: Taking AI task generation path.', 'debug');
			// --- Refactored AI Interaction ---
			report('Generating task data with AI...', 'info');

			// Create context string for task creation prompt
			let contextTasks = '';
			if (numericDependencies.length > 0) {
				const dependentTasks = data.tasks.filter((t) =>
					numericDependencies.includes(t.id)
				);
				contextTasks = `\nThis task depends on the following tasks:\n${dependentTasks
					.map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)
					.join('\n')}`;
			} else {
				const recentTasks = [...data.tasks]
					.sort((a, b) => b.id - a.id)
					.slice(0, 3);
				if (recentTasks.length > 0) {
					contextTasks = `\nRecent tasks in the project:\n${recentTasks
						.map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)
						.join('\n')}`;
				}
			}

			// System Prompt
			const systemPrompt =
				"You are a helpful assistant that creates well-structured tasks for a software development project. Generate a single new task based on the user's description, adhering strictly to the provided JSON schema.";

			// Task Structure Description (for user prompt)
			const taskStructureDesc = `
{
"title": "Task title goes here",
"description": "A concise one or two sentence description of what the task involves",
"details": "In-depth implementation details, considerations, and guidance.",
"testStrategy": "Detailed approach for verifying task completion."
}`;

			// Add any manually provided details to the prompt for context
			let contextFromArgs = '';
			if (manualTaskData?.title)
				contextFromArgs += `\n- Suggested Title: "${manualTaskData.title}"`;
			if (manualTaskData?.description)
				contextFromArgs += `\n- Suggested Description: "${manualTaskData.description}"`;
			if (manualTaskData?.details)
				contextFromArgs += `\n- Additional Details Context: "${manualTaskData.details}"`;
			if (manualTaskData?.testStrategy)
				contextFromArgs += `\n- Additional Test Strategy Context: "${manualTaskData.testStrategy}"`;

			// User Prompt
			const userPrompt = `Create a comprehensive new task (Task #${newTaskId}) for a software development project based on this description: "${prompt}"

${contextTasks}
${contextFromArgs ? `\nConsider these additional details provided by the user:${contextFromArgs}` : ''}

Return your answer as a single JSON object matching the schema precisely:
${taskStructureDesc}

Make sure the details and test strategy are thorough and specific.`;

			// Start the loading indicator - only for text mode
			if (outputFormat === 'text') {
				loadingIndicator = startLoadingIndicator(
					`Generating new task with ${useResearch ? 'Research' : 'Main'} AI...`
				);
			}

			try {
				// Determine the service role based on the useResearch flag
				const serviceRole = useResearch ? 'research' : 'main';

				report('DEBUG: Calling generateObjectService...', 'debug');
				// Call the unified AI service
				const aiGeneratedTaskData = await generateObjectService({
					role: serviceRole, // <-- Use the determined role
					session: session, // Pass session for API key resolution
					schema: AiTaskDataSchema, // Pass the Zod schema
					objectName: 'newTaskData', // Name for the object
					systemPrompt: systemPrompt,
					prompt: userPrompt,
					reportProgress // Pass progress reporter if available
				});
				report('DEBUG: generateObjectService returned successfully.', 'debug');

				report('Successfully generated task data from AI.', 'success');
				taskData = aiGeneratedTaskData; // Assign the validated object
			} catch (error) {
				report(
					`DEBUG: generateObjectService caught error: ${error.message}`,
					'debug'
				);
				report(`Error generating task with AI: ${error.message}`, 'error');
				if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
				throw error; // Re-throw error after logging
			} finally {
				report('DEBUG: generateObjectService finally block reached.', 'debug');
				if (loadingIndicator) stopLoadingIndicator(loadingIndicator); // Ensure indicator stops
			}
			// --- End Refactored AI Interaction ---
		}

		// Create the new task object
		const newTask = {
			id: newTaskId,
			title: taskData.title,
			description: taskData.description,
			details: taskData.details || '',
			testStrategy: taskData.testStrategy || '',
			status: 'pending',
			dependencies: numericDependencies, // Use validated numeric dependencies
			priority: priority,
			subtasks: [] // Initialize with empty subtasks array
		};

		// Add the task to the tasks array
		data.tasks.push(newTask);

		report('DEBUG: Writing tasks.json...', 'debug');
		// Write the updated tasks to the file
		writeJSON(tasksPath, data);
		report('DEBUG: tasks.json written.', 'debug');

		// Generate markdown task files
		report('Generating task files...', 'info');
		report('DEBUG: Calling generateTaskFiles...', 'debug');
		// Pass mcpLog if available to generateTaskFiles
		await generateTaskFiles(tasksPath, path.dirname(tasksPath), { mcpLog });
		report('DEBUG: generateTaskFiles finished.', 'debug');

		// Show success message - only for text output (CLI)
		if (outputFormat === 'text') {
			const table = new Table({
				head: [
					chalk.cyan.bold('ID'),
					chalk.cyan.bold('Title'),
					chalk.cyan.bold('Description')
				],
				colWidths: [5, 30, 50] // Adjust widths as needed
			});

			table.push([
				newTask.id,
				truncate(newTask.title, 27),
				truncate(newTask.description, 47)
			]);

			console.log(chalk.green('✅ New task created successfully:'));
			console.log(table.toString());

			// Helper to get priority color
			const getPriorityColor = (p) => {
				switch (p?.toLowerCase()) {
					case 'high':
						return 'red';
					case 'low':
						return 'gray';
					case 'medium':
					default:
						return 'yellow';
				}
			};

			// Show success message box
			console.log(
				boxen(
					chalk.white.bold(`Task ${newTaskId} Created Successfully`) +
						'\n\n' +
						chalk.white(`Title: ${newTask.title}`) +
						'\n' +
						chalk.white(`Status: ${getStatusWithColor(newTask.status)}`) +
						'\n' +
						chalk.white(
							`Priority: ${chalk[getPriorityColor(newTask.priority)](newTask.priority)}`
						) +
						'\n' +
						(numericDependencies.length > 0
							? chalk.white(`Dependencies: ${numericDependencies.join(', ')}`) +
								'\n'
							: '') +
						'\n' +
						chalk.white.bold('Next Steps:') +
						'\n' +
						chalk.cyan(
							`1. Run ${chalk.yellow(`task-master show ${newTaskId}`)} to see complete task details`
						) +
						'\n' +
						chalk.cyan(
							`2. Run ${chalk.yellow(`task-master set-status --id=${newTaskId} --status=in-progress`)} to start working on it`
						) +
						'\n' +
						chalk.cyan(
							`3. Run ${chalk.yellow(`task-master expand --id=${newTaskId}`)} to break it down into subtasks`
						),
					{ padding: 1, borderColor: 'green', borderStyle: 'round' }
				)
			);
		}

		// Return the new task ID
		report(`DEBUG: Returning new task ID: ${newTaskId}`, 'debug');
		return newTaskId;
	} catch (error) {
		// Stop any loading indicator on error
		if (loadingIndicator) {
			stopLoadingIndicator(loadingIndicator);
		}

		report(`Error adding task: ${error.message}`, 'error');
		if (outputFormat === 'text') {
			console.error(chalk.red(`Error: ${error.message}`));
		}
		// In MCP mode, we let the direct function handler catch and format
		throw error;
	}
}

export default addTask;
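For orientation, here is a minimal usage sketch of the addTask function above, as it might be called from a CLI command handler; the tasks path, prompt, and dependency values are illustrative assumptions, not part of this diff:

// Hypothetical caller: create a task from a prompt using the main AI role.
import addTask from './scripts/modules/task-manager/add-task.js';

const newTaskId = await addTask(
	'tasks/tasks.json', // tasksPath (example value)
	'Add rate limiting to the API client', // prompt (example)
	[12], // dependencies (example IDs)
	'high', // priority
	{}, // { reportProgress, mcpLog, session } - empty for a plain CLI call
	'text' // outputFormat
);
console.log(`Created task #${newTaskId}`);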
484
scripts/modules/task-manager/analyze-task-complexity.js
Normal file
@@ -0,0 +1,484 @@
import chalk from 'chalk';
import boxen from 'boxen';
import readline from 'readline';

import { log, readJSON, writeJSON, isSilentMode } from '../utils.js';

import { startLoadingIndicator, stopLoadingIndicator } from '../ui.js';

import { generateTextService } from '../ai-services-unified.js';

import { getDebugFlag, getProjectName } from '../config-manager.js';

/**
 * Generates the prompt for complexity analysis.
 * (Moved from ai-services.js and simplified)
 * @param {Object} tasksData - The tasks data object.
 * @returns {string} The generated prompt.
 */
function generateInternalComplexityAnalysisPrompt(tasksData) {
	const tasksString = JSON.stringify(tasksData.tasks, null, 2);
	return `Analyze the following tasks to determine their complexity (1-10 scale) and recommend the number of subtasks for expansion. Provide a brief reasoning and an initial expansion prompt for each.

Tasks:
${tasksString}

Respond ONLY with a valid JSON array matching the schema:
[
{
"taskId": <number>,
"taskTitle": "<string>",
"complexityScore": <number 1-10>,
"recommendedSubtasks": <number>,
"expansionPrompt": "<string>",
"reasoning": "<string>"
},
...
]

Do not include any explanatory text, markdown formatting, or code block markers before or after the JSON array.`;
}

/**
 * Analyzes task complexity and generates expansion recommendations
 * @param {Object} options Command options
 * @param {string} options.file - Path to tasks file
 * @param {string} options.output - Path to report output file
 * @param {string|number} [options.threshold] - Complexity threshold
 * @param {boolean} [options.research] - Use research role
 * @param {Object} [options._filteredTasksData] - Pre-filtered task data (internal use)
 * @param {number} [options._originalTaskCount] - Original task count (internal use)
 * @param {Object} context - Context object, potentially containing session and mcpLog
 * @param {Object} [context.session] - Session object from MCP server (optional)
 * @param {Object} [context.mcpLog] - MCP logger object (optional)
 * @param {function} [context.reportProgress] - Deprecated: Function to report progress (ignored)
 */
async function analyzeTaskComplexity(options, context = {}) {
	const { session, mcpLog } = context;
	const tasksPath = options.file || 'tasks/tasks.json';
	const outputPath = options.output || 'scripts/task-complexity-report.json';
	const thresholdScore = parseFloat(options.threshold || '5');
	const useResearch = options.research || false;

	const outputFormat = mcpLog ? 'json' : 'text';

	const reportLog = (message, level = 'info') => {
		if (mcpLog) {
			mcpLog[level](message);
		} else if (!isSilentMode() && outputFormat === 'text') {
			log(level, message);
		}
	};

	if (outputFormat === 'text') {
		console.log(
			chalk.blue(
				`Analyzing task complexity and generating expansion recommendations...`
			)
		);
	}

	try {
		reportLog(`Reading tasks from ${tasksPath}...`, 'info');
		let tasksData;
		let originalTaskCount = 0;

		if (options._filteredTasksData) {
			tasksData = options._filteredTasksData;
			originalTaskCount = options._originalTaskCount || tasksData.tasks.length;
			if (!options._originalTaskCount) {
				try {
					const originalData = readJSON(tasksPath);
					if (originalData && originalData.tasks) {
						originalTaskCount = originalData.tasks.length;
					}
				} catch (e) {
					log('warn', `Could not read original tasks file: ${e.message}`);
				}
			}
		} else {
			tasksData = readJSON(tasksPath);
			if (
				!tasksData ||
				!tasksData.tasks ||
				!Array.isArray(tasksData.tasks) ||
				tasksData.tasks.length === 0
			) {
				throw new Error('No tasks found in the tasks file');
			}
			originalTaskCount = tasksData.tasks.length;
			const activeStatuses = ['pending', 'blocked', 'in-progress'];
			const filteredTasks = tasksData.tasks.filter((task) =>
				activeStatuses.includes(task.status?.toLowerCase() || 'pending')
			);
			tasksData = {
				...tasksData,
				tasks: filteredTasks,
				_originalTaskCount: originalTaskCount
			};
		}

		const skippedCount = originalTaskCount - tasksData.tasks.length;
		reportLog(
			`Found ${originalTaskCount} total tasks in the task file.`,
			'info'
		);
		if (skippedCount > 0) {
			const skipMessage = `Skipping ${skippedCount} tasks marked as done/cancelled/deferred. Analyzing ${tasksData.tasks.length} active tasks.`;
			reportLog(skipMessage, 'info');
			if (outputFormat === 'text') {
				console.log(chalk.yellow(skipMessage));
			}
		}

		if (tasksData.tasks.length === 0) {
			const emptyReport = {
				meta: {
					generatedAt: new Date().toISOString(),
					tasksAnalyzed: 0,
					thresholdScore: thresholdScore,
					projectName: getProjectName(session),
					usedResearch: useResearch
				},
				complexityAnalysis: []
			};
			reportLog(`Writing empty complexity report to ${outputPath}...`, 'info');
			writeJSON(outputPath, emptyReport);
			reportLog(
				`Task complexity analysis complete. Report written to ${outputPath}`,
				'success'
			);
			if (outputFormat === 'text') {
				console.log(
					chalk.green(
						`Task complexity analysis complete. Report written to ${outputPath}`
					)
				);
				const highComplexity = 0;
				const mediumComplexity = 0;
				const lowComplexity = 0;
				const totalAnalyzed = 0;

				console.log('\nComplexity Analysis Summary:');
				console.log('----------------------------');
				console.log(`Tasks in input file: ${originalTaskCount}`);
				console.log(`Tasks successfully analyzed: ${totalAnalyzed}`);
				console.log(`High complexity tasks: ${highComplexity}`);
				console.log(`Medium complexity tasks: ${mediumComplexity}`);
				console.log(`Low complexity tasks: ${lowComplexity}`);
				console.log(
					`Sum verification: ${highComplexity + mediumComplexity + lowComplexity} (should equal ${totalAnalyzed})`
				);
				console.log(`Research-backed analysis: ${useResearch ? 'Yes' : 'No'}`);
				console.log(
					`\nSee ${outputPath} for the full report and expansion commands.`
				);

				console.log(
					boxen(
						chalk.white.bold('Suggested Next Steps:') +
							'\n\n' +
							`${chalk.cyan('1.')} Run ${chalk.yellow('task-master complexity-report')} to review detailed findings\n` +
							`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down complex tasks\n` +
							`${chalk.cyan('3.')} Run ${chalk.yellow('task-master expand --all')} to expand all pending tasks based on complexity`,
						{
							padding: 1,
							borderColor: 'cyan',
							borderStyle: 'round',
							margin: { top: 1 }
						}
					)
				);
			}
			return emptyReport;
		}

		const prompt = generateInternalComplexityAnalysisPrompt(tasksData);
		// System prompt remains simple for text generation
		const systemPrompt =
			'You are an expert software architect and project manager analyzing task complexity. Respond only with the requested valid JSON array.';

		let loadingIndicator = null;
		if (outputFormat === 'text') {
			loadingIndicator = startLoadingIndicator('Calling AI service...');
		}

		let fullResponse = ''; // To store the raw text response

		try {
			const role = useResearch ? 'research' : 'main';
			reportLog(`Using AI service with role: ${role}`, 'info');

			// *** CHANGED: Use generateTextService ***
			fullResponse = await generateTextService({
				prompt,
				systemPrompt,
				role,
				session
				// No schema or objectName needed
			});
			// *** End Service Call Change ***

			reportLog(
				'Successfully received text response via AI service',
				'success'
			);

			// --- Stop Loading Indicator (Unchanged) ---
			if (loadingIndicator) {
				stopLoadingIndicator(loadingIndicator);
				loadingIndicator = null;
			}
			if (outputFormat === 'text') {
				readline.clearLine(process.stdout, 0);
				readline.cursorTo(process.stdout, 0);
				console.log(
					chalk.green('AI service call complete. Parsing response...')
				);
			}
			// --- End Stop Loading Indicator ---

			// --- Re-introduce Manual JSON Parsing & Cleanup ---
			reportLog(`Parsing complexity analysis from text response...`, 'info');
			let complexityAnalysis;
			try {
				let cleanedResponse = fullResponse;
				// Basic trim first
				cleanedResponse = cleanedResponse.trim();

				// Remove potential markdown code block fences
				const codeBlockMatch = cleanedResponse.match(
					/```(?:json)?\s*([\s\S]*?)\s*```/
				);
				if (codeBlockMatch) {
					cleanedResponse = codeBlockMatch[1].trim(); // Trim content inside block
					reportLog('Extracted JSON from code block', 'info');
				} else {
					// If no code block, ensure it starts with '[' and ends with ']'
					// This is less robust but a common fallback
					const firstBracket = cleanedResponse.indexOf('[');
					const lastBracket = cleanedResponse.lastIndexOf(']');
					if (firstBracket !== -1 && lastBracket > firstBracket) {
						cleanedResponse = cleanedResponse.substring(
							firstBracket,
							lastBracket + 1
						);
						reportLog('Extracted content between first [ and last ]', 'info');
					} else {
						reportLog(
							'Warning: Response does not appear to be a JSON array.',
							'warn'
						);
						// Keep going, maybe JSON.parse can handle it or will fail informatively
					}
				}

				if (outputFormat === 'text' && getDebugFlag(session)) {
					console.log(chalk.gray('Attempting to parse cleaned JSON...'));
					console.log(chalk.gray('Cleaned response (first 100 chars):'));
					console.log(chalk.gray(cleanedResponse.substring(0, 100)));
					console.log(chalk.gray('Last 100 chars:'));
					console.log(
						chalk.gray(cleanedResponse.substring(cleanedResponse.length - 100))
					);
				}

				try {
					complexityAnalysis = JSON.parse(cleanedResponse);
				} catch (jsonError) {
					reportLog(
						'Initial JSON parsing failed. Raw response might be malformed.',
						'error'
					);
					reportLog(`Original JSON Error: ${jsonError.message}`, 'error');
					if (outputFormat === 'text' && getDebugFlag(session)) {
						console.log(chalk.red('--- Start Raw Malformed Response ---'));
						console.log(chalk.gray(fullResponse));
						console.log(chalk.red('--- End Raw Malformed Response ---'));
					}
					// Re-throw the specific JSON parsing error
					throw new Error(
						`Failed to parse JSON response: ${jsonError.message}`
					);
				}

				// Ensure it's an array after parsing
				if (!Array.isArray(complexityAnalysis)) {
					throw new Error('Parsed response is not a valid JSON array.');
				}
			} catch (error) {
				// Catch errors specifically from the parsing/cleanup block
				if (loadingIndicator) stopLoadingIndicator(loadingIndicator); // Ensure indicator stops
				reportLog(
					`Error parsing complexity analysis JSON: ${error.message}`,
					'error'
				);
				if (outputFormat === 'text') {
					console.error(
						chalk.red(
							`Error parsing complexity analysis JSON: ${error.message}`
						)
					);
				}
				throw error; // Re-throw parsing error
			}
			// --- End Manual JSON Parsing & Cleanup ---

			// --- Post-processing (Missing Task Check) - (Unchanged) ---
			const taskIds = tasksData.tasks.map((t) => t.id);
			const analysisTaskIds = complexityAnalysis.map((a) => a.taskId);
			const missingTaskIds = taskIds.filter(
				(id) => !analysisTaskIds.includes(id)
			);

			if (missingTaskIds.length > 0) {
				reportLog(
					`Missing analysis for ${missingTaskIds.length} tasks: ${missingTaskIds.join(', ')}`,
					'warn'
				);
				if (outputFormat === 'text') {
					console.log(
						chalk.yellow(
							`Missing analysis for ${missingTaskIds.length} tasks: ${missingTaskIds.join(', ')}`
						)
					);
				}
				for (const missingId of missingTaskIds) {
					const missingTask = tasksData.tasks.find((t) => t.id === missingId);
					if (missingTask) {
						reportLog(`Adding default analysis for task ${missingId}`, 'info');
						complexityAnalysis.push({
							taskId: missingId,
							taskTitle: missingTask.title,
							complexityScore: 5,
							recommendedSubtasks: 3,
							expansionPrompt: `Break down this task with a focus on ${missingTask.title.toLowerCase()}.`,
							reasoning:
								'Automatically added due to missing analysis in AI response.'
						});
					}
				}
			}
			// --- End Post-processing ---

			// --- Report Creation & Writing (Unchanged) ---
			const finalReport = {
				meta: {
					generatedAt: new Date().toISOString(),
					tasksAnalyzed: tasksData.tasks.length,
					thresholdScore: thresholdScore,
					projectName: getProjectName(session),
					usedResearch: useResearch
				},
				complexityAnalysis: complexityAnalysis
			};
			reportLog(`Writing complexity report to ${outputPath}...`, 'info');
			writeJSON(outputPath, finalReport);

			reportLog(
				`Task complexity analysis complete. Report written to ${outputPath}`,
				'success'
			);
			// --- End Report Creation & Writing ---

			// --- Display CLI Summary (Unchanged) ---
			if (outputFormat === 'text') {
				console.log(
					chalk.green(
						`Task complexity analysis complete. Report written to ${outputPath}`
					)
				);
				const highComplexity = complexityAnalysis.filter(
					(t) => t.complexityScore >= 8
				).length;
				const mediumComplexity = complexityAnalysis.filter(
					(t) => t.complexityScore >= 5 && t.complexityScore < 8
				).length;
				const lowComplexity = complexityAnalysis.filter(
					(t) => t.complexityScore < 5
				).length;
				const totalAnalyzed = complexityAnalysis.length;

				console.log('\nComplexity Analysis Summary:');
				console.log('----------------------------');
				console.log(
					`Active tasks sent for analysis: ${tasksData.tasks.length}`
				);
				console.log(`Tasks successfully analyzed: ${totalAnalyzed}`);
				console.log(`High complexity tasks: ${highComplexity}`);
				console.log(`Medium complexity tasks: ${mediumComplexity}`);
				console.log(`Low complexity tasks: ${lowComplexity}`);
				console.log(
					`Sum verification: ${highComplexity + mediumComplexity + lowComplexity} (should equal ${totalAnalyzed})`
				);
				console.log(`Research-backed analysis: ${useResearch ? 'Yes' : 'No'}`);
				console.log(
					`\nSee ${outputPath} for the full report and expansion commands.`
				);

				console.log(
					boxen(
						chalk.white.bold('Suggested Next Steps:') +
							'\n\n' +
							`${chalk.cyan('1.')} Run ${chalk.yellow('task-master complexity-report')} to review detailed findings\n` +
							`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down complex tasks\n` +
							`${chalk.cyan('3.')} Run ${chalk.yellow('task-master expand --all')} to expand all pending tasks based on complexity`,
						{
							padding: 1,
							borderColor: 'cyan',
							borderStyle: 'round',
							margin: { top: 1 }
						}
					)
				);

				if (getDebugFlag(session)) {
					console.debug(
						chalk.gray(
							`Final analysis object: ${JSON.stringify(finalReport, null, 2)}`
						)
					);
				}
			}
			// --- End Display CLI Summary ---

			return finalReport;
		} catch (error) {
			// Catches errors from generateTextService call
			if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
			reportLog(`Error during AI service call: ${error.message}`, 'error');
			if (outputFormat === 'text') {
				console.error(
					chalk.red(`Error during AI service call: ${error.message}`)
				);
				if (error.message.includes('API key')) {
					console.log(
						chalk.yellow(
							'\nPlease ensure your API keys are correctly configured in .env or ~/.taskmaster/.env'
						)
					);
					console.log(
						chalk.yellow("Run 'task-master models --setup' if needed.")
					);
				}
			}
			throw error; // Re-throw AI service error
		}
	} catch (error) {
		// Catches general errors (file read, etc.)
		reportLog(`Error analyzing task complexity: ${error.message}`, 'error');
		if (outputFormat === 'text') {
			console.error(
				chalk.red(`Error analyzing task complexity: ${error.message}`)
			);
			if (getDebugFlag(session)) {
				console.error(error);
			}
			process.exit(1);
		} else {
			throw error;
		}
	}
}

export default analyzeTaskComplexity;
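A small usage sketch for analyzeTaskComplexity, assuming a CLI-style call with no MCP context; the option values are illustrative assumptions, not part of this diff:

// Hypothetical caller: analyze active tasks and write the complexity report.
import analyzeTaskComplexity from './scripts/modules/task-manager/analyze-task-complexity.js';

const report = await analyzeTaskComplexity(
	{
		file: 'tasks/tasks.json', // options.file (example path)
		output: 'scripts/task-complexity-report.json', // options.output
		threshold: 6, // options.threshold
		research: false // options.research
	},
	{} // context: no session/mcpLog, so output defaults to 'text'
);
console.log(`Analyzed ${report.meta.tasksAnalyzed} tasks`);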
144
scripts/modules/task-manager/clear-subtasks.js
Normal file
@@ -0,0 +1,144 @@
import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';

import { log, readJSON, writeJSON, truncate } from '../utils.js';
import { displayBanner } from '../ui.js';
import generateTaskFiles from './generate-task-files.js';

/**
 * Clear subtasks from specified tasks
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {string} taskIds - Task IDs to clear subtasks from
 */
function clearSubtasks(tasksPath, taskIds) {
	displayBanner();

	log('info', `Reading tasks from ${tasksPath}...`);
	const data = readJSON(tasksPath);
	if (!data || !data.tasks) {
		log('error', 'No valid tasks found.');
		process.exit(1);
	}

	console.log(
		boxen(chalk.white.bold('Clearing Subtasks'), {
			padding: 1,
			borderColor: 'blue',
			borderStyle: 'round',
			margin: { top: 1, bottom: 1 }
		})
	);

	// Handle multiple task IDs (comma-separated)
	const taskIdArray = taskIds.split(',').map((id) => id.trim());
	let clearedCount = 0;

	// Create a summary table for the cleared subtasks
	const summaryTable = new Table({
		head: [
			chalk.cyan.bold('Task ID'),
			chalk.cyan.bold('Task Title'),
			chalk.cyan.bold('Subtasks Cleared')
		],
		colWidths: [10, 50, 20],
		style: { head: [], border: [] }
	});

	taskIdArray.forEach((taskId) => {
		const id = parseInt(taskId, 10);
		if (isNaN(id)) {
			log('error', `Invalid task ID: ${taskId}`);
			return;
		}

		const task = data.tasks.find((t) => t.id === id);
		if (!task) {
			log('error', `Task ${id} not found`);
			return;
		}

		if (!task.subtasks || task.subtasks.length === 0) {
			log('info', `Task ${id} has no subtasks to clear`);
			summaryTable.push([
				id.toString(),
				truncate(task.title, 47),
				chalk.yellow('No subtasks')
			]);
			return;
		}

		const subtaskCount = task.subtasks.length;
		task.subtasks = [];
		clearedCount++;
		log('info', `Cleared ${subtaskCount} subtasks from task ${id}`);

		summaryTable.push([
			id.toString(),
			truncate(task.title, 47),
			chalk.green(`${subtaskCount} subtasks cleared`)
		]);
	});

	if (clearedCount > 0) {
		writeJSON(tasksPath, data);

		// Show summary table
		console.log(
			boxen(chalk.white.bold('Subtask Clearing Summary:'), {
				padding: { left: 2, right: 2, top: 0, bottom: 0 },
				margin: { top: 1, bottom: 0 },
				borderColor: 'blue',
				borderStyle: 'round'
			})
		);
		console.log(summaryTable.toString());

		// Regenerate task files to reflect changes
		log('info', 'Regenerating task files...');
		generateTaskFiles(tasksPath, path.dirname(tasksPath));

		// Success message
		console.log(
			boxen(
				chalk.green(
					`Successfully cleared subtasks from ${chalk.bold(clearedCount)} task(s)`
				),
				{
					padding: 1,
					borderColor: 'green',
					borderStyle: 'round',
					margin: { top: 1 }
				}
			)
		);

		// Next steps suggestion
		console.log(
			boxen(
				chalk.white.bold('Next Steps:') +
					'\n\n' +
					`${chalk.cyan('1.')} Run ${chalk.yellow('task-master expand --id=<id>')} to generate new subtasks\n` +
					`${chalk.cyan('2.')} Run ${chalk.yellow('task-master list --with-subtasks')} to verify changes`,
				{
					padding: 1,
					borderColor: 'cyan',
					borderStyle: 'round',
					margin: { top: 1 }
				}
			)
		);
	} else {
		console.log(
			boxen(chalk.yellow('No subtasks were cleared'), {
				padding: 1,
				borderColor: 'yellow',
				borderStyle: 'round',
				margin: { top: 1 }
			})
		);
	}
}

export default clearSubtasks;
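For reference, a minimal sketch of calling clearSubtasks; the comma-separated ID string is the format the function expects, and the tasks path is an assumed example value:

// Hypothetical caller: clear subtasks from tasks 3 and 5.
import clearSubtasks from './scripts/modules/task-manager/clear-subtasks.js';

clearSubtasks('tasks/tasks.json', '3,5'); // synchronous; also regenerates task files when anything was cleared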
177
scripts/modules/task-manager/expand-all-tasks.js
Normal file
@@ -0,0 +1,177 @@
import { log, readJSON, isSilentMode } from '../utils.js';
import { startLoadingIndicator, stopLoadingIndicator } from '../ui.js';
import expandTask from './expand-task.js';
import { getDebugFlag } from '../config-manager.js';

/**
 * Expand all eligible pending or in-progress tasks using the expandTask function.
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {number} [numSubtasks] - Optional: Target number of subtasks per task.
 * @param {boolean} [useResearch=false] - Whether to use the research AI role.
 * @param {string} [additionalContext=''] - Optional additional context.
 * @param {boolean} [force=false] - Force expansion even if tasks already have subtasks.
 * @param {Object} context - Context object containing session and mcpLog.
 * @param {Object} [context.session] - Session object from MCP.
 * @param {Object} [context.mcpLog] - MCP logger object.
 * @param {string} [outputFormat='text'] - Output format ('text' or 'json'). MCP calls should use 'json'.
 * @returns {Promise<{success: boolean, expandedCount: number, failedCount: number, skippedCount: number, tasksToExpand: number, message?: string}>} - Result summary.
 */
async function expandAllTasks(
	tasksPath,
	numSubtasks, // Keep this signature, expandTask handles defaults
	useResearch = false,
	additionalContext = '',
	force = false, // Keep force here for the filter logic
	context = {},
	outputFormat = 'text' // Assume text default for CLI
) {
	const { session, mcpLog } = context;
	const isMCPCall = !!mcpLog; // Determine if called from MCP

	// Use mcpLog if available, otherwise use the default console log wrapper respecting silent mode
	const logger =
		mcpLog ||
		(outputFormat === 'json'
			? {
					// Basic logger for JSON output mode
					info: (msg) => {},
					warn: (msg) => {},
					error: (msg) => console.error(`ERROR: ${msg}`), // Still log errors
					debug: (msg) => {}
				}
			: {
					// CLI logger respecting silent mode
					info: (msg) => !isSilentMode() && log('info', msg),
					warn: (msg) => !isSilentMode() && log('warn', msg),
					error: (msg) => !isSilentMode() && log('error', msg),
					debug: (msg) =>
						!isSilentMode() && getDebugFlag(session) && log('debug', msg)
				});

	let loadingIndicator = null;
	let expandedCount = 0;
	let failedCount = 0;
	// No skipped count needed now as the filter handles it upfront
	let tasksToExpandCount = 0; // Renamed for clarity

	if (!isMCPCall && outputFormat === 'text') {
		loadingIndicator = startLoadingIndicator(
			'Analyzing tasks for expansion...'
		);
	}

	try {
		logger.info(`Reading tasks from ${tasksPath}`);
		const data = readJSON(tasksPath);
		if (!data || !data.tasks) {
			throw new Error(`Invalid tasks data in ${tasksPath}`);
		}

		// --- Restore Original Filtering Logic ---
		const tasksToExpand = data.tasks.filter(
			(task) =>
				(task.status === 'pending' || task.status === 'in-progress') && // Include 'in-progress'
				(!task.subtasks || task.subtasks.length === 0 || force) // Check subtasks/force here
		);
		tasksToExpandCount = tasksToExpand.length; // Get the count from the filtered array
		logger.info(`Found ${tasksToExpandCount} tasks eligible for expansion.`);
		// --- End Restored Filtering Logic ---

		if (loadingIndicator) {
			stopLoadingIndicator(loadingIndicator, 'Analysis complete.');
		}

		if (tasksToExpandCount === 0) {
			logger.info('No tasks eligible for expansion.');
			// --- Fix: Restore success: true and add message ---
			return {
				success: true, // Indicate overall success despite no action
				expandedCount: 0,
				failedCount: 0,
				skippedCount: 0,
				tasksToExpand: 0,
				message: 'No tasks eligible for expansion.'
			};
			// --- End Fix ---
		}

		// Iterate over the already filtered tasks
		for (const task of tasksToExpand) {
			// --- Remove Redundant Check ---
			// The check below is no longer needed as the initial filter handles it
			/*
			if (task.subtasks && task.subtasks.length > 0 && !force) {
				logger.info(
					`Skipping task ${task.id}: Already has subtasks. Use --force to overwrite.`
				);
				skippedCount++;
				continue;
			}
			*/
			// --- End Removed Redundant Check ---

			// Start indicator for individual task expansion in CLI mode
			let taskIndicator = null;
			if (!isMCPCall && outputFormat === 'text') {
				taskIndicator = startLoadingIndicator(`Expanding task ${task.id}...`);
			}

			try {
				// Call the refactored expandTask function
				await expandTask(
					tasksPath,
					task.id,
					numSubtasks, // Pass numSubtasks, expandTask handles defaults/complexity
					useResearch,
					additionalContext,
					context, // Pass the whole context object { session, mcpLog }
					force // Pass the force flag down
				);
				expandedCount++;
				if (taskIndicator) {
					stopLoadingIndicator(taskIndicator, `Task ${task.id} expanded.`);
				}
				logger.info(`Successfully expanded task ${task.id}.`);
			} catch (error) {
				failedCount++;
				if (taskIndicator) {
					stopLoadingIndicator(
						taskIndicator,
						`Failed to expand task ${task.id}.`,
						false
					);
				}
				logger.error(`Failed to expand task ${task.id}: ${error.message}`);
				// Continue to the next task
			}
		}

		// Log final summary (removed skipped count from message)
		logger.info(
			`Expansion complete: ${expandedCount} expanded, ${failedCount} failed.`
		);

		// Return summary (skippedCount is now 0) - Add success: true here as well for consistency
		return {
			success: true, // Indicate overall success
			expandedCount,
			failedCount,
			skippedCount: 0,
			tasksToExpand: tasksToExpandCount
		};
	} catch (error) {
		if (loadingIndicator)
			stopLoadingIndicator(loadingIndicator, 'Error.', false);
		logger.error(`Error during expand all operation: ${error.message}`);
		if (!isMCPCall && getDebugFlag(session)) {
			console.error(error); // Log full stack in debug CLI mode
		}
		// Re-throw error for the caller to handle, the direct function will format it
		throw error; // Let direct function wrapper handle formatting
		/* Original re-throw:
		throw new Error(`Failed to expand all tasks: ${error.message}`);
		*/
	}
}

export default expandAllTasks;
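A usage sketch for expandAllTasks as a CLI-style call; the argument values below are assumptions for illustration only:

// Hypothetical caller: expand every eligible pending/in-progress task.
import expandAllTasks from './scripts/modules/task-manager/expand-all-tasks.js';

const summary = await expandAllTasks(
	'tasks/tasks.json', // tasksPath (example path)
	undefined, // numSubtasks: let expandTask pick from report/config
	false, // useResearch
	'', // additionalContext
	false, // force: skip tasks that already have subtasks
	{}, // context: no session/mcpLog
	'text' // outputFormat
);
console.log(`${summary.expandedCount} expanded, ${summary.failedCount} failed`);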
570
scripts/modules/task-manager/expand-task.js
Normal file
@@ -0,0 +1,570 @@
import fs from 'fs';
import path from 'path';
import { z } from 'zod';

import { log, readJSON, writeJSON, isSilentMode } from '../utils.js';

import { startLoadingIndicator, stopLoadingIndicator } from '../ui.js';

import { generateTextService } from '../ai-services-unified.js';

import { getDefaultSubtasks, getDebugFlag } from '../config-manager.js';
import generateTaskFiles from './generate-task-files.js';

// --- Zod Schemas (Keep from previous step) ---
const subtaskSchema = z
	.object({
		id: z
			.number()
			.int()
			.positive()
			.describe('Sequential subtask ID starting from 1'),
		title: z.string().min(5).describe('Clear, specific title for the subtask'),
		description: z
			.string()
			.min(10)
			.describe('Detailed description of the subtask'),
		dependencies: z
			.array(z.number().int())
			.describe('IDs of prerequisite subtasks within this expansion'),
		details: z.string().min(20).describe('Implementation details and guidance'),
		status: z
			.string()
			.describe(
				'The current status of the subtask (should be pending initially)'
			),
		testStrategy: z
			.string()
			.optional()
			.describe('Approach for testing this subtask')
	})
	.strict();
const subtaskArraySchema = z.array(subtaskSchema);
const subtaskWrapperSchema = z.object({
	subtasks: subtaskArraySchema.describe('The array of generated subtasks.')
});
// --- End Zod Schemas ---

/**
 * Generates the system prompt for the main AI role (e.g., Claude).
 * @param {number} subtaskCount - The target number of subtasks.
 * @returns {string} The system prompt.
 */
function generateMainSystemPrompt(subtaskCount) {
	return `You are an AI assistant helping with task breakdown for software development.
You need to break down a high-level task into ${subtaskCount} specific subtasks that can be implemented one by one.

Subtasks should:
1. Be specific and actionable implementation steps
2. Follow a logical sequence
3. Each handle a distinct part of the parent task
4. Include clear guidance on implementation approach
5. Have appropriate dependency chains between subtasks (using the new sequential IDs)
6. Collectively cover all aspects of the parent task

For each subtask, provide:
- id: Sequential integer starting from the provided nextSubtaskId
- title: Clear, specific title
- description: Detailed description
- dependencies: Array of prerequisite subtask IDs (use the new sequential IDs)
- details: Implementation details
- testStrategy: Optional testing approach


Respond ONLY with a valid JSON object containing a single key "subtasks" whose value is an array matching the structure described. Do not include any explanatory text, markdown formatting, or code block markers.`;
}

/**
 * Generates the user prompt for the main AI role (e.g., Claude).
 * @param {Object} task - The parent task object.
 * @param {number} subtaskCount - The target number of subtasks.
 * @param {string} additionalContext - Optional additional context.
 * @param {number} nextSubtaskId - The starting ID for the new subtasks.
 * @returns {string} The user prompt.
 */
function generateMainUserPrompt(
	task,
	subtaskCount,
	additionalContext,
	nextSubtaskId
) {
	const contextPrompt = additionalContext
		? `\n\nAdditional context: ${additionalContext}`
		: '';
	const schemaDescription = `
{
"subtasks": [
{
"id": ${nextSubtaskId}, // First subtask ID
"title": "Specific subtask title",
"description": "Detailed description",
"dependencies": [], // e.g., [${nextSubtaskId + 1}] if it depends on the next
"details": "Implementation guidance",
"testStrategy": "Optional testing approach"
},
// ... (repeat for a total of ${subtaskCount} subtasks with sequential IDs)
]
}`;

	return `Break down this task into exactly ${subtaskCount} specific subtasks:

Task ID: ${task.id}
Title: ${task.title}
Description: ${task.description}
Current details: ${task.details || 'None'}
${contextPrompt}

Return ONLY the JSON object containing the "subtasks" array, matching this structure:
${schemaDescription}`;
}

/**
 * Generates the user prompt for the research AI role (e.g., Perplexity).
 * @param {Object} task - The parent task object.
 * @param {number} subtaskCount - The target number of subtasks.
 * @param {string} additionalContext - Optional additional context.
 * @param {number} nextSubtaskId - The starting ID for the new subtasks.
 * @returns {string} The user prompt.
 */
function generateResearchUserPrompt(
	task,
	subtaskCount,
	additionalContext,
	nextSubtaskId
) {
	const contextPrompt = additionalContext
		? `\n\nConsider this context: ${additionalContext}`
		: '';
	const schemaDescription = `
{
"subtasks": [
{
"id": <number>, // Sequential ID starting from ${nextSubtaskId}
"title": "<string>",
"description": "<string>",
"dependencies": [<number>], // e.g., [${nextSubtaskId + 1}]
"details": "<string>",
"testStrategy": "<string>" // Optional
},
// ... (repeat for ${subtaskCount} subtasks)
]
}`;

	return `Analyze the following task and break it down into exactly ${subtaskCount} specific subtasks using your research capabilities. Assign sequential IDs starting from ${nextSubtaskId}.

Parent Task:
ID: ${task.id}
Title: ${task.title}
Description: ${task.description}
Current details: ${task.details || 'None'}
${contextPrompt}

CRITICAL: Respond ONLY with a valid JSON object containing a single key "subtasks". The value must be an array of the generated subtasks, strictly matching this structure:
${schemaDescription}

Do not include ANY explanatory text, markdown, or code block markers. Just the JSON object.`;
}

/**
 * Parse subtasks from AI's text response. Includes basic cleanup.
 * @param {string} text - Response text from AI.
 * @param {number} startId - Starting subtask ID expected.
 * @param {number} expectedCount - Expected number of subtasks.
 * @param {number} parentTaskId - Parent task ID for context.
 * @param {Object} logger - Logging object (mcpLog or console log).
 * @returns {Array} Parsed and potentially corrected subtasks array.
 * @throws {Error} If parsing fails or JSON is invalid/malformed.
 */
function parseSubtasksFromText(
	text,
	startId,
	expectedCount,
	parentTaskId,
	logger
) {
	logger.info('Attempting to parse subtasks object from text response...');
	if (!text || text.trim() === '') {
		throw new Error('AI response text is empty.');
	}

	let cleanedResponse = text.trim();
	const originalResponseForDebug = cleanedResponse;

	// 1. Extract from Markdown code block first
	const codeBlockMatch = cleanedResponse.match(
		/```(?:json)?\s*([\s\S]*?)\s*```/
	);
	if (codeBlockMatch) {
		cleanedResponse = codeBlockMatch[1].trim();
		logger.info('Extracted JSON content from Markdown code block.');
	} else {
		// 2. If no code block, find first '{' and last '}' for the object
		const firstBrace = cleanedResponse.indexOf('{');
		const lastBrace = cleanedResponse.lastIndexOf('}');
		if (firstBrace !== -1 && lastBrace > firstBrace) {
			cleanedResponse = cleanedResponse.substring(firstBrace, lastBrace + 1);
			logger.info('Extracted content between first { and last }.');
		} else {
			logger.warn(
				'Response does not appear to contain a JSON object structure. Parsing raw response.'
			);
		}
	}

	// 3. Attempt to parse the object
	let parsedObject;
	try {
		parsedObject = JSON.parse(cleanedResponse);
	} catch (parseError) {
		logger.error(`Failed to parse JSON object: ${parseError.message}`);
		logger.error(
			`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`
		);
		logger.error(
			`Original Raw Response (first 500 chars): ${originalResponseForDebug.substring(0, 500)}`
		);
		throw new Error(
			`Failed to parse JSON response object: ${parseError.message}`
		);
	}

	// 4. Validate the object structure and extract the subtasks array
	if (
		!parsedObject ||
		typeof parsedObject !== 'object' ||
		!Array.isArray(parsedObject.subtasks)
	) {
		logger.error(
			`Parsed content is not an object or missing 'subtasks' array. Content: ${JSON.stringify(parsedObject).substring(0, 200)}`
		);
		throw new Error(
			'Parsed AI response is not a valid object containing a "subtasks" array.'
		);
	}
	const parsedSubtasks = parsedObject.subtasks; // Extract the array

	logger.info(
		`Successfully parsed ${parsedSubtasks.length} potential subtasks from the object.`
	);
	if (expectedCount && parsedSubtasks.length !== expectedCount) {
		logger.warn(
			`Expected ${expectedCount} subtasks, but parsed ${parsedSubtasks.length}.`
		);
	}

	// 5. Validate and Normalize each subtask using Zod schema
	let currentId = startId;
	const validatedSubtasks = [];
	const validationErrors = [];

	for (const rawSubtask of parsedSubtasks) {
		const correctedSubtask = {
			...rawSubtask,
			id: currentId, // Enforce sequential ID
			dependencies: Array.isArray(rawSubtask.dependencies)
				? rawSubtask.dependencies
						.map((dep) => (typeof dep === 'string' ? parseInt(dep, 10) : dep))
						.filter(
							(depId) => !isNaN(depId) && depId >= startId && depId < currentId
						) // Ensure deps are numbers, valid range
				: [],
			status: 'pending' // Enforce pending status
			// parentTaskId can be added if needed: parentTaskId: parentTaskId
		};

		const result = subtaskSchema.safeParse(correctedSubtask);

		if (result.success) {
			validatedSubtasks.push(result.data); // Add the validated data
		} else {
			logger.warn(
				`Subtask validation failed for raw data: ${JSON.stringify(rawSubtask).substring(0, 100)}...`
			);
			result.error.errors.forEach((err) => {
				const errorMessage = ` - Field '${err.path.join('.')}': ${err.message}`;
				logger.warn(errorMessage);
				validationErrors.push(`Subtask ${currentId}: ${errorMessage}`);
			});
			// Optionally, decide whether to include partially valid tasks or skip them
			// For now, we'll skip invalid ones
		}
		currentId++; // Increment ID for the next *potential* subtask
	}

	if (validationErrors.length > 0) {
		logger.error(
			`Found ${validationErrors.length} validation errors in the generated subtasks.`
		);
		// Optionally throw an error here if strict validation is required
		// throw new Error(`Subtask validation failed:\n${validationErrors.join('\n')}`);
		logger.warn('Proceeding with only the successfully validated subtasks.');
	}

	if (validatedSubtasks.length === 0 && parsedSubtasks.length > 0) {
		throw new Error(
			'AI response contained potential subtasks, but none passed validation.'
		);
	}

	// Ensure we don't return more than expected, preferring validated ones
	return validatedSubtasks.slice(0, expectedCount || validatedSubtasks.length);
}

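To make the parsing contract above concrete, here is a small sketch of the kind of response text parseSubtasksFromText accepts and what it returns; the sample payload and the use of console as the logger are illustrative assumptions, not part of this diff:

// Hypothetical input: an AI response wrapped in a Markdown code fence.
const sampleResponse = [
	'```json',
	'{ "subtasks": [ { "id": 1, "title": "Set up module scaffold",',
	'  "description": "Create the new module file and exports.",',
	'  "dependencies": [], "details": "Add the file, wire up imports, export the entry point.",',
	'  "status": "pending" } ] }',
	'```'
].join('\n');

// startId=1, expectedCount=1, parentTaskId=42; console stands in for the logger here.
const subtasks = parseSubtasksFromText(sampleResponse, 1, 1, 42, console);
// -> [ { id: 1, title: 'Set up module scaffold', ..., status: 'pending' } ]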
/**
 * Expand a task into subtasks using the unified AI service (generateTextService).
 * Appends new subtasks by default. Replaces existing subtasks if force=true.
 * Integrates complexity report to determine subtask count and prompt if available,
 * unless numSubtasks is explicitly provided.
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {number} taskId - Task ID to expand
 * @param {number | null | undefined} [numSubtasks] - Optional: Explicit target number of subtasks. If null/undefined, check complexity report or config default.
 * @param {boolean} [useResearch=false] - Whether to use the research AI role.
 * @param {string} [additionalContext=''] - Optional additional context.
 * @param {Object} context - Context object containing session and mcpLog.
 * @param {Object} [context.session] - Session object from MCP.
 * @param {Object} [context.mcpLog] - MCP logger object.
 * @param {boolean} [force=false] - If true, replace existing subtasks; otherwise, append.
 * @returns {Promise<Object>} The updated parent task object with new subtasks.
 * @throws {Error} If task not found, AI service fails, or parsing fails.
 */
async function expandTask(
	tasksPath,
	taskId,
	numSubtasks,
	useResearch = false,
	additionalContext = '',
	context = {},
	force = false
) {
	const { session, mcpLog } = context;
	const outputFormat = mcpLog ? 'json' : 'text';

	// Use mcpLog if available, otherwise use the default console log wrapper
	const logger = mcpLog || {
		info: (msg) => !isSilentMode() && log('info', msg),
		warn: (msg) => !isSilentMode() && log('warn', msg),
		error: (msg) => !isSilentMode() && log('error', msg),
		debug: (msg) =>
			!isSilentMode() && getDebugFlag(session) && log('debug', msg) // Use getDebugFlag
	};

	if (mcpLog) {
		logger.info(`expandTask called with context: session=${!!session}`);
	}

	try {
		// --- Task Loading/Filtering (Unchanged) ---
		logger.info(`Reading tasks from ${tasksPath}`);
		const data = readJSON(tasksPath);
		if (!data || !data.tasks)
			throw new Error(`Invalid tasks data in ${tasksPath}`);
		const taskIndex = data.tasks.findIndex(
			(t) => t.id === parseInt(taskId, 10)
		);
		if (taskIndex === -1) throw new Error(`Task ${taskId} not found`);
		const task = data.tasks[taskIndex];
		logger.info(`Expanding task ${taskId}: ${task.title}`);
		// --- End Task Loading/Filtering ---

		// --- Handle Force Flag: Clear existing subtasks if force=true ---
		if (force && Array.isArray(task.subtasks) && task.subtasks.length > 0) {
			logger.info(
				`Force flag set. Clearing existing ${task.subtasks.length} subtasks for task ${taskId}.`
			);
			task.subtasks = []; // Clear existing subtasks
		}
		// --- End Force Flag Handling ---

		// --- Complexity Report Integration ---
		let finalSubtaskCount;
		let promptContent = '';
		let complexityReasoningContext = '';
		let systemPrompt; // Declare systemPrompt here

		const projectRoot = path.dirname(path.dirname(tasksPath));
		const complexityReportPath = path.join(
			projectRoot,
			'scripts/task-complexity-report.json'
		);
		let taskAnalysis = null;

		try {
			if (fs.existsSync(complexityReportPath)) {
				const complexityReport = readJSON(complexityReportPath);
				taskAnalysis = complexityReport?.complexityAnalysis?.find(
					(a) => a.taskId === task.id
				);
				if (taskAnalysis) {
					logger.info(
						`Found complexity analysis for task ${task.id}: Score ${taskAnalysis.complexityScore}`
					);
					if (taskAnalysis.reasoning) {
						complexityReasoningContext = `\nComplexity Analysis Reasoning: ${taskAnalysis.reasoning}`;
					}
				} else {
					logger.info(
						`No complexity analysis found for task ${task.id} in report.`
					);
				}
			} else {
				logger.info(
					`Complexity report not found at ${complexityReportPath}. Skipping complexity check.`
				);
			}
		} catch (reportError) {
			logger.warn(
				`Could not read or parse complexity report: ${reportError.message}. Proceeding without it.`
			);
		}

		// Determine final subtask count
		const explicitNumSubtasks = parseInt(numSubtasks, 10);
		if (!isNaN(explicitNumSubtasks) && explicitNumSubtasks > 0) {
			finalSubtaskCount = explicitNumSubtasks;
			logger.info(
				`Using explicitly provided subtask count: ${finalSubtaskCount}`
			);
		} else if (taskAnalysis?.recommendedSubtasks) {
			finalSubtaskCount = parseInt(taskAnalysis.recommendedSubtasks, 10);
			logger.info(
				`Using subtask count from complexity report: ${finalSubtaskCount}`
			);
		} else {
			finalSubtaskCount = getDefaultSubtasks(session);
			logger.info(`Using default number of subtasks: ${finalSubtaskCount}`);
		}
		if (isNaN(finalSubtaskCount) || finalSubtaskCount <= 0) {
			logger.warn(
				`Invalid subtask count determined (${finalSubtaskCount}), defaulting to 3.`
			);
			finalSubtaskCount = 3;
		}

		// Determine prompt content AND system prompt
		const nextSubtaskId = (task.subtasks?.length || 0) + 1;

		if (taskAnalysis?.expansionPrompt) {
			// Use prompt from complexity report
			promptContent = taskAnalysis.expansionPrompt;
			// Append additional context and reasoning
			promptContent += `\n\n${additionalContext}`.trim();
			promptContent += `${complexityReasoningContext}`.trim();

			// --- Use Simplified System Prompt for Report Prompts ---
			systemPrompt = `You are an AI assistant helping with task breakdown. Generate exactly ${finalSubtaskCount} subtasks based on the provided prompt and context. Respond ONLY with a valid JSON object containing a single key "subtasks" whose value is an array of the generated subtask objects. Each subtask object in the array must have keys: "id", "title", "description", "dependencies", "details", "status". Ensure the 'id' starts from ${nextSubtaskId} and is sequential. Ensure 'dependencies' only reference valid prior subtask IDs generated in this response (starting from ${nextSubtaskId}). Ensure 'status' is 'pending'. Do not include any other text or explanation.`;
			logger.info(
				`Using expansion prompt from complexity report and simplified system prompt for task ${task.id}.`
			);
			// --- End Simplified System Prompt ---
		} else {
			// Use standard prompt generation
			const combinedAdditionalContext =
				`${additionalContext}${complexityReasoningContext}`.trim();
			if (useResearch) {
				promptContent = generateResearchUserPrompt(
					task,
					finalSubtaskCount,
					combinedAdditionalContext,
					nextSubtaskId
				);
				// Use the specific research system prompt if needed, or a standard one
				systemPrompt = `You are an AI assistant that responds ONLY with valid JSON objects as requested. The object should contain a 'subtasks' array.`; // Or keep generateResearchSystemPrompt if it exists
			} else {
				promptContent = generateMainUserPrompt(
					task,
					finalSubtaskCount,
					combinedAdditionalContext,
					nextSubtaskId
				);
				// Use the original detailed system prompt for standard generation
				systemPrompt = generateMainSystemPrompt(finalSubtaskCount);
|
||||
}
|
||||
logger.info(`Using standard prompt generation for task ${task.id}.`);
|
||||
}
|
||||
// --- End Complexity Report / Prompt Logic ---
|
||||
|
||||
// --- AI Subtask Generation using generateTextService ---
|
||||
let generatedSubtasks = [];
|
||||
let loadingIndicator = null;
|
||||
if (outputFormat === 'text') {
|
||||
loadingIndicator = startLoadingIndicator(
|
||||
`Generating ${finalSubtaskCount} subtasks...`
|
||||
);
|
||||
}
|
||||
|
||||
let responseText = '';
|
||||
|
||||
try {
|
||||
const role = useResearch ? 'research' : 'main';
|
||||
logger.info(`Using AI service with role: ${role}`);
|
||||
|
||||
// Call generateTextService with the determined prompts
|
||||
responseText = await generateTextService({
|
||||
prompt: promptContent,
|
||||
systemPrompt: systemPrompt, // Use the determined system prompt
|
||||
role,
|
||||
session
|
||||
});
|
||||
logger.info(
|
||||
'Successfully received text response from AI service',
|
||||
'success'
|
||||
);
|
||||
|
||||
// Parse Subtasks
|
||||
generatedSubtasks = parseSubtasksFromText(
|
||||
responseText,
|
||||
nextSubtaskId,
|
||||
finalSubtaskCount,
|
||||
task.id,
|
||||
logger
|
||||
);
|
||||
logger.info(
|
||||
`Successfully parsed ${generatedSubtasks.length} subtasks from AI response.`
|
||||
);
|
||||
} catch (error) {
|
||||
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
||||
logger.error(
|
||||
`Error during AI call or parsing for task ${taskId}: ${error.message}`, // Added task ID context
|
||||
'error'
|
||||
);
|
||||
// Log raw response in debug mode if parsing failed
|
||||
if (
|
||||
error.message.includes('Failed to parse valid subtasks') &&
|
||||
getDebugFlag(session)
|
||||
) {
|
||||
logger.error(`Raw AI Response that failed parsing:\n${responseText}`);
|
||||
}
|
||||
throw error;
|
||||
} finally {
|
||||
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
||||
}
|
||||
|
||||
// --- Task Update & File Writing ---
|
||||
// Ensure task.subtasks is an array before appending
|
||||
if (!Array.isArray(task.subtasks)) {
|
||||
task.subtasks = [];
|
||||
}
|
||||
// Append the newly generated and validated subtasks
|
||||
task.subtasks.push(...generatedSubtasks);
|
||||
// --- End Change: Append instead of replace ---
|
||||
|
||||
data.tasks[taskIndex] = task; // Assign the modified task back
|
||||
logger.info(`Writing updated tasks to ${tasksPath}`);
|
||||
writeJSON(tasksPath, data);
|
||||
logger.info(`Generating individual task files...`);
|
||||
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
logger.info(`Task files generated.`);
|
||||
// --- End Task Update & File Writing ---
|
||||
|
||||
return task; // Return the updated task object
|
||||
} catch (error) {
|
||||
// Catches errors from file reading, parsing, AI call etc.
|
||||
logger.error(`Error expanding task ${taskId}: ${error.message}`, 'error');
|
||||
if (outputFormat === 'text' && getDebugFlag(session)) {
|
||||
console.error(error); // Log full stack in debug CLI mode
|
||||
}
|
||||
throw error; // Re-throw for the caller
|
||||
}
|
||||
}
|
||||
|
||||
export default expandTask;
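
// A minimal sketch (not part of this commit) of the subtask-count precedence that
// expandTask applies above: an explicit numSubtasks wins, then the complexity report's
// recommendedSubtasks, then the configured default, with a final fallback of 3.
// resolveSubtaskCount and its defaultCount parameter are hypothetical names used only
// for illustration.
function resolveSubtaskCount(numSubtasks, taskAnalysis, defaultCount) {
	const explicit = parseInt(numSubtasks, 10);
	if (!isNaN(explicit) && explicit > 0) return explicit;
	const recommended = parseInt(taskAnalysis?.recommendedSubtasks, 10);
	if (!isNaN(recommended) && recommended > 0) return recommended;
	return defaultCount > 0 ? defaultCount : 3;
}
// e.g. resolveSubtaskCount(undefined, { recommendedSubtasks: 5 }, 4) === 5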
122
scripts/modules/task-manager/find-next-task.js
Normal file
@@ -0,0 +1,122 @@
/**
* Return the next work item:
* • Prefer an eligible SUBTASK that belongs to any parent task
* whose own status is `in-progress`.
* • If no such subtask exists, fall back to the best top-level task
* (previous behaviour).
*
* The function still exports the same name (`findNextTask`) so callers
* don't need to change. It now always returns an object with
* ─ id → number (task) or "parentId.subId" (subtask)
* ─ title → string
* ─ status → string
* ─ priority → string ("high" | "medium" | "low")
* ─ dependencies → array (all IDs expressed in the same dotted form)
* ─ parentId → number (present only when it's a subtask)
*
* @param {Object[]} tasks – full array of top-level tasks, each may contain .subtasks[]
* @returns {Object|null} – next work item or null if nothing is eligible
*/
function findNextTask(tasks) {
// ---------- helpers ----------------------------------------------------
const priorityValues = { high: 3, medium: 2, low: 1 };

const toFullSubId = (parentId, maybeDotId) => {
// "12.3" -> "12.3"
// 4 -> "12.4" (numeric / short form)
if (typeof maybeDotId === 'string' && maybeDotId.includes('.')) {
return maybeDotId;
}
return `${parentId}.${maybeDotId}`;
};

// ---------- build completed-ID set (tasks *and* subtasks) --------------
const completedIds = new Set();
tasks.forEach((t) => {
if (t.status === 'done' || t.status === 'completed') {
completedIds.add(String(t.id));
}
if (Array.isArray(t.subtasks)) {
t.subtasks.forEach((st) => {
if (st.status === 'done' || st.status === 'completed') {
completedIds.add(`${t.id}.${st.id}`);
}
});
}
});

// ---------- 1) look for eligible subtasks ------------------------------
const candidateSubtasks = [];

tasks
.filter((t) => t.status === 'in-progress' && Array.isArray(t.subtasks))
.forEach((parent) => {
parent.subtasks.forEach((st) => {
const stStatus = (st.status || 'pending').toLowerCase();
if (stStatus !== 'pending' && stStatus !== 'in-progress') return;

const fullDeps =
st.dependencies?.map((d) => toFullSubId(parent.id, d)) ?? [];

const depsSatisfied =
fullDeps.length === 0 ||
fullDeps.every((depId) => completedIds.has(String(depId)));

if (depsSatisfied) {
candidateSubtasks.push({
id: `${parent.id}.${st.id}`,
title: st.title || `Subtask ${st.id}`,
status: st.status || 'pending',
priority: st.priority || parent.priority || 'medium',
dependencies: fullDeps,
parentId: parent.id
});
}
});
});

if (candidateSubtasks.length > 0) {
// sort by priority → dep-count → parent-id → sub-id
candidateSubtasks.sort((a, b) => {
const pa = priorityValues[a.priority] ?? 2;
const pb = priorityValues[b.priority] ?? 2;
if (pb !== pa) return pb - pa;

if (a.dependencies.length !== b.dependencies.length)
return a.dependencies.length - b.dependencies.length;

// compare parent then sub-id numerically
const [aPar, aSub] = a.id.split('.').map(Number);
const [bPar, bSub] = b.id.split('.').map(Number);
if (aPar !== bPar) return aPar - bPar;
return aSub - bSub;
});
return candidateSubtasks[0];
}

// ---------- 2) fall back to top-level tasks (original logic) ------------
const eligibleTasks = tasks.filter((task) => {
const status = (task.status || 'pending').toLowerCase();
if (status !== 'pending' && status !== 'in-progress') return false;
const deps = task.dependencies ?? [];
return deps.every((depId) => completedIds.has(String(depId)));
});

if (eligibleTasks.length === 0) return null;

const nextTask = eligibleTasks.sort((a, b) => {
const pa = priorityValues[a.priority || 'medium'] ?? 2;
const pb = priorityValues[b.priority || 'medium'] ?? 2;
if (pb !== pa) return pb - pa;

const da = (a.dependencies ?? []).length;
const db = (b.dependencies ?? []).length;
if (da !== db) return da - db;

return a.id - b.id;
})[0];

return nextTask;
}

export default findNextTask;
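
// A minimal usage sketch, assuming the shapes documented above; the sample data is
// hypothetical and not part of this commit. With an in-progress parent whose first
// subtask is done, the next eligible subtask comes back with a dotted id.
const sampleTasks = [
	{
		id: 12,
		title: 'Build importer',
		status: 'in-progress',
		priority: 'high',
		dependencies: [],
		subtasks: [
			{ id: 1, title: 'Parse file', status: 'done', dependencies: [] },
			{ id: 2, title: 'Map fields', status: 'pending', dependencies: [1] }
		]
	}
];
// findNextTask(sampleTasks) would return { id: '12.2', parentId: 12, ... } because the
// short dependency 1 expands to '12.1', which is already in the completed-ID set.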
156
scripts/modules/task-manager/generate-task-files.js
Normal file
@@ -0,0 +1,156 @@
import fs from 'fs';
import path from 'path';
import chalk from 'chalk';

import { log, readJSON } from '../utils.js';
import { formatDependenciesWithStatus } from '../ui.js';
import { validateAndFixDependencies } from '../dependency-manager.js';
import { getDebugFlag } from '../config-manager.js';

/**
* Generate individual task files from tasks.json
* @param {string} tasksPath - Path to the tasks.json file
* @param {string} outputDir - Output directory for task files
* @param {Object} options - Additional options (mcpLog for MCP mode)
* @returns {Object|undefined} Result object in MCP mode, undefined in CLI mode
*/
function generateTaskFiles(tasksPath, outputDir, options = {}) {
try {
// Determine if we're in MCP mode by checking for mcpLog
const isMcpMode = !!options?.mcpLog;

log('info', `Preparing to regenerate task files in ${tasksPath}`);

const data = readJSON(tasksPath);
if (!data || !data.tasks) {
throw new Error(`No valid tasks found in ${tasksPath}`);
}

// Create the output directory if it doesn't exist
if (!fs.existsSync(outputDir)) {
fs.mkdirSync(outputDir, { recursive: true });
}

log('info', `Found ${data.tasks.length} tasks to regenerate`);

// Validate and fix dependencies before generating files
log('info', `Validating and fixing dependencies`);
validateAndFixDependencies(data, tasksPath);

// Generate task files
log('info', 'Generating individual task files...');
data.tasks.forEach((task) => {
const taskPath = path.join(
outputDir,
`task_${task.id.toString().padStart(3, '0')}.txt`
);

// Format the content
let content = `# Task ID: ${task.id}\n`;
content += `# Title: ${task.title}\n`;
content += `# Status: ${task.status || 'pending'}\n`;

// Format dependencies with their status
if (task.dependencies && task.dependencies.length > 0) {
content += `# Dependencies: ${formatDependenciesWithStatus(task.dependencies, data.tasks, false)}\n`;
} else {
content += '# Dependencies: None\n';
}

content += `# Priority: ${task.priority || 'medium'}\n`;
content += `# Description: ${task.description || ''}\n`;

// Add more detailed sections
content += '# Details:\n';
content += (task.details || '')
.split('\n')
.map((line) => line)
.join('\n');
content += '\n\n';

content += '# Test Strategy:\n';
content += (task.testStrategy || '')
.split('\n')
.map((line) => line)
.join('\n');
content += '\n';

// Add subtasks if they exist
if (task.subtasks && task.subtasks.length > 0) {
content += '\n# Subtasks:\n';

task.subtasks.forEach((subtask) => {
content += `## ${subtask.id}. ${subtask.title} [${subtask.status || 'pending'}]\n`;

if (subtask.dependencies && subtask.dependencies.length > 0) {
// Format subtask dependencies
let subtaskDeps = subtask.dependencies
.map((depId) => {
if (typeof depId === 'number') {
// Handle numeric dependencies to other subtasks
const foundSubtask = task.subtasks.find(
(st) => st.id === depId
);
if (foundSubtask) {
// Just return the plain ID format without any color formatting
return `${task.id}.${depId}`;
}
}
return depId.toString();
})
.join(', ');

content += `### Dependencies: ${subtaskDeps}\n`;
} else {
content += '### Dependencies: None\n';
}

content += `### Description: ${subtask.description || ''}\n`;
content += '### Details:\n';
content += (subtask.details || '')
.split('\n')
.map((line) => line)
.join('\n');
content += '\n\n';
});
}

// Write the file
fs.writeFileSync(taskPath, content);
// log('info', `Generated: task_${task.id.toString().padStart(3, '0')}.txt`); // Pollutes the CLI output
});

log(
'success',
`All ${data.tasks.length} tasks have been generated into '${outputDir}'.`
);

// Return success data in MCP mode
if (isMcpMode) {
return {
success: true,
count: data.tasks.length,
directory: outputDir
};
}
} catch (error) {
log('error', `Error generating task files: ${error.message}`);

// Only show error UI in CLI mode
if (!options?.mcpLog) {
console.error(chalk.red(`Error generating task files: ${error.message}`));

if (getDebugFlag()) {
// Use getter
console.error(error);
}

process.exit(1);
} else {
// In MCP mode, throw the error for the caller to handle
throw error;
}
}
}

export default generateTaskFiles;
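
// A brief usage sketch; the paths are illustrative, not mandated by this commit. Each task
// is written as task_<id padded to 3 digits>.txt in the output directory, so task 7 becomes
// tasks/task_007.txt. Passing any mcpLog object selects MCP mode, which returns a summary
// instead of exiting the process on error.
import generateTaskFiles from './generate-task-files.js';

const result = generateTaskFiles('tasks/tasks.json', 'tasks', {
	mcpLog: console // hypothetical logger stand-in; only its presence is checked here
});
// result => { success: true, count: <number of tasks>, directory: 'tasks' } in MCP mode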
42
scripts/modules/task-manager/is-task-dependent.js
Normal file
@@ -0,0 +1,42 @@
/**
* Check if a task is dependent on another task (directly or indirectly)
* Used to prevent circular dependencies
* @param {Array} allTasks - Array of all tasks
* @param {Object} task - The task to check
* @param {number} targetTaskId - The task ID to check dependency against
* @returns {boolean} Whether the task depends on the target task
*/
function isTaskDependentOn(allTasks, task, targetTaskId) {
// If the task is a subtask, check if its parent is the target
if (task.parentTaskId === targetTaskId) {
return true;
}

// Check direct dependencies
if (task.dependencies && task.dependencies.includes(targetTaskId)) {
return true;
}

// Check dependencies of dependencies (recursive)
if (task.dependencies) {
for (const depId of task.dependencies) {
const depTask = allTasks.find((t) => t.id === depId);
if (depTask && isTaskDependentOn(allTasks, depTask, targetTaskId)) {
return true;
}
}
}

// Check subtasks for dependencies
if (task.subtasks) {
for (const subtask of task.subtasks) {
if (isTaskDependentOn(allTasks, subtask, targetTaskId)) {
return true;
}
}
}

return false;
}

export default isTaskDependentOn;
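
// A minimal sketch of how this guard prevents circular dependencies, using hypothetical
// data: before letting task 2 depend on task 1, check whether task 1 already depends on
// task 2, directly or transitively.
const allTasks = [
	{ id: 1, dependencies: [3] },
	{ id: 2, dependencies: [] },
	{ id: 3, dependencies: [2] }
];
const wouldBeCircular = isTaskDependentOn(
	allTasks,
	allTasks.find((t) => t.id === 1),
	2
);
// wouldBeCircular === true (1 -> 3 -> 2), so adding "2 depends on 1" should be rejected.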
719
scripts/modules/task-manager/list-tasks.js
Normal file
@@ -0,0 +1,719 @@
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';

import { log, readJSON, truncate } from '../utils.js';
import findNextTask from './find-next-task.js';

import {
displayBanner,
getStatusWithColor,
formatDependenciesWithStatus,
createProgressBar
} from '../ui.js';

/**
* List all tasks
* @param {string} tasksPath - Path to the tasks.json file
* @param {string} statusFilter - Filter by status
* @param {boolean} withSubtasks - Whether to show subtasks
* @param {string} outputFormat - Output format (text or json)
* @returns {Object} - Task list result for json format
*/
function listTasks(
tasksPath,
statusFilter,
withSubtasks = false,
outputFormat = 'text'
) {
try {
// Only display banner for text output
if (outputFormat === 'text') {
displayBanner();
}

const data = readJSON(tasksPath); // Reads the whole tasks.json
if (!data || !data.tasks) {
throw new Error(`No valid tasks found in ${tasksPath}`);
}

// Filter tasks by status if specified
const filteredTasks =
statusFilter && statusFilter.toLowerCase() !== 'all' // <-- Added check for 'all'
? data.tasks.filter(
(task) =>
task.status &&
task.status.toLowerCase() === statusFilter.toLowerCase()
)
: data.tasks; // Default to all tasks if no filter or filter is 'all'

// Calculate completion statistics
const totalTasks = data.tasks.length;
const completedTasks = data.tasks.filter(
(task) => task.status === 'done' || task.status === 'completed'
).length;
const completionPercentage =
totalTasks > 0 ? (completedTasks / totalTasks) * 100 : 0;

// Count statuses for tasks
const doneCount = completedTasks;
const inProgressCount = data.tasks.filter(
(task) => task.status === 'in-progress'
).length;
const pendingCount = data.tasks.filter(
(task) => task.status === 'pending'
).length;
const blockedCount = data.tasks.filter(
(task) => task.status === 'blocked'
).length;
const deferredCount = data.tasks.filter(
(task) => task.status === 'deferred'
).length;
const cancelledCount = data.tasks.filter(
(task) => task.status === 'cancelled'
).length;

// Count subtasks and their statuses
let totalSubtasks = 0;
let completedSubtasks = 0;
let inProgressSubtasks = 0;
let pendingSubtasks = 0;
let blockedSubtasks = 0;
let deferredSubtasks = 0;
let cancelledSubtasks = 0;

data.tasks.forEach((task) => {
if (task.subtasks && task.subtasks.length > 0) {
totalSubtasks += task.subtasks.length;
completedSubtasks += task.subtasks.filter(
(st) => st.status === 'done' || st.status === 'completed'
).length;
inProgressSubtasks += task.subtasks.filter(
(st) => st.status === 'in-progress'
).length;
pendingSubtasks += task.subtasks.filter(
(st) => st.status === 'pending'
).length;
blockedSubtasks += task.subtasks.filter(
(st) => st.status === 'blocked'
).length;
deferredSubtasks += task.subtasks.filter(
(st) => st.status === 'deferred'
).length;
cancelledSubtasks += task.subtasks.filter(
(st) => st.status === 'cancelled'
).length;
}
});

const subtaskCompletionPercentage =
totalSubtasks > 0 ? (completedSubtasks / totalSubtasks) * 100 : 0;

// For JSON output, return structured data
if (outputFormat === 'json') {
// *** Modification: Remove 'details' field for JSON output ***
const tasksWithoutDetails = filteredTasks.map((task) => {
// <-- USES filteredTasks!
// Omit 'details' from the parent task
const { details, ...taskRest } = task;

// If subtasks exist, omit 'details' from them too
if (taskRest.subtasks && Array.isArray(taskRest.subtasks)) {
taskRest.subtasks = taskRest.subtasks.map((subtask) => {
const { details: subtaskDetails, ...subtaskRest } = subtask;
return subtaskRest;
});
}
return taskRest;
});
// *** End of Modification ***

return {
tasks: tasksWithoutDetails, // <--- THIS IS THE ARRAY BEING RETURNED
filter: statusFilter || 'all', // Return the actual filter used
stats: {
total: totalTasks,
completed: doneCount,
inProgress: inProgressCount,
pending: pendingCount,
blocked: blockedCount,
deferred: deferredCount,
cancelled: cancelledCount,
completionPercentage,
subtasks: {
total: totalSubtasks,
completed: completedSubtasks,
inProgress: inProgressSubtasks,
pending: pendingSubtasks,
blocked: blockedSubtasks,
deferred: deferredSubtasks,
cancelled: cancelledSubtasks,
completionPercentage: subtaskCompletionPercentage
}
}
};
}

// ... existing code for text output ...

// Calculate status breakdowns as percentages of total
const taskStatusBreakdown = {
'in-progress': totalTasks > 0 ? (inProgressCount / totalTasks) * 100 : 0,
pending: totalTasks > 0 ? (pendingCount / totalTasks) * 100 : 0,
blocked: totalTasks > 0 ? (blockedCount / totalTasks) * 100 : 0,
deferred: totalTasks > 0 ? (deferredCount / totalTasks) * 100 : 0,
cancelled: totalTasks > 0 ? (cancelledCount / totalTasks) * 100 : 0
};

const subtaskStatusBreakdown = {
'in-progress':
totalSubtasks > 0 ? (inProgressSubtasks / totalSubtasks) * 100 : 0,
pending: totalSubtasks > 0 ? (pendingSubtasks / totalSubtasks) * 100 : 0,
blocked: totalSubtasks > 0 ? (blockedSubtasks / totalSubtasks) * 100 : 0,
deferred:
totalSubtasks > 0 ? (deferredSubtasks / totalSubtasks) * 100 : 0,
cancelled:
totalSubtasks > 0 ? (cancelledSubtasks / totalSubtasks) * 100 : 0
};

// Create progress bars with status breakdowns
const taskProgressBar = createProgressBar(
completionPercentage,
30,
taskStatusBreakdown
);
const subtaskProgressBar = createProgressBar(
subtaskCompletionPercentage,
30,
subtaskStatusBreakdown
);

// Calculate dependency statistics
const completedTaskIds = new Set(
data.tasks
.filter((t) => t.status === 'done' || t.status === 'completed')
.map((t) => t.id)
);

const tasksWithNoDeps = data.tasks.filter(
(t) =>
t.status !== 'done' &&
t.status !== 'completed' &&
(!t.dependencies || t.dependencies.length === 0)
).length;

const tasksWithAllDepsSatisfied = data.tasks.filter(
(t) =>
t.status !== 'done' &&
t.status !== 'completed' &&
t.dependencies &&
t.dependencies.length > 0 &&
t.dependencies.every((depId) => completedTaskIds.has(depId))
).length;

const tasksWithUnsatisfiedDeps = data.tasks.filter(
(t) =>
t.status !== 'done' &&
t.status !== 'completed' &&
t.dependencies &&
t.dependencies.length > 0 &&
!t.dependencies.every((depId) => completedTaskIds.has(depId))
).length;

// Calculate total tasks ready to work on (no deps + satisfied deps)
const tasksReadyToWork = tasksWithNoDeps + tasksWithAllDepsSatisfied;

// Calculate most depended-on tasks
const dependencyCount = {};
data.tasks.forEach((task) => {
if (task.dependencies && task.dependencies.length > 0) {
task.dependencies.forEach((depId) => {
dependencyCount[depId] = (dependencyCount[depId] || 0) + 1;
});
}
});

// Find the most depended-on task
let mostDependedOnTaskId = null;
let maxDependents = 0;

for (const [taskId, count] of Object.entries(dependencyCount)) {
if (count > maxDependents) {
maxDependents = count;
mostDependedOnTaskId = parseInt(taskId);
}
}

// Get the most depended-on task
const mostDependedOnTask =
mostDependedOnTaskId !== null
? data.tasks.find((t) => t.id === mostDependedOnTaskId)
: null;

// Calculate average dependencies per task
const totalDependencies = data.tasks.reduce(
(sum, task) => sum + (task.dependencies ? task.dependencies.length : 0),
0
);
const avgDependenciesPerTask = totalDependencies / data.tasks.length;

// Find next task to work on
const nextItem = findNextTask(data.tasks);

// Get terminal width - more reliable method
let terminalWidth;
try {
// Try to get the actual terminal columns
terminalWidth = process.stdout.columns;
} catch (e) {
// Fallback if columns cannot be determined
log('debug', 'Could not determine terminal width, using default');
}
// Ensure we have a reasonable default if detection fails
terminalWidth = terminalWidth || 80;

// Ensure terminal width is at least a minimum value to prevent layout issues
terminalWidth = Math.max(terminalWidth, 80);

// Create dashboard content
const projectDashboardContent =
chalk.white.bold('Project Dashboard') +
'\n' +
`Tasks Progress: ${chalk.greenBright(taskProgressBar)} ${completionPercentage.toFixed(0)}%\n` +
`Done: ${chalk.green(doneCount)} In Progress: ${chalk.blue(inProgressCount)} Pending: ${chalk.yellow(pendingCount)} Blocked: ${chalk.red(blockedCount)} Deferred: ${chalk.gray(deferredCount)} Cancelled: ${chalk.gray(cancelledCount)}\n\n` +
`Subtasks Progress: ${chalk.cyan(subtaskProgressBar)} ${subtaskCompletionPercentage.toFixed(0)}%\n` +
`Completed: ${chalk.green(completedSubtasks)}/${totalSubtasks} In Progress: ${chalk.blue(inProgressSubtasks)} Pending: ${chalk.yellow(pendingSubtasks)} Blocked: ${chalk.red(blockedSubtasks)} Deferred: ${chalk.gray(deferredSubtasks)} Cancelled: ${chalk.gray(cancelledSubtasks)}\n\n` +
chalk.cyan.bold('Priority Breakdown:') +
'\n' +
`${chalk.red('•')} ${chalk.white('High priority:')} ${data.tasks.filter((t) => t.priority === 'high').length}\n` +
`${chalk.yellow('•')} ${chalk.white('Medium priority:')} ${data.tasks.filter((t) => t.priority === 'medium').length}\n` +
`${chalk.green('•')} ${chalk.white('Low priority:')} ${data.tasks.filter((t) => t.priority === 'low').length}`;

const dependencyDashboardContent =
chalk.white.bold('Dependency Status & Next Task') +
'\n' +
chalk.cyan.bold('Dependency Metrics:') +
'\n' +
`${chalk.green('•')} ${chalk.white('Tasks with no dependencies:')} ${tasksWithNoDeps}\n` +
`${chalk.green('•')} ${chalk.white('Tasks ready to work on:')} ${tasksReadyToWork}\n` +
`${chalk.yellow('•')} ${chalk.white('Tasks blocked by dependencies:')} ${tasksWithUnsatisfiedDeps}\n` +
`${chalk.magenta('•')} ${chalk.white('Most depended-on task:')} ${mostDependedOnTask ? chalk.cyan(`#${mostDependedOnTaskId} (${maxDependents} dependents)`) : chalk.gray('None')}\n` +
`${chalk.blue('•')} ${chalk.white('Avg dependencies per task:')} ${avgDependenciesPerTask.toFixed(1)}\n\n` +
chalk.cyan.bold('Next Task to Work On:') +
'\n' +
`ID: ${chalk.cyan(nextItem ? nextItem.id : 'N/A')} - ${nextItem ? chalk.white.bold(truncate(nextItem.title, 40)) : chalk.yellow('No task available')}\n` +
`Priority: ${nextItem ? chalk.white(nextItem.priority || 'medium') : ''} Dependencies: ${nextItem ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true) : ''}`;

// Calculate width for side-by-side display
// Box borders, padding take approximately 4 chars on each side
const minDashboardWidth = 50; // Minimum width for dashboard
const minDependencyWidth = 50; // Minimum width for dependency dashboard
const totalMinWidth = minDashboardWidth + minDependencyWidth + 4; // Extra 4 chars for spacing

// If terminal is wide enough, show boxes side by side with responsive widths
if (terminalWidth >= totalMinWidth) {
// Calculate widths proportionally for each box - use exact 50% width each
const availableWidth = terminalWidth;
const halfWidth = Math.floor(availableWidth / 2);

// Account for border characters (2 chars on each side)
const boxContentWidth = halfWidth - 4;

// Create boxen options with precise widths
const dashboardBox = boxen(projectDashboardContent, {
padding: 1,
borderColor: 'blue',
borderStyle: 'round',
width: boxContentWidth,
dimBorder: false
});

const dependencyBox = boxen(dependencyDashboardContent, {
padding: 1,
borderColor: 'magenta',
borderStyle: 'round',
width: boxContentWidth,
dimBorder: false
});

// Create a better side-by-side layout with exact spacing
const dashboardLines = dashboardBox.split('\n');
const dependencyLines = dependencyBox.split('\n');

// Make sure both boxes have the same height
const maxHeight = Math.max(dashboardLines.length, dependencyLines.length);

// For each line of output, pad the dashboard line to exactly halfWidth chars
// This ensures the dependency box starts at exactly the right position
const combinedLines = [];
for (let i = 0; i < maxHeight; i++) {
// Get the dashboard line (or empty string if we've run out of lines)
const dashLine = i < dashboardLines.length ? dashboardLines[i] : '';
// Get the dependency line (or empty string if we've run out of lines)
const depLine = i < dependencyLines.length ? dependencyLines[i] : '';

// Remove any trailing spaces from dashLine before padding to exact width
const trimmedDashLine = dashLine.trimEnd();
// Pad the dashboard line to exactly halfWidth chars with no extra spaces
const paddedDashLine = trimmedDashLine.padEnd(halfWidth, ' ');

// Join the lines with no space in between
combinedLines.push(paddedDashLine + depLine);
}

// Join all lines and output
console.log(combinedLines.join('\n'));
} else {
// Terminal too narrow, show boxes stacked vertically
const dashboardBox = boxen(projectDashboardContent, {
padding: 1,
borderColor: 'blue',
borderStyle: 'round',
margin: { top: 0, bottom: 1 }
});

const dependencyBox = boxen(dependencyDashboardContent, {
padding: 1,
borderColor: 'magenta',
borderStyle: 'round',
margin: { top: 0, bottom: 1 }
});

// Display stacked vertically
console.log(dashboardBox);
console.log(dependencyBox);
}

if (filteredTasks.length === 0) {
console.log(
boxen(
statusFilter
? chalk.yellow(`No tasks with status '${statusFilter}' found`)
: chalk.yellow('No tasks found'),
{ padding: 1, borderColor: 'yellow', borderStyle: 'round' }
)
);
return;
}

// COMPLETELY REVISED TABLE APPROACH
// Define percentage-based column widths and calculate actual widths
// Adjust percentages based on content type and user requirements

// Adjust ID width if showing subtasks (subtask IDs are longer: e.g., "1.2")
const idWidthPct = withSubtasks ? 10 : 7;

// Calculate max status length to accommodate "in-progress"
const statusWidthPct = 15;

// Increase priority column width as requested
const priorityWidthPct = 12;

// Make dependencies column smaller as requested (-20%)
const depsWidthPct = 20;

// Calculate title/description width as remaining space (+20% from dependencies reduction)
const titleWidthPct =
100 - idWidthPct - statusWidthPct - priorityWidthPct - depsWidthPct;

// Allow 10 characters for borders and padding
const availableWidth = terminalWidth - 10;

// Calculate actual column widths based on percentages
const idWidth = Math.floor(availableWidth * (idWidthPct / 100));
const statusWidth = Math.floor(availableWidth * (statusWidthPct / 100));
const priorityWidth = Math.floor(availableWidth * (priorityWidthPct / 100));
const depsWidth = Math.floor(availableWidth * (depsWidthPct / 100));
const titleWidth = Math.floor(availableWidth * (titleWidthPct / 100));

// Create a table with correct borders and spacing
const table = new Table({
head: [
chalk.cyan.bold('ID'),
chalk.cyan.bold('Title'),
chalk.cyan.bold('Status'),
chalk.cyan.bold('Priority'),
chalk.cyan.bold('Dependencies')
],
colWidths: [idWidth, titleWidth, statusWidth, priorityWidth, depsWidth],
style: {
head: [], // No special styling for header
border: [], // No special styling for border
compact: false // Use default spacing
},
wordWrap: true,
wrapOnWordBoundary: true
});

// Process tasks for the table
filteredTasks.forEach((task) => {
// Format dependencies with status indicators (colored)
let depText = 'None';
if (task.dependencies && task.dependencies.length > 0) {
// Use the proper formatDependenciesWithStatus function for colored status
depText = formatDependenciesWithStatus(
task.dependencies,
data.tasks,
true
);
} else {
depText = chalk.gray('None');
}

// Clean up any ANSI codes or confusing characters
const cleanTitle = task.title.replace(/\n/g, ' ');

// Get priority color
const priorityColor =
{
high: chalk.red,
medium: chalk.yellow,
low: chalk.gray
}[task.priority || 'medium'] || chalk.white;

// Format status
const status = getStatusWithColor(task.status, true);

// Add the row without truncating dependencies
table.push([
task.id.toString(),
truncate(cleanTitle, titleWidth - 3),
status,
priorityColor(truncate(task.priority || 'medium', priorityWidth - 2)),
depText // No truncation for dependencies
]);

// Add subtasks if requested
if (withSubtasks && task.subtasks && task.subtasks.length > 0) {
task.subtasks.forEach((subtask) => {
// Format subtask dependencies with status indicators
let subtaskDepText = 'None';
if (subtask.dependencies && subtask.dependencies.length > 0) {
// Handle both subtask-to-subtask and subtask-to-task dependencies
const formattedDeps = subtask.dependencies
.map((depId) => {
// Check if it's a dependency on another subtask
if (typeof depId === 'number' && depId < 100) {
const foundSubtask = task.subtasks.find(
(st) => st.id === depId
);
if (foundSubtask) {
const isDone =
foundSubtask.status === 'done' ||
foundSubtask.status === 'completed';
const isInProgress = foundSubtask.status === 'in-progress';

// Use consistent color formatting instead of emojis
if (isDone) {
return chalk.green.bold(`${task.id}.${depId}`);
} else if (isInProgress) {
return chalk.hex('#FFA500').bold(`${task.id}.${depId}`);
} else {
return chalk.red.bold(`${task.id}.${depId}`);
}
}
}
// Default to regular task dependency
const depTask = data.tasks.find((t) => t.id === depId);
if (depTask) {
const isDone =
depTask.status === 'done' || depTask.status === 'completed';
const isInProgress = depTask.status === 'in-progress';
// Use the same color scheme as in formatDependenciesWithStatus
if (isDone) {
return chalk.green.bold(`${depId}`);
} else if (isInProgress) {
return chalk.hex('#FFA500').bold(`${depId}`);
} else {
return chalk.red.bold(`${depId}`);
}
}
return chalk.cyan(depId.toString());
})
.join(', ');

subtaskDepText = formattedDeps || chalk.gray('None');
}

// Add the subtask row without truncating dependencies
table.push([
`${task.id}.${subtask.id}`,
chalk.dim(`└─ ${truncate(subtask.title, titleWidth - 5)}`),
getStatusWithColor(subtask.status, true),
chalk.dim('-'),
subtaskDepText // No truncation for dependencies
]);
});
}
});

// Ensure we output the table even if it had to wrap
try {
console.log(table.toString());
} catch (err) {
log('error', `Error rendering table: ${err.message}`);

// Fall back to simpler output
console.log(
chalk.yellow(
'\nFalling back to simple task list due to terminal width constraints:'
)
);
filteredTasks.forEach((task) => {
console.log(
`${chalk.cyan(task.id)}: ${chalk.white(task.title)} - ${getStatusWithColor(task.status)}`
);
});
}

// Show filter info if applied
if (statusFilter) {
console.log(chalk.yellow(`\nFiltered by status: ${statusFilter}`));
console.log(
chalk.yellow(`Showing ${filteredTasks.length} of ${totalTasks} tasks`)
);
}

// Define priority colors
const priorityColors = {
high: chalk.red.bold,
medium: chalk.yellow,
low: chalk.gray
};

// Show next task box in a prominent color
if (nextItem) {
// Prepare subtasks section if they exist (Only tasks have .subtasks property)
let subtasksSection = '';
// Check if the nextItem is a top-level task before looking for subtasks
const parentTaskForSubtasks = data.tasks.find(
(t) => String(t.id) === String(nextItem.id)
); // Find the original task object
if (
parentTaskForSubtasks &&
parentTaskForSubtasks.subtasks &&
parentTaskForSubtasks.subtasks.length > 0
) {
subtasksSection = `\n\n${chalk.white.bold('Subtasks:')}\n`;
subtasksSection += parentTaskForSubtasks.subtasks
.map((subtask) => {
// Using a more simplified format for subtask status display
const status = subtask.status || 'pending';
const statusColors = {
done: chalk.green,
completed: chalk.green,
pending: chalk.yellow,
'in-progress': chalk.blue,
deferred: chalk.gray,
blocked: chalk.red,
cancelled: chalk.gray
};
const statusColor =
statusColors[status.toLowerCase()] || chalk.white;
// Ensure subtask ID is displayed correctly using parent ID from the original task object
return `${chalk.cyan(`${parentTaskForSubtasks.id}.${subtask.id}`)} [${statusColor(status)}] ${subtask.title}`;
})
.join('\n');
}

console.log(
boxen(
chalk.hex('#FF8800').bold(
// Use nextItem.id and nextItem.title
`🔥 Next Task to Work On: #${nextItem.id} - ${nextItem.title}`
) +
'\n\n' +
// Use nextItem.priority, nextItem.status, nextItem.dependencies
`${chalk.white('Priority:')} ${priorityColors[nextItem.priority || 'medium'](nextItem.priority || 'medium')} ${chalk.white('Status:')} ${getStatusWithColor(nextItem.status, true)}\n` +
`${chalk.white('Dependencies:')} ${nextItem.dependencies && nextItem.dependencies.length > 0 ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true) : chalk.gray('None')}\n\n` +
// Use nextItem.description (Note: findNextTask doesn't return description, need to fetch original task/subtask for this)
// *** Fetching original item for description and details ***
`${chalk.white('Description:')} ${getWorkItemDescription(nextItem, data.tasks)}` +
subtasksSection + // <-- Subtasks are handled above now
'\n\n' +
// Use nextItem.id
`${chalk.cyan('Start working:')} ${chalk.yellow(`task-master set-status --id=${nextItem.id} --status=in-progress`)}\n` +
// Use nextItem.id
`${chalk.cyan('View details:')} ${chalk.yellow(`task-master show ${nextItem.id}`)}`,
{
padding: { left: 2, right: 2, top: 1, bottom: 1 },
borderColor: '#FF8800',
borderStyle: 'round',
margin: { top: 1, bottom: 1 },
title: '⚡ RECOMMENDED NEXT TASK ⚡',
titleAlignment: 'center',
width: terminalWidth - 4,
fullscreen: false
}
)
);
} else {
console.log(
boxen(
chalk.hex('#FF8800').bold('No eligible next task found') +
'\n\n' +
'All pending tasks have dependencies that are not yet completed, or all tasks are done.',
{
padding: 1,
borderColor: '#FF8800',
borderStyle: 'round',
margin: { top: 1, bottom: 1 },
title: '⚡ NEXT TASK ⚡',
titleAlignment: 'center',
width: terminalWidth - 4 // Use full terminal width minus a small margin
}
)
);
}

// Show next steps
console.log(
boxen(
chalk.white.bold('Suggested Next Steps:') +
'\n\n' +
`${chalk.cyan('1.')} Run ${chalk.yellow('task-master next')} to see what to work on next\n` +
`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down a task into subtasks\n` +
`${chalk.cyan('3.')} Run ${chalk.yellow('task-master set-status --id=<id> --status=done')} to mark a task as complete`,
{
padding: 1,
borderColor: 'gray',
borderStyle: 'round',
margin: { top: 1 }
}
)
);
} catch (error) {
log('error', `Error listing tasks: ${error.message}`);

if (outputFormat === 'json') {
// Return structured error for JSON output
throw {
code: 'TASK_LIST_ERROR',
message: error.message,
details: error.stack
};
}

console.error(chalk.red(`Error: ${error.message}`));
process.exit(1);
}
}

// *** Helper function to get description for task or subtask ***
function getWorkItemDescription(item, allTasks) {
if (!item) return 'N/A';
if (item.parentId) {
// It's a subtask
const parent = allTasks.find((t) => t.id === item.parentId);
const subtask = parent?.subtasks?.find(
(st) => `${parent.id}.${st.id}` === item.id
);
return subtask?.description || 'No description available.';
} else {
// It's a top-level task
const task = allTasks.find((t) => String(t.id) === String(item.id));
return task?.description || 'No description available.';
}
}

export default listTasks;
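
// A short usage sketch; the path and values are illustrative only. With outputFormat
// 'json' the function returns structured data (with every 'details' field stripped)
// instead of rendering the CLI dashboard and table.
const listResult = listTasks('tasks/tasks.json', 'pending', false, 'json');
// listResult.tasks        -> filtered tasks without their 'details' fields
// listResult.filter       -> 'pending'
// listResult.stats.total  -> total number of tasks in tasks.json
// listResult.stats.subtasks.completionPercentage -> 0-100 subtask completion figure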
560
scripts/modules/task-manager/models.js
Normal file
@@ -0,0 +1,560 @@
/**
* models.js
* Core functionality for managing AI model configurations
*/

import path from 'path';
import fs from 'fs';
import https from 'https';
import {
getMainModelId,
getResearchModelId,
getFallbackModelId,
getAvailableModels,
getMainProvider,
getResearchProvider,
getFallbackProvider,
isApiKeySet,
getMcpApiKeyStatus,
getConfig,
writeConfig,
isConfigFilePresent,
getAllProviders
} from '../config-manager.js';

/**
* Fetches the list of models from OpenRouter API.
* @returns {Promise<Array|null>} A promise that resolves with the list of model IDs or null if fetch fails.
*/
function fetchOpenRouterModels() {
return new Promise((resolve) => {
const options = {
hostname: 'openrouter.ai',
path: '/api/v1/models',
method: 'GET',
headers: {
Accept: 'application/json'
}
};

const req = https.request(options, (res) => {
let data = '';
res.on('data', (chunk) => {
data += chunk;
});
res.on('end', () => {
if (res.statusCode === 200) {
try {
const parsedData = JSON.parse(data);
resolve(parsedData.data || []); // Return the array of models
} catch (e) {
console.error('Error parsing OpenRouter response:', e);
resolve(null); // Indicate failure
}
} else {
console.error(
`OpenRouter API request failed with status code: ${res.statusCode}`
);
resolve(null); // Indicate failure
}
});
});

req.on('error', (e) => {
console.error('Error fetching OpenRouter models:', e);
resolve(null); // Indicate failure
});
req.end();
});
}
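
// A brief usage sketch: the promise resolves with the raw model array from the OpenRouter
// /api/v1/models endpoint, or null on any request/parse failure, so callers only need a
// truthiness check. The model id below is a placeholder, not a real recommendation.
const openRouterModels = await fetchOpenRouterModels();
if (openRouterModels) {
	const exists = openRouterModels.some((m) => m.id === 'some-provider/some-model');
	console.log(`Model known to OpenRouter: ${exists}`);
} else {
	console.log('Could not reach OpenRouter; skipping live validation.');
}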

/**
* Get the current model configuration
* @param {Object} [options] - Options for the operation
* @param {Object} [options.session] - Session object containing environment variables (for MCP)
* @param {Function} [options.mcpLog] - MCP logger object (for MCP)
* @param {string} [options.projectRoot] - Project root directory
* @returns {Object} RESTful response with current model configuration
*/
async function getModelConfiguration(options = {}) {
const { mcpLog, projectRoot } = options;

const report = (level, ...args) => {
if (mcpLog && typeof mcpLog[level] === 'function') {
mcpLog[level](...args);
}
};

// Check if configuration file exists using provided project root
let configPath;
let configExists = false;

if (projectRoot) {
configPath = path.join(projectRoot, '.taskmasterconfig');
configExists = fs.existsSync(configPath);
report(
'info',
`Checking for .taskmasterconfig at: ${configPath}, exists: ${configExists}`
);
} else {
configExists = isConfigFilePresent();
report(
'info',
`Checking for .taskmasterconfig using isConfigFilePresent(), exists: ${configExists}`
);
}

if (!configExists) {
return {
success: false,
error: {
code: 'CONFIG_MISSING',
message:
'The .taskmasterconfig file is missing. Run "task-master models --setup" to create it.'
}
};
}

try {
// Get current settings - these should use the config from the found path automatically
const mainProvider = getMainProvider(projectRoot);
const mainModelId = getMainModelId(projectRoot);
const researchProvider = getResearchProvider(projectRoot);
const researchModelId = getResearchModelId(projectRoot);
const fallbackProvider = getFallbackProvider(projectRoot);
const fallbackModelId = getFallbackModelId(projectRoot);

// Check API keys
const mainCliKeyOk = isApiKeySet(mainProvider);
const mainMcpKeyOk = getMcpApiKeyStatus(mainProvider, projectRoot);
const researchCliKeyOk = isApiKeySet(researchProvider);
const researchMcpKeyOk = getMcpApiKeyStatus(researchProvider, projectRoot);
const fallbackCliKeyOk = fallbackProvider
? isApiKeySet(fallbackProvider)
: true;
const fallbackMcpKeyOk = fallbackProvider
? getMcpApiKeyStatus(fallbackProvider, projectRoot)
: true;

// Get available models to find detailed info
const availableModels = getAvailableModels(projectRoot);

// Find model details
const mainModelData = availableModels.find((m) => m.id === mainModelId);
const researchModelData = availableModels.find(
(m) => m.id === researchModelId
);
const fallbackModelData = fallbackModelId
? availableModels.find((m) => m.id === fallbackModelId)
: null;

// Return structured configuration data
return {
success: true,
data: {
activeModels: {
main: {
provider: mainProvider,
modelId: mainModelId,
sweScore: mainModelData?.swe_score || null,
cost: mainModelData?.cost_per_1m_tokens || null,
keyStatus: {
cli: mainCliKeyOk,
mcp: mainMcpKeyOk
}
},
research: {
provider: researchProvider,
modelId: researchModelId,
sweScore: researchModelData?.swe_score || null,
cost: researchModelData?.cost_per_1m_tokens || null,
keyStatus: {
cli: researchCliKeyOk,
mcp: researchMcpKeyOk
}
},
fallback: fallbackProvider
? {
provider: fallbackProvider,
modelId: fallbackModelId,
sweScore: fallbackModelData?.swe_score || null,
cost: fallbackModelData?.cost_per_1m_tokens || null,
keyStatus: {
cli: fallbackCliKeyOk,
mcp: fallbackMcpKeyOk
}
}
: null
},
message: 'Successfully retrieved current model configuration'
}
};
} catch (error) {
report('error', `Error getting model configuration: ${error.message}`);
return {
success: false,
error: {
code: 'CONFIG_ERROR',
message: error.message
}
};
}
}
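
// A short usage sketch (projectRoot is illustrative): the function does not throw for the
// common failure mode; it returns { success: false, error } when .taskmasterconfig is
// missing and { success: true, data } otherwise.
const modelConfig = await getModelConfiguration({ projectRoot: '/path/to/project' });
if (modelConfig.success) {
	const { provider, modelId, keyStatus } = modelConfig.data.activeModels.main;
	console.log(`Main model: ${provider}/${modelId} (CLI key set: ${keyStatus.cli})`);
} else {
	console.error(modelConfig.error.message);
}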
|
||||
|
||||
/**
|
||||
* Get all available models not currently in use
|
||||
* @param {Object} [options] - Options for the operation
|
||||
* @param {Object} [options.session] - Session object containing environment variables (for MCP)
|
||||
* @param {Function} [options.mcpLog] - MCP logger object (for MCP)
|
||||
* @param {string} [options.projectRoot] - Project root directory
|
||||
* @returns {Object} RESTful response with available models
|
||||
*/
|
||||
async function getAvailableModelsList(options = {}) {
|
||||
const { mcpLog, projectRoot } = options;
|
||||
|
||||
const report = (level, ...args) => {
|
||||
if (mcpLog && typeof mcpLog[level] === 'function') {
|
||||
mcpLog[level](...args);
|
||||
}
|
||||
};
|
||||
|
||||
// Check if configuration file exists using provided project root
|
||||
let configPath;
|
||||
let configExists = false;
|
||||
|
||||
if (projectRoot) {
|
||||
configPath = path.join(projectRoot, '.taskmasterconfig');
|
||||
configExists = fs.existsSync(configPath);
|
||||
report(
|
||||
'info',
|
||||
`Checking for .taskmasterconfig at: ${configPath}, exists: ${configExists}`
|
||||
);
|
||||
} else {
|
||||
configExists = isConfigFilePresent();
|
||||
report(
|
||||
'info',
|
||||
`Checking for .taskmasterconfig using isConfigFilePresent(), exists: ${configExists}`
|
||||
);
|
||||
}
|
||||
|
||||
if (!configExists) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'CONFIG_MISSING',
|
||||
message:
|
||||
'The .taskmasterconfig file is missing. Run "task-master models --setup" to create it.'
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
try {
|
||||
// Get all available models
|
||||
const allAvailableModels = getAvailableModels(projectRoot);
|
||||
|
||||
if (!allAvailableModels || allAvailableModels.length === 0) {
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
models: [],
|
||||
message: 'No available models found'
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Get currently used model IDs
|
||||
const mainModelId = getMainModelId(projectRoot);
|
||||
const researchModelId = getResearchModelId(projectRoot);
|
||||
const fallbackModelId = getFallbackModelId(projectRoot);
|
||||
|
||||
// Filter out placeholder models and active models
|
||||
const activeIds = [mainModelId, researchModelId, fallbackModelId].filter(
|
||||
Boolean
|
||||
);
|
||||
const otherAvailableModels = allAvailableModels.map((model) => ({
|
||||
provider: model.provider || 'N/A',
|
||||
modelId: model.id,
|
||||
sweScore: model.swe_score || null,
|
||||
cost: model.cost_per_1m_tokens || null,
|
||||
allowedRoles: model.allowed_roles || []
|
||||
}));
|
||||
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
models: otherAvailableModels,
|
||||
message: `Successfully retrieved ${otherAvailableModels.length} available models`
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
report('error', `Error getting available models: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'MODELS_LIST_ERROR',
|
||||
message: error.message
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Update a specific model in the configuration
 * @param {string} role - The model role to update ('main', 'research', 'fallback')
 * @param {string} modelId - The model ID to set for the role
 * @param {Object} [options] - Options for the operation
 * @param {string} [options.providerHint] - Provider hint if already determined ('openrouter' or 'ollama')
 * @param {Object} [options.session] - Session object containing environment variables (for MCP)
 * @param {Function} [options.mcpLog] - MCP logger object (for MCP)
 * @param {string} [options.projectRoot] - Project root directory
 * @returns {Object} RESTful response with result of update operation
 */
async function setModel(role, modelId, options = {}) {
	const { mcpLog, projectRoot, providerHint } = options;

	const report = (level, ...args) => {
		if (mcpLog && typeof mcpLog[level] === 'function') {
			mcpLog[level](...args);
		}
	};

	// Check if configuration file exists using provided project root
	let configPath;
	let configExists = false;

	if (projectRoot) {
		configPath = path.join(projectRoot, '.taskmasterconfig');
		configExists = fs.existsSync(configPath);
		report(
			'info',
			`Checking for .taskmasterconfig at: ${configPath}, exists: ${configExists}`
		);
	} else {
		configExists = isConfigFilePresent();
		report(
			'info',
			`Checking for .taskmasterconfig using isConfigFilePresent(), exists: ${configExists}`
		);
	}

	if (!configExists) {
		return {
			success: false,
			error: {
				code: 'CONFIG_MISSING',
				message:
					'The .taskmasterconfig file is missing. Run "task-master models --setup" to create it.'
			}
		};
	}

	// Validate role
	if (!['main', 'research', 'fallback'].includes(role)) {
		return {
			success: false,
			error: {
				code: 'INVALID_ROLE',
				message: `Invalid role: ${role}. Must be one of: main, research, fallback.`
			}
		};
	}

	// Validate model ID
	if (typeof modelId !== 'string' || modelId.trim() === '') {
		return {
			success: false,
			error: {
				code: 'INVALID_MODEL_ID',
				message: `Invalid model ID: ${modelId}. Must be a non-empty string.`
			}
		};
	}

	try {
		const availableModels = getAvailableModels(projectRoot);
		const currentConfig = getConfig(projectRoot);
		let determinedProvider = null; // Initialize provider
		let warningMessage = null;

		// Find the model data in internal list initially to see if it exists at all
		let modelData = availableModels.find((m) => m.id === modelId);

		// --- Revised Logic: Prioritize providerHint --- //

		if (providerHint) {
			// Hint provided (--ollama or --openrouter flag used)
			if (modelData && modelData.provider === providerHint) {
				// Found internally AND provider matches the hint
				determinedProvider = providerHint;
				report(
					'info',
					`Model ${modelId} found internally with matching provider hint ${determinedProvider}.`
				);
			} else {
				// Either not found internally, OR found but under a DIFFERENT provider than hinted.
				// Proceed with custom logic based ONLY on the hint.
				if (providerHint === 'openrouter') {
					// Check OpenRouter ONLY because hint was openrouter
					report('info', `Checking OpenRouter for ${modelId} (as hinted)...`);
					const openRouterModels = await fetchOpenRouterModels();

					if (
						openRouterModels &&
						openRouterModels.some((m) => m.id === modelId)
					) {
						determinedProvider = 'openrouter';
						warningMessage = `Warning: Custom OpenRouter model '${modelId}' set. This model is not officially validated by Taskmaster and may not function as expected.`;
						report('warn', warningMessage);
					} else {
						// Hinted as OpenRouter but not found in live check
						throw new Error(
							`Model ID "${modelId}" not found in the live OpenRouter model list. Please verify the ID and ensure it's available on OpenRouter.`
						);
					}
				} else if (providerHint === 'ollama') {
					// Hinted as Ollama - set provider directly WITHOUT checking OpenRouter
					determinedProvider = 'ollama';
					warningMessage = `Warning: Custom Ollama model '${modelId}' set. Ensure your Ollama server is running and has pulled this model. Taskmaster cannot guarantee compatibility.`;
					report('warn', warningMessage);
				} else {
					// Invalid provider hint - should not happen
					throw new Error(`Invalid provider hint received: ${providerHint}`);
				}
			}
		} else {
			// No hint provided (flags not used)
			if (modelData) {
				// Found internally, use the provider from the internal list
				determinedProvider = modelData.provider;
				report(
					'info',
					`Model ${modelId} found internally with provider ${determinedProvider}.`
				);
			} else {
				// Model not found and no provider hint was given
				return {
					success: false,
					error: {
						code: 'MODEL_NOT_FOUND_NO_HINT',
						message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter or --ollama.`
					}
				};
			}
		}

		// --- End of Revised Logic --- //

		// At this point, we should have a determinedProvider if the model is valid (internally or custom)
		if (!determinedProvider) {
			// This case acts as a safeguard
			return {
				success: false,
				error: {
					code: 'PROVIDER_UNDETERMINED',
					message: `Could not determine the provider for model ID "${modelId}".`
				}
			};
		}

		// Update configuration
		currentConfig.models[role] = {
			...currentConfig.models[role], // Keep existing params like maxTokens
			provider: determinedProvider,
			modelId: modelId
		};

		// Write updated configuration
		const writeResult = writeConfig(currentConfig, projectRoot);
		if (!writeResult) {
			return {
				success: false,
				error: {
					code: 'WRITE_ERROR',
					message: 'Error writing updated configuration to .taskmasterconfig'
				}
			};
		}

		const successMessage = `Successfully set ${role} model to ${modelId} (Provider: ${determinedProvider})`;
		report('info', successMessage);

		return {
			success: true,
			data: {
				role,
				provider: determinedProvider,
				modelId,
				message: successMessage,
				warning: warningMessage // Include warning in the response data
			}
		};
	} catch (error) {
		report('error', `Error setting ${role} model: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'SET_MODEL_ERROR',
				message: error.message
			}
		};
	}
}

/**
 * Get API key status for all known providers.
 * @param {Object} [options] - Options for the operation
 * @param {Object} [options.session] - Session object containing environment variables (for MCP)
 * @param {Function} [options.mcpLog] - MCP logger object (for MCP)
 * @param {string} [options.projectRoot] - Project root directory
 * @returns {Object} RESTful response with API key status report
 */
async function getApiKeyStatusReport(options = {}) {
	const { mcpLog, projectRoot, session } = options;
	const report = (level, ...args) => {
		if (mcpLog && typeof mcpLog[level] === 'function') {
			mcpLog[level](...args);
		}
	};

	try {
		const providers = getAllProviders();
		const providersToCheck = providers.filter(
			(p) => p.toLowerCase() !== 'ollama'
		); // Ollama runs as a local service and does not normally require an API key
		const statusReport = providersToCheck.map((provider) => {
			// Use provided projectRoot for MCP status check
			const cliOk = isApiKeySet(provider, session); // Pass session for CLI check too
			const mcpOk = getMcpApiKeyStatus(provider, projectRoot);
			return {
				provider,
				cli: cliOk,
				mcp: mcpOk
			};
		});

		report('info', 'Successfully generated API key status report.');
		return {
			success: true,
			data: {
				report: statusReport,
				message: 'API key status report generated.'
			}
		};
	} catch (error) {
		report('error', `Error generating API key status report: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'API_KEY_STATUS_ERROR',
				message: error.message
			}
		};
	}
}

export {
	getModelConfiguration,
	getAvailableModelsList,
	setModel,
	getApiKeyStatusReport
};

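All of the functions above return the same RESTful-style envelope ({ success, data } or { success, error: { code, message } }), so a caller can branch on a single shape. A minimal sketch of how an MCP tool or CLI wrapper might consume setModel; the import path, project root, and model ID are illustrative assumptions, not part of this diff:

// Hypothetical caller; path, projectRoot and model ID are placeholders.
import { setModel } from './models.js';

const result = await setModel('research', 'some-org/some-model', {
	providerHint: 'openrouter', // as if the --openrouter flag were passed
	projectRoot: '/path/to/project'
});

if (result.success) {
	console.log(result.data.message); // result.data.warning may also be set for custom models
} else {
	console.error(`${result.error.code}: ${result.error.message}`);
}
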
212
scripts/modules/task-manager/parse-prd.js
Normal file
@@ -0,0 +1,212 @@
import fs from 'fs';
import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import { z } from 'zod';

import {
	log,
	writeJSON,
	enableSilentMode,
	disableSilentMode,
	isSilentMode
} from '../utils.js';

import { generateObjectService } from '../ai-services-unified.js';
import { getDebugFlag } from '../config-manager.js';
import generateTaskFiles from './generate-task-files.js';

// Define Zod schema for task validation
const TaskSchema = z.object({
	id: z.number(),
	title: z.string(),
	description: z.string(),
	status: z.string().default('pending'),
	dependencies: z.array(z.number()).default([]),
	priority: z.string().default('medium'),
	details: z.string().optional(),
	testStrategy: z.string().optional()
});

// Define Zod schema for the complete tasks data
const TasksDataSchema = z.object({
	tasks: z.array(TaskSchema),
	metadata: z.object({
		projectName: z.string(),
		totalTasks: z.number(),
		sourceFile: z.string(),
		generatedAt: z.string()
	})
});

/**
 * Parse a PRD file and generate tasks
 * @param {string} prdPath - Path to the PRD file
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {number} numTasks - Number of tasks to generate
 * @param {Object} options - Additional options
 * @param {Object} options.reportProgress - Function to report progress to MCP server (optional)
 * @param {Object} options.mcpLog - MCP logger object (optional)
 * @param {Object} options.session - Session object from MCP server (optional)
 */
async function parsePRD(prdPath, tasksPath, numTasks, options = {}) {
	const { reportProgress, mcpLog, session } = options;

	// Determine output format based on mcpLog presence (simplification)
	const outputFormat = mcpLog ? 'json' : 'text';

	// Create custom reporter that checks for MCP log and silent mode
	const report = (message, level = 'info') => {
		if (mcpLog) {
			mcpLog[level](message);
		} else if (!isSilentMode() && outputFormat === 'text') {
			// Only log to console if not in silent mode and outputFormat is 'text'
			log(level, message);
		}
	};

	try {
		report(`Parsing PRD file: ${prdPath}`, 'info');

		// Read the PRD content
		const prdContent = fs.readFileSync(prdPath, 'utf8');

		// Build system prompt for PRD parsing
		const systemPrompt = `You are an AI assistant helping to break down a Product Requirements Document (PRD) into a set of sequential development tasks.
Your goal is to create ${numTasks} well-structured, actionable development tasks based on the PRD provided.

Each task should follow this JSON structure:
{
	"id": number,
	"title": string,
	"description": string,
	"status": "pending",
	"dependencies": number[] (IDs of tasks this depends on),
	"priority": "high" | "medium" | "low",
	"details": string (implementation details),
	"testStrategy": string (validation approach)
}

Guidelines:
1. Create exactly ${numTasks} tasks, numbered from 1 to ${numTasks}
2. Each task should be atomic and focused on a single responsibility
3. Order tasks logically - consider dependencies and implementation sequence
4. Early tasks should focus on setup, core functionality first, then advanced features
5. Include clear validation/testing approach for each task
6. Set appropriate dependency IDs (a task can only depend on tasks with lower IDs)
7. Assign priority (high/medium/low) based on criticality and dependency order
8. Include detailed implementation guidance in the "details" field
9. If the PRD contains specific requirements for libraries, database schemas, frameworks, tech stacks, or any other implementation details, STRICTLY ADHERE to these requirements in your task breakdown and do not discard them under any circumstance
10. Focus on filling in any gaps left by the PRD or areas that aren't fully specified, while preserving all explicit requirements
11. Always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches`;

		// Build user prompt with PRD content
		const userPrompt = `Here's the Product Requirements Document (PRD) to break down into ${numTasks} tasks:

${prdContent}

Return your response in this format:
{
	"tasks": [
		{
			"id": 1,
			"title": "Setup Project Repository",
			"description": "...",
			...
		},
		...
	],
	"metadata": {
		"projectName": "PRD Implementation",
		"totalTasks": ${numTasks},
		"sourceFile": "${prdPath}",
		"generatedAt": "YYYY-MM-DD"
	}
}`;

		// Call the unified AI service
		report('Calling AI service to generate tasks from PRD...', 'info');

		// Call generateObjectService with proper parameters
		const tasksData = await generateObjectService({
			role: 'main', // Use 'main' role to get the model from config
			session: session, // Pass session for API key resolution
			schema: TasksDataSchema, // Pass the schema for validation
			objectName: 'tasks_data', // Name the generated object
			systemPrompt: systemPrompt, // System instructions
			prompt: userPrompt, // User prompt with PRD content
			reportProgress // Progress reporting function
		});

		// Create the directory if it doesn't exist
		const tasksDir = path.dirname(tasksPath);
		if (!fs.existsSync(tasksDir)) {
			fs.mkdirSync(tasksDir, { recursive: true });
		}

		// Write the tasks to the file
		writeJSON(tasksPath, tasksData);
		report(
			`Successfully generated ${tasksData.tasks.length} tasks from PRD`,
			'success'
		);
		report(`Tasks saved to: ${tasksPath}`, 'info');

		// Generate individual task files
		if (reportProgress && mcpLog) {
			// Enable silent mode when being called from MCP server
			enableSilentMode();
			await generateTaskFiles(tasksPath, tasksDir);
			disableSilentMode();
		} else {
			await generateTaskFiles(tasksPath, tasksDir);
		}

		// Only show success boxes for text output (CLI)
		if (outputFormat === 'text') {
			console.log(
				boxen(
					chalk.green(
						`Successfully generated ${tasksData.tasks.length} tasks from PRD`
					),
					{ padding: 1, borderColor: 'green', borderStyle: 'round' }
				)
			);

			console.log(
				boxen(
					chalk.white.bold('Next Steps:') +
						'\n\n' +
						`${chalk.cyan('1.')} Run ${chalk.yellow('task-master list')} to view all tasks\n` +
						`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down a task into subtasks`,
					{
						padding: 1,
						borderColor: 'cyan',
						borderStyle: 'round',
						margin: { top: 1 }
					}
				)
			);
		}

		return tasksData;
	} catch (error) {
		report(`Error parsing PRD: ${error.message}`, 'error');

		// Only show error UI for text output (CLI)
		if (outputFormat === 'text') {
			console.error(chalk.red(`Error: ${error.message}`));

			if (getDebugFlag(session)) {
				// Use getter
				console.error(error);
			}

			process.exit(1);
		} else {
			throw error; // Re-throw for JSON output
		}
	}
}

export default parsePRD;

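For reference, a hedged sketch of how parsePRD might be invoked from the CLI layer; the file paths and task count are placeholders and the import path is an assumption:

// Illustrative call only - paths and count are placeholders.
import parsePRD from './parse-prd.js';

// CLI-style call: no mcpLog, so outputFormat resolves to 'text' and boxen summaries are printed.
const tasksData = await parsePRD('scripts/prd.txt', 'tasks/tasks.json', 10);
console.log(`Generated ${tasksData.tasks.length} tasks`);
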
119
scripts/modules/task-manager/remove-subtask.js
Normal file
@@ -0,0 +1,119 @@
import path from 'path';
import { log, readJSON, writeJSON } from '../utils.js';
import generateTaskFiles from './generate-task-files.js';

/**
 * Remove a subtask from its parent task
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {string} subtaskId - ID of the subtask to remove in format "parentId.subtaskId"
 * @param {boolean} convertToTask - Whether to convert the subtask to a standalone task
 * @param {boolean} generateFiles - Whether to regenerate task files after removing the subtask
 * @returns {Object|null} The removed subtask if convertToTask is true, otherwise null
 */
async function removeSubtask(
	tasksPath,
	subtaskId,
	convertToTask = false,
	generateFiles = true
) {
	try {
		log('info', `Removing subtask ${subtaskId}...`);

		// Read the existing tasks
		const data = readJSON(tasksPath);
		if (!data || !data.tasks) {
			throw new Error(`Invalid or missing tasks file at ${tasksPath}`);
		}

		// Parse the subtask ID (format: "parentId.subtaskId")
		if (!subtaskId.includes('.')) {
			throw new Error(
				`Invalid subtask ID format: ${subtaskId}. Expected format: "parentId.subtaskId"`
			);
		}

		const [parentIdStr, subtaskIdStr] = subtaskId.split('.');
		const parentId = parseInt(parentIdStr, 10);
		const subtaskIdNum = parseInt(subtaskIdStr, 10);

		// Find the parent task
		const parentTask = data.tasks.find((t) => t.id === parentId);
		if (!parentTask) {
			throw new Error(`Parent task with ID ${parentId} not found`);
		}

		// Check if parent has subtasks
		if (!parentTask.subtasks || parentTask.subtasks.length === 0) {
			throw new Error(`Parent task ${parentId} has no subtasks`);
		}

		// Find the subtask to remove
		const subtaskIndex = parentTask.subtasks.findIndex(
			(st) => st.id === subtaskIdNum
		);
		if (subtaskIndex === -1) {
			throw new Error(`Subtask ${subtaskId} not found`);
		}

		// Get a copy of the subtask before removing it
		const removedSubtask = { ...parentTask.subtasks[subtaskIndex] };

		// Remove the subtask from the parent
		parentTask.subtasks.splice(subtaskIndex, 1);

		// If parent has no more subtasks, remove the subtasks array
		if (parentTask.subtasks.length === 0) {
			delete parentTask.subtasks;
		}

		let convertedTask = null;

		// Convert the subtask to a standalone task if requested
		if (convertToTask) {
			log('info', `Converting subtask ${subtaskId} to a standalone task...`);

			// Find the highest task ID to determine the next ID
			const highestId = Math.max(...data.tasks.map((t) => t.id));
			const newTaskId = highestId + 1;

			// Create the new task from the subtask
			convertedTask = {
				id: newTaskId,
				title: removedSubtask.title,
				description: removedSubtask.description || '',
				details: removedSubtask.details || '',
				status: removedSubtask.status || 'pending',
				dependencies: removedSubtask.dependencies || [],
				priority: parentTask.priority || 'medium' // Inherit priority from parent
			};

			// Add the parent task as a dependency if not already present
			if (!convertedTask.dependencies.includes(parentId)) {
				convertedTask.dependencies.push(parentId);
			}

			// Add the converted task to the tasks array
			data.tasks.push(convertedTask);

			log('info', `Created new task ${newTaskId} from subtask ${subtaskId}`);
		} else {
			log('info', `Subtask ${subtaskId} deleted`);
		}

		// Write the updated tasks back to the file
		writeJSON(tasksPath, data);

		// Generate task files if requested
		if (generateFiles) {
			log('info', 'Regenerating task files...');
			await generateTaskFiles(tasksPath, path.dirname(tasksPath));
		}

		return convertedTask;
	} catch (error) {
		log('error', `Error removing subtask: ${error.message}`);
		throw error;
	}
}

export default removeSubtask;

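A short usage sketch of removeSubtask in convert mode, grounded in the function above; the subtask ID, file path, and import path are placeholders:

// Illustrative only: "5.2" and the path are placeholders.
import removeSubtask from './remove-subtask.js';

// Convert subtask 5.2 into a standalone task; the parent (task 5) is added as a dependency.
const newTask = await removeSubtask('tasks/tasks.json', '5.2', true);
if (newTask) {
	console.log(
		`Subtask promoted to task ${newTask.id} (depends on ${newTask.dependencies.join(', ')})`
	);
}
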
207
scripts/modules/task-manager/remove-task.js
Normal file
@@ -0,0 +1,207 @@
import fs from 'fs';
import path from 'path';

import { log, readJSON, writeJSON } from '../utils.js';
import generateTaskFiles from './generate-task-files.js';
import taskExists from './task-exists.js';

/**
 * Removes one or more tasks or subtasks from the tasks file
 * @param {string} tasksPath - Path to the tasks file
 * @param {string} taskIds - Comma-separated string of task/subtask IDs to remove (e.g., '5,6.1,7')
 * @returns {Object} Result object with success status, messages, and removed task info
 */
async function removeTask(tasksPath, taskIds) {
	const results = {
		success: true,
		messages: [],
		errors: [],
		removedTasks: []
	};
	const taskIdsToRemove = taskIds
		.split(',')
		.map((id) => id.trim())
		.filter(Boolean); // Remove empty strings if any

	if (taskIdsToRemove.length === 0) {
		results.success = false;
		results.errors.push('No valid task IDs provided.');
		return results;
	}

	try {
		// Read the tasks file ONCE before the loop
		const data = readJSON(tasksPath);
		if (!data || !data.tasks) {
			throw new Error(`No valid tasks found in ${tasksPath}`);
		}

		const tasksToDeleteFiles = []; // Collect IDs of main tasks whose files should be deleted

		for (const taskId of taskIdsToRemove) {
			// Check if the task ID exists *before* attempting removal
			if (!taskExists(data.tasks, taskId)) {
				const errorMsg = `Task with ID ${taskId} not found or already removed.`;
				results.errors.push(errorMsg);
				results.success = false; // Mark overall success as false if any error occurs
				continue; // Skip to the next ID
			}

			try {
				// Handle subtask removal (e.g., '5.2')
				if (typeof taskId === 'string' && taskId.includes('.')) {
					const [parentTaskId, subtaskId] = taskId
						.split('.')
						.map((id) => parseInt(id, 10));

					// Find the parent task
					const parentTask = data.tasks.find((t) => t.id === parentTaskId);
					if (!parentTask || !parentTask.subtasks) {
						throw new Error(
							`Parent task ${parentTaskId} or its subtasks not found for subtask ${taskId}`
						);
					}

					// Find the subtask to remove
					const subtaskIndex = parentTask.subtasks.findIndex(
						(st) => st.id === subtaskId
					);
					if (subtaskIndex === -1) {
						throw new Error(
							`Subtask ${subtaskId} not found in parent task ${parentTaskId}`
						);
					}

					// Store the subtask info before removal
					const removedSubtask = {
						...parentTask.subtasks[subtaskIndex],
						parentTaskId: parentTaskId
					};
					results.removedTasks.push(removedSubtask);

					// Remove the subtask from the parent
					parentTask.subtasks.splice(subtaskIndex, 1);

					results.messages.push(`Successfully removed subtask ${taskId}`);
				}
				// Handle main task removal
				else {
					const taskIdNum = parseInt(taskId, 10);
					const taskIndex = data.tasks.findIndex((t) => t.id === taskIdNum);
					if (taskIndex === -1) {
						// This case should theoretically be caught by the taskExists check above,
						// but keep it as a safeguard.
						throw new Error(`Task with ID ${taskId} not found`);
					}

					// Store the task info before removal
					const removedTask = data.tasks[taskIndex];
					results.removedTasks.push(removedTask);
					tasksToDeleteFiles.push(taskIdNum); // Add to list for file deletion

					// Remove the task from the main array
					data.tasks.splice(taskIndex, 1);

					results.messages.push(`Successfully removed task ${taskId}`);
				}
			} catch (innerError) {
				// Catch errors specific to processing *this* ID
				const errorMsg = `Error processing ID ${taskId}: ${innerError.message}`;
				results.errors.push(errorMsg);
				results.success = false;
				log('warn', errorMsg); // Log as warning and continue with next ID
			}
		} // End of loop through taskIdsToRemove

		// --- Post-Loop Operations ---

		// Only proceed with cleanup and saving if at least one task was potentially removed
		if (results.removedTasks.length > 0) {
			// Remove all references AFTER all tasks/subtasks are removed
			const allRemovedIds = new Set(
				taskIdsToRemove.map((id) =>
					typeof id === 'string' && id.includes('.') ? id : parseInt(id, 10)
				)
			);

			data.tasks.forEach((task) => {
				// Clean dependencies in main tasks
				if (task.dependencies) {
					task.dependencies = task.dependencies.filter(
						(depId) => !allRemovedIds.has(depId)
					);
				}
				// Clean dependencies in remaining subtasks
				if (task.subtasks) {
					task.subtasks.forEach((subtask) => {
						if (subtask.dependencies) {
							subtask.dependencies = subtask.dependencies.filter(
								(depId) =>
									!allRemovedIds.has(`${task.id}.${depId}`) &&
									!allRemovedIds.has(depId) // check both subtask and main task refs
							);
						}
					});
				}
			});

			// Save the updated tasks file ONCE
			writeJSON(tasksPath, data);

			// Delete task files AFTER saving tasks.json
			for (const taskIdNum of tasksToDeleteFiles) {
				const taskFileName = path.join(
					path.dirname(tasksPath),
					`task_${taskIdNum.toString().padStart(3, '0')}.txt`
				);
				if (fs.existsSync(taskFileName)) {
					try {
						fs.unlinkSync(taskFileName);
						results.messages.push(`Deleted task file: ${taskFileName}`);
					} catch (unlinkError) {
						const unlinkMsg = `Failed to delete task file ${taskFileName}: ${unlinkError.message}`;
						results.errors.push(unlinkMsg);
						results.success = false;
						log('warn', unlinkMsg);
					}
				}
			}

			// Generate updated task files ONCE
			try {
				await generateTaskFiles(tasksPath, path.dirname(tasksPath));
				results.messages.push('Task files regenerated successfully.');
			} catch (genError) {
				const genErrMsg = `Failed to regenerate task files: ${genError.message}`;
				results.errors.push(genErrMsg);
				results.success = false;
				log('warn', genErrMsg);
			}
		} else if (results.errors.length === 0) {
			// Case where valid IDs were provided but none existed
			results.messages.push('No tasks found matching the provided IDs.');
		}

		// Consolidate messages for final output
		const finalMessage = results.messages.join('\n');
		const finalError = results.errors.join('\n');

		return {
			success: results.success,
			message: finalMessage || 'No tasks were removed.',
			error: finalError || null,
			removedTasks: results.removedTasks
		};
	} catch (error) {
		// Catch errors from reading file or other initial setup
		log('error', `Error removing tasks: ${error.message}`);
		return {
			success: false,
			message: '',
			error: `Operation failed: ${error.message}`,
			removedTasks: []
		};
	}
}

export default removeTask;

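removeTask accepts a mixed, comma-separated list of task and subtask IDs and resolves to a consolidated result object rather than throwing per ID. A hedged sketch of consuming that result; the IDs, file path, and import path are placeholders:

// Illustrative only - '5,6.1,7' and the path are placeholders.
import removeTask from './remove-task.js';

const result = await removeTask('tasks/tasks.json', '5,6.1,7');
console.log(result.message); // one line per successful removal
if (!result.success) {
	console.error(result.error); // aggregated per-ID errors
}
console.log(`Removed ${result.removedTasks.length} item(s)`);
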
114
scripts/modules/task-manager/set-task-status.js
Normal file
@@ -0,0 +1,114 @@
import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';

import { log, readJSON, writeJSON, findTaskById } from '../utils.js';
import { displayBanner } from '../ui.js';
import { validateTaskDependencies } from '../dependency-manager.js';
import { getDebugFlag } from '../config-manager.js';
import updateSingleTaskStatus from './update-single-task-status.js';
import generateTaskFiles from './generate-task-files.js';

/**
 * Set the status of a task
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {string} taskIdInput - Task ID(s) to update
 * @param {string} newStatus - New status
 * @param {Object} options - Additional options (mcpLog for MCP mode)
 * @returns {Object|undefined} Result object in MCP mode, undefined in CLI mode
 */
async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) {
	try {
		// Determine if we're in MCP mode by checking for mcpLog
		const isMcpMode = !!options?.mcpLog;

		// Only display UI elements if not in MCP mode
		if (!isMcpMode) {
			displayBanner();

			console.log(
				boxen(chalk.white.bold(`Updating Task Status to: ${newStatus}`), {
					padding: 1,
					borderColor: 'blue',
					borderStyle: 'round'
				})
			);
		}

		log('info', `Reading tasks from ${tasksPath}...`);
		const data = readJSON(tasksPath);
		if (!data || !data.tasks) {
			throw new Error(`No valid tasks found in ${tasksPath}`);
		}

		// Handle multiple task IDs (comma-separated)
		const taskIds = taskIdInput.split(',').map((id) => id.trim());
		const updatedTasks = [];

		// Update each task
		for (const id of taskIds) {
			await updateSingleTaskStatus(tasksPath, id, newStatus, data, !isMcpMode);
			updatedTasks.push(id);
		}

		// Write the updated tasks to the file
		writeJSON(tasksPath, data);

		// Validate dependencies after status update
		log('info', 'Validating dependencies after status update...');
		validateTaskDependencies(data.tasks);

		// Generate individual task files
		log('info', 'Regenerating task files...');
		await generateTaskFiles(tasksPath, path.dirname(tasksPath), {
			mcpLog: options.mcpLog
		});

		// Display success message - only in CLI mode
		if (!isMcpMode) {
			for (const id of updatedTasks) {
				const task = findTaskById(data.tasks, id);
				const taskName = task ? task.title : id;

				console.log(
					boxen(
						chalk.white.bold(`Successfully updated task ${id} status:`) +
							'\n' +
							`From: ${chalk.yellow(task ? task.status : 'unknown')}\n` +
							`To: ${chalk.green(newStatus)}`,
						{ padding: 1, borderColor: 'green', borderStyle: 'round' }
					)
				);
			}
		}

		// Return success value for programmatic use
		return {
			success: true,
			updatedTasks: updatedTasks.map((id) => ({
				id,
				status: newStatus
			}))
		};
	} catch (error) {
		log('error', `Error setting task status: ${error.message}`);

		// Only show error UI in CLI mode
		if (!options?.mcpLog) {
			console.error(chalk.red(`Error: ${error.message}`));

			// Pass session to getDebugFlag
			if (getDebugFlag(options?.session)) {
				// Use getter
				console.error(error);
			}

			process.exit(1);
		} else {
			// In MCP mode, throw the error for the caller to handle
			throw error;
		}
	}
}

export default setTaskStatus;

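Because setTaskStatus splits taskIdInput on commas and defers each ID to updateSingleTaskStatus, one call can update several tasks and subtasks at once. A minimal sketch; the IDs, path, import path, and the absence of options.mcpLog (CLI mode) are assumptions:

// Illustrative only: CLI-style call without options.mcpLog, so banners and boxen output are shown.
import setTaskStatus from './set-task-status.js';

await setTaskStatus('tasks/tasks.json', '3,4.1,4.2', 'done');
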
30
scripts/modules/task-manager/task-exists.js
Normal file
@@ -0,0 +1,30 @@
/**
 * Checks if a task with the given ID exists
 * @param {Array} tasks - Array of tasks to search
 * @param {string|number} taskId - ID of task or subtask to check
 * @returns {boolean} Whether the task exists
 */
function taskExists(tasks, taskId) {
	// Handle subtask IDs (e.g., "1.2")
	if (typeof taskId === 'string' && taskId.includes('.')) {
		const [parentIdStr, subtaskIdStr] = taskId.split('.');
		const parentId = parseInt(parentIdStr, 10);
		const subtaskId = parseInt(subtaskIdStr, 10);

		// Find the parent task
		const parentTask = tasks.find((t) => t.id === parentId);

		// If parent exists, check if subtask exists
		return (
			parentTask &&
			parentTask.subtasks &&
			parentTask.subtasks.some((st) => st.id === subtaskId)
		);
	}

	// Handle regular task IDs
	const id = parseInt(taskId, 10);
	return tasks.some((t) => t.id === id);
}

export default taskExists;

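taskExists is the shared guard used by removeTask above; it treats dotted IDs as subtask references and anything else as a top-level task ID. A small illustration with made-up data:

// Illustrative data only.
import taskExists from './task-exists.js';

const tasks = [{ id: 1, subtasks: [{ id: 2 }] }, { id: 3 }];
taskExists(tasks, 3); // true  - top-level task
taskExists(tasks, '1.2'); // true  - subtask 2 of task 1
taskExists(tasks, '1.9'); // false - parent exists, subtask does not
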
126
scripts/modules/task-manager/update-single-task-status.js
Normal file
@@ -0,0 +1,126 @@
import chalk from 'chalk';

import { log } from '../utils.js';

/**
 * Update the status of a single task
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {string} taskIdInput - Task ID to update
 * @param {string} newStatus - New status
 * @param {Object} data - Tasks data
 * @param {boolean} showUi - Whether to show UI elements
 */
async function updateSingleTaskStatus(
	tasksPath,
	taskIdInput,
	newStatus,
	data,
	showUi = true
) {
	// Check if it's a subtask (e.g., "1.2")
	if (taskIdInput.includes('.')) {
		const [parentId, subtaskId] = taskIdInput
			.split('.')
			.map((id) => parseInt(id, 10));

		// Find the parent task
		const parentTask = data.tasks.find((t) => t.id === parentId);
		if (!parentTask) {
			throw new Error(`Parent task ${parentId} not found`);
		}

		// Find the subtask
		if (!parentTask.subtasks) {
			throw new Error(`Parent task ${parentId} has no subtasks`);
		}

		const subtask = parentTask.subtasks.find((st) => st.id === subtaskId);
		if (!subtask) {
			throw new Error(
				`Subtask ${subtaskId} not found in parent task ${parentId}`
			);
		}

		// Update the subtask status
		const oldStatus = subtask.status || 'pending';
		subtask.status = newStatus;

		log(
			'info',
			`Updated subtask ${parentId}.${subtaskId} status from '${oldStatus}' to '${newStatus}'`
		);

		// Check if all subtasks are done (if setting to 'done')
		if (
			newStatus.toLowerCase() === 'done' ||
			newStatus.toLowerCase() === 'completed'
		) {
			const allSubtasksDone = parentTask.subtasks.every(
				(st) => st.status === 'done' || st.status === 'completed'
			);

			// Suggest updating parent task if all subtasks are done
			if (
				allSubtasksDone &&
				parentTask.status !== 'done' &&
				parentTask.status !== 'completed'
			) {
				// Only show suggestion in CLI mode
				if (showUi) {
					console.log(
						chalk.yellow(
							`All subtasks of parent task ${parentId} are now marked as done.`
						)
					);
					console.log(
						chalk.yellow(
							`Consider updating the parent task status with: task-master set-status --id=${parentId} --status=done`
						)
					);
				}
			}
		}
	} else {
		// Handle regular task
		const taskId = parseInt(taskIdInput, 10);
		const task = data.tasks.find((t) => t.id === taskId);

		if (!task) {
			throw new Error(`Task ${taskId} not found`);
		}

		// Update the task status
		const oldStatus = task.status || 'pending';
		task.status = newStatus;

		log(
			'info',
			`Updated task ${taskId} status from '${oldStatus}' to '${newStatus}'`
		);

		// If marking as done, also mark all subtasks as done
		if (
			(newStatus.toLowerCase() === 'done' ||
				newStatus.toLowerCase() === 'completed') &&
			task.subtasks &&
			task.subtasks.length > 0
		) {
			const pendingSubtasks = task.subtasks.filter(
				(st) => st.status !== 'done' && st.status !== 'completed'
			);

			if (pendingSubtasks.length > 0) {
				log(
					'info',
					`Also marking ${pendingSubtasks.length} subtasks as '${newStatus}'`
				);

				pendingSubtasks.forEach((subtask) => {
					subtask.status = newStatus;
				});
			}
		}
	}
}

export default updateSingleTaskStatus;

406
scripts/modules/task-manager/update-subtask-by-id.js
Normal file
@@ -0,0 +1,406 @@
import fs from 'fs';
import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';

import {
	getStatusWithColor,
	startLoadingIndicator,
	stopLoadingIndicator
} from '../ui.js';
import {
	log as consoleLog,
	readJSON,
	writeJSON,
	truncate,
	isSilentMode
} from '../utils.js';
import { generateTextService } from '../ai-services-unified.js';
import { getDebugFlag } from '../config-manager.js';
import generateTaskFiles from './generate-task-files.js';

/**
 * Update a subtask by appending additional timestamped information using the unified AI service.
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {string} subtaskId - ID of the subtask to update in format "parentId.subtaskId"
 * @param {string} prompt - Prompt for generating additional information
 * @param {boolean} [useResearch=false] - Whether to use the research AI role.
 * @param {Object} context - Context object containing session and mcpLog.
 * @param {Object} [context.session] - Session object from MCP server.
 * @param {Object} [context.mcpLog] - MCP logger object.
 * @param {string} [outputFormat='text'] - Output format ('text' or 'json'). Automatically 'json' if mcpLog is present.
 * @returns {Promise<Object|null>} - The updated subtask or null if update failed.
 */
async function updateSubtaskById(
	tasksPath,
	subtaskId,
	prompt,
	useResearch = false,
	context = {},
	outputFormat = context.mcpLog ? 'json' : 'text'
) {
	const { session, mcpLog } = context;
	const logFn = mcpLog || consoleLog;
	const isMCP = !!mcpLog;

	// Report helper
	const report = (level, ...args) => {
		if (isMCP) {
			if (typeof logFn[level] === 'function') logFn[level](...args);
			else logFn.info(...args);
		} else if (!isSilentMode()) {
			logFn(level, ...args);
		}
	};

	let loadingIndicator = null;

	try {
		report('info', `Updating subtask ${subtaskId} with prompt: "${prompt}"`);

		// Validate subtask ID format
		if (
			!subtaskId ||
			typeof subtaskId !== 'string' ||
			!subtaskId.includes('.')
		) {
			throw new Error(
				`Invalid subtask ID format: ${subtaskId}. Subtask ID must be in format "parentId.subtaskId"`
			);
		}

		// Validate prompt
		if (!prompt || typeof prompt !== 'string' || prompt.trim() === '') {
			throw new Error(
				'Prompt cannot be empty. Please provide context for the subtask update.'
			);
		}

		// Validate tasks file exists
		if (!fs.existsSync(tasksPath)) {
			throw new Error(`Tasks file not found at path: ${tasksPath}`);
		}

		// Read the tasks file
		const data = readJSON(tasksPath);
		if (!data || !data.tasks) {
			throw new Error(
				`No valid tasks found in ${tasksPath}. The file may be corrupted or have an invalid format.`
			);
		}

		// Parse parent and subtask IDs
		const [parentIdStr, subtaskIdStr] = subtaskId.split('.');
		const parentId = parseInt(parentIdStr, 10);
		const subtaskIdNum = parseInt(subtaskIdStr, 10);

		if (
			isNaN(parentId) ||
			parentId <= 0 ||
			isNaN(subtaskIdNum) ||
			subtaskIdNum <= 0
		) {
			throw new Error(
				`Invalid subtask ID format: ${subtaskId}. Both parent ID and subtask ID must be positive integers.`
			);
		}

		// Find the parent task
		const parentTask = data.tasks.find((task) => task.id === parentId);
		if (!parentTask) {
			throw new Error(
				`Parent task with ID ${parentId} not found. Please verify the task ID and try again.`
			);
		}

		// Find the subtask
		if (!parentTask.subtasks || !Array.isArray(parentTask.subtasks)) {
			throw new Error(`Parent task ${parentId} has no subtasks.`);
		}

		const subtaskIndex = parentTask.subtasks.findIndex(
			(st) => st.id === subtaskIdNum
		);
		if (subtaskIndex === -1) {
			throw new Error(
				`Subtask with ID ${subtaskId} not found. Please verify the subtask ID and try again.`
			);
		}

		const subtask = parentTask.subtasks[subtaskIndex];

		// Check if subtask is already completed
		if (subtask.status === 'done' || subtask.status === 'completed') {
			report(
				'warn',
				`Subtask ${subtaskId} is already marked as done and cannot be updated`
			);

			// Only show UI elements for text output (CLI)
			if (outputFormat === 'text') {
				console.log(
					boxen(
						chalk.yellow(
							`Subtask ${subtaskId} is already marked as ${subtask.status} and cannot be updated.`
						) +
							'\n\n' +
							chalk.white(
								'Completed subtasks are locked to maintain consistency. To modify a completed subtask, you must first:'
							) +
							'\n' +
							chalk.white(
								'1. Change its status to "pending" or "in-progress"'
							) +
							'\n' +
							chalk.white('2. Then run the update-subtask command'),
						{ padding: 1, borderColor: 'yellow', borderStyle: 'round' }
					)
				);
			}
			return null;
		}

		// Only show UI elements for text output (CLI)
		if (outputFormat === 'text') {
			// Show the subtask that will be updated
			const table = new Table({
				head: [
					chalk.cyan.bold('ID'),
					chalk.cyan.bold('Title'),
					chalk.cyan.bold('Status')
				],
				colWidths: [10, 55, 10]
			});

			table.push([
				subtaskId,
				truncate(subtask.title, 52),
				getStatusWithColor(subtask.status)
			]);

			console.log(
				boxen(chalk.white.bold(`Updating Subtask #${subtaskId}`), {
					padding: 1,
					borderColor: 'blue',
					borderStyle: 'round',
					margin: { top: 1, bottom: 0 }
				})
			);

			console.log(table.toString());

			// Start the loading indicator - only for text output
			loadingIndicator = startLoadingIndicator(
				'Generating additional information with AI...'
			);
		}

		let additionalInformation = '';
		try {
			// Reverted: Keep the original system prompt
			const systemPrompt = `You are an AI assistant helping to update software development subtasks with additional information.
Given a subtask, you will provide additional details, implementation notes, or technical insights based on user request.
Focus only on adding content that enhances the subtask - don't repeat existing information.
Be technical, specific, and implementation-focused rather than general.
Provide concrete examples, code snippets, or implementation details when relevant.`;

			// Reverted: Use the full JSON stringification for the user message
			const subtaskData = JSON.stringify(subtask, null, 2);
			const userMessageContent = `Here is the subtask to enhance:\n${subtaskData}\n\nPlease provide additional information addressing this request:\n${prompt}\n\nReturn ONLY the new information to add - do not repeat existing content.`;

			const serviceRole = useResearch ? 'research' : 'main';
			report('info', `Calling AI text service with role: ${serviceRole}`);

			const streamResult = await generateTextService({
				role: serviceRole,
				session: session,
				systemPrompt: systemPrompt,
				prompt: userMessageContent
			});

			if (outputFormat === 'text' && loadingIndicator) {
				// Stop indicator immediately since generateText is blocking
				stopLoadingIndicator(loadingIndicator);
				loadingIndicator = null;
			}

			// Assign the result directly (generateTextService returns the text string)
			additionalInformation = streamResult ? streamResult.trim() : '';

			if (!additionalInformation) {
				throw new Error('AI returned empty response.'); // Changed error message slightly
			}
			report(
				// Corrected log message to reflect generateText
				'success',
				`Successfully generated text using AI role: ${serviceRole}.`
			);
		} catch (aiError) {
			report('error', `AI service call failed: ${aiError.message}`);
			throw aiError;
		} // Removed the inner finally block as streamingInterval is gone

		const currentDate = new Date();

		// Format the additional information with timestamp
		const formattedInformation = `\n\n<info added on ${currentDate.toISOString()}>\n${additionalInformation}\n</info added on ${currentDate.toISOString()}>`;

		// Only show debug info for text output (CLI)
		if (outputFormat === 'text' && getDebugFlag(session)) {
			console.log(
				'>>> DEBUG: formattedInformation:',
				formattedInformation.substring(0, 70) + '...'
			);
		}

		// Append to subtask details and description
		// Only show debug info for text output (CLI)
		if (outputFormat === 'text' && getDebugFlag(session)) {
			console.log('>>> DEBUG: Subtask details BEFORE append:', subtask.details);
		}

		if (subtask.details) {
			subtask.details += formattedInformation;
		} else {
			subtask.details = `${formattedInformation}`;
		}

		// Only show debug info for text output (CLI)
		if (outputFormat === 'text' && getDebugFlag(session)) {
			console.log('>>> DEBUG: Subtask details AFTER append:', subtask.details);
		}

		if (subtask.description) {
			// Only append to description if it makes sense (for shorter updates)
			if (additionalInformation.length < 200) {
				// Only show debug info for text output (CLI)
				if (outputFormat === 'text' && getDebugFlag(session)) {
					console.log(
						'>>> DEBUG: Subtask description BEFORE append:',
						subtask.description
					);
				}
				subtask.description += ` [Updated: ${currentDate.toLocaleDateString()}]`;
				// Only show debug info for text output (CLI)
				if (outputFormat === 'text' && getDebugFlag(session)) {
					console.log(
						'>>> DEBUG: Subtask description AFTER append:',
						subtask.description
					);
				}
			}
		}

		// Only show debug info for text output (CLI)
		if (outputFormat === 'text' && getDebugFlag(session)) {
			console.log('>>> DEBUG: About to call writeJSON with updated data...');
		}

		// Update the subtask in the parent task's array
		parentTask.subtasks[subtaskIndex] = subtask;

		// Write the updated tasks to the file
		writeJSON(tasksPath, data);

		// Only show debug info for text output (CLI)
		if (outputFormat === 'text' && getDebugFlag(session)) {
			console.log('>>> DEBUG: writeJSON call completed.');
		}

		report('success', `Successfully updated subtask ${subtaskId}`);

		// Generate individual task files
		await generateTaskFiles(tasksPath, path.dirname(tasksPath));

		// Stop indicator before final console output - only for text output (CLI)
		if (outputFormat === 'text') {
			if (loadingIndicator) {
				stopLoadingIndicator(loadingIndicator);
				loadingIndicator = null;
			}

			console.log(
				boxen(
					chalk.green(`Successfully updated subtask #${subtaskId}`) +
						'\n\n' +
						chalk.white.bold('Title:') +
						' ' +
						subtask.title +
						'\n\n' +
						chalk.white.bold('Information Added:') +
						'\n' +
						chalk.white(truncate(additionalInformation, 300, true)),
					{ padding: 1, borderColor: 'green', borderStyle: 'round' }
				)
			);
		}

		return subtask;
	} catch (error) {
		// Outer catch block handles final errors after loop/attempts
		// Stop indicator on error - only for text output (CLI)
		if (outputFormat === 'text' && loadingIndicator) {
			stopLoadingIndicator(loadingIndicator);
			loadingIndicator = null;
		}

		report('error', `Error updating subtask: ${error.message}`);

		// Only show error UI for text output (CLI)
		if (outputFormat === 'text') {
			console.error(chalk.red(`Error: ${error.message}`));

			// Provide helpful error messages based on error type
			if (error.message?.includes('ANTHROPIC_API_KEY')) {
				console.log(
					chalk.yellow('\nTo fix this issue, set your Anthropic API key:')
				);
				console.log('  export ANTHROPIC_API_KEY=your_api_key_here');
			} else if (error.message?.includes('PERPLEXITY_API_KEY')) {
				console.log(chalk.yellow('\nTo fix this issue:'));
				console.log(
					'  1. Set your Perplexity API key: export PERPLEXITY_API_KEY=your_api_key_here'
				);
				console.log(
					'  2. Or run without the research flag: task-master update-subtask --id=<id> --prompt="..."'
				);
			} else if (error.message?.includes('overloaded')) {
				// Catch final overload error
				console.log(
					chalk.yellow(
						'\nAI model overloaded, and fallback failed or was unavailable:'
					)
				);
				console.log('  1. Try again in a few minutes.');
				console.log('  2. Ensure PERPLEXITY_API_KEY is set for fallback.');
				console.log('  3. Consider breaking your prompt into smaller updates.');
			} else if (error.message?.includes('not found')) {
				console.log(chalk.yellow('\nTo fix this issue:'));
				console.log(
					'  1. Run task-master list --with-subtasks to see all available subtask IDs'
				);
				console.log(
					'  2. Use a valid subtask ID with the --id parameter in format "parentId.subtaskId"'
				);
			} else if (error.message?.includes('empty stream response')) {
				console.log(
					chalk.yellow(
						'\nThe AI model returned an empty response. This might be due to the prompt or API issues. Try rephrasing or trying again later.'
					)
				);
			}

			if (getDebugFlag(session)) {
				// Use getter
				console.error(error);
			}
		} else {
			throw error; // Re-throw for JSON output
		}

		return null;
	}
}

export default updateSubtaskById;

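The block that updateSubtaskById appends to subtask.details is plain text wrapped in timestamped markers, so repeated updates accumulate chronologically. A small sketch of how that block is assembled, mirroring the function above; the placeholder notes string is illustrative:

// Illustrative only: how the timestamped block is built before being appended to subtask.details.
const ts = new Date().toISOString();
const block = `\n\n<info added on ${ts}>\n${'...AI-generated notes...'}\n</info added on ${ts}>`;
subtask.details = (subtask.details || '') + block;
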
483
scripts/modules/task-manager/update-task-by-id.js
Normal file
483
scripts/modules/task-manager/update-task-by-id.js
Normal file
@@ -0,0 +1,483 @@
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import chalk from 'chalk';
|
||||
import boxen from 'boxen';
|
||||
import Table from 'cli-table3';
|
||||
import { z } from 'zod'; // Keep Zod for post-parse validation
|
||||
|
||||
import {
|
||||
log as consoleLog,
|
||||
readJSON,
|
||||
writeJSON,
|
||||
truncate,
|
||||
isSilentMode
|
||||
} from '../utils.js';
|
||||
|
||||
import {
|
||||
getStatusWithColor,
|
||||
startLoadingIndicator,
|
||||
stopLoadingIndicator
|
||||
} from '../ui.js';
|
||||
|
||||
import { generateTextService } from '../ai-services-unified.js';
|
||||
import {
|
||||
getDebugFlag,
|
||||
isApiKeySet // Keep this check
|
||||
} from '../config-manager.js';
|
||||
import generateTaskFiles from './generate-task-files.js';
|
||||
|
||||
// Zod schema for post-parsing validation of the updated task object
|
||||
const updatedTaskSchema = z
|
||||
.object({
|
||||
id: z.number().int(),
|
||||
title: z.string(), // Title should be preserved, but check it exists
|
||||
description: z.string(),
|
||||
status: z.string(),
|
||||
dependencies: z.array(z.union([z.number().int(), z.string()])),
|
||||
priority: z.string().optional(),
|
||||
details: z.string().optional(),
|
||||
testStrategy: z.string().optional(),
|
||||
subtasks: z.array(z.any()).optional()
|
||||
})
|
||||
.strip(); // Allows parsing even if AI adds extra fields, but validation focuses on schema
|
||||
|
||||
/**
|
||||
* Parses a single updated task object from AI's text response.
|
||||
* @param {string} text - Response text from AI.
|
||||
* @param {number} expectedTaskId - The ID of the task expected.
|
||||
* @param {Function | Object} logFn - Logging function or MCP logger.
|
||||
* @param {boolean} isMCP - Flag indicating MCP context.
|
||||
* @returns {Object} Parsed and validated task object.
|
||||
* @throws {Error} If parsing or validation fails.
|
||||
*/
|
||||
function parseUpdatedTaskFromText(text, expectedTaskId, logFn, isMCP) {
|
||||
// Report helper consistent with the established pattern
|
||||
const report = (level, ...args) => {
|
||||
if (isMCP) {
|
||||
if (typeof logFn[level] === 'function') logFn[level](...args);
|
||||
else logFn.info(...args);
|
||||
} else if (!isSilentMode()) {
|
||||
logFn(level, ...args);
|
||||
}
|
||||
};
|
||||
|
||||
report(
|
||||
'info',
|
||||
'Attempting to parse updated task object from text response...'
|
||||
);
|
||||
if (!text || text.trim() === '')
|
||||
throw new Error('AI response text is empty.');
|
||||
|
||||
let cleanedResponse = text.trim();
|
||||
const originalResponseForDebug = cleanedResponse;
|
||||
|
||||
// Extract from Markdown code block first
|
||||
const codeBlockMatch = cleanedResponse.match(
|
||||
/```(?:json)?\s*([\s\S]*?)\s*```/
|
||||
);
|
||||
if (codeBlockMatch) {
|
||||
cleanedResponse = codeBlockMatch[1].trim();
|
||||
report('info', 'Extracted JSON content from Markdown code block.');
|
||||
} else {
|
||||
// If no code block, find first '{' and last '}' for the object
|
||||
const firstBrace = cleanedResponse.indexOf('{');
|
||||
const lastBrace = cleanedResponse.lastIndexOf('}');
|
||||
if (firstBrace !== -1 && lastBrace > firstBrace) {
|
||||
cleanedResponse = cleanedResponse.substring(firstBrace, lastBrace + 1);
|
||||
report('info', 'Extracted content between first { and last }.');
|
||||
} else {
|
||||
report(
|
||||
'warn',
|
||||
'Response does not appear to contain a JSON object structure. Parsing raw response.'
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
let parsedTask;
|
||||
try {
|
||||
parsedTask = JSON.parse(cleanedResponse);
|
||||
} catch (parseError) {
|
||||
report('error', `Failed to parse JSON object: ${parseError.message}`);
|
||||
report(
|
||||
'error',
|
||||
`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`
|
||||
);
|
||||
report(
|
||||
'error',
|
||||
`Original Raw Response (first 500 chars): ${originalResponseForDebug.substring(0, 500)}`
|
||||
);
|
||||
throw new Error(
|
||||
`Failed to parse JSON response object: ${parseError.message}`
|
||||
);
|
||||
}
|
||||
|
||||
if (!parsedTask || typeof parsedTask !== 'object') {
|
||||
report(
|
||||
'error',
|
||||
`Parsed content is not an object. Type: ${typeof parsedTask}`
|
||||
);
|
||||
report(
|
||||
'error',
|
||||
`Parsed content sample: ${JSON.stringify(parsedTask).substring(0, 200)}`
|
||||
);
|
||||
throw new Error('Parsed AI response is not a valid JSON object.');
|
||||
}
|
||||
|
||||
// Validate the parsed task object using Zod
|
||||
const validationResult = updatedTaskSchema.safeParse(parsedTask);
|
||||
if (!validationResult.success) {
|
||||
report('error', 'Parsed task object failed Zod validation.');
|
||||
validationResult.error.errors.forEach((err) => {
|
||||
report('error', ` - Field '${err.path.join('.')}': ${err.message}`);
|
||||
});
|
||||
throw new Error(
|
||||
`AI response failed task structure validation: ${validationResult.error.message}`
|
||||
);
|
||||
}
|
||||
|
||||
// Final check: ensure ID matches expected ID (AI might hallucinate)
|
||||
if (validationResult.data.id !== expectedTaskId) {
|
||||
report(
|
||||
'warn',
|
||||
`AI returned task with ID ${validationResult.data.id}, but expected ${expectedTaskId}. Overwriting ID.`
|
||||
);
|
||||
validationResult.data.id = expectedTaskId; // Enforce correct ID
|
||||
}
|
||||
|
||||
report('info', 'Successfully validated updated task structure.');
|
||||
return validationResult.data; // Return the validated task data
|
||||
}
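
// Illustrative sketch (not part of the original file): the kind of response this
// parser is designed to handle. The task values below are hypothetical and assume a
// model that wraps its JSON in a Markdown code fence.
//
//   const sampleResponse = [
//     'Here is the updated task:',
//     '```json',
//     '{ "id": 4, "title": "Set up CI", "description": "Add pipeline", "status": "pending", "dependencies": [1] }',
//     '```'
//   ].join('\n');
//   const task = parseUpdatedTaskFromText(sampleResponse, 4, console.log, false);
//   // -> { id: 4, title: 'Set up CI', ... } after code-fence extraction and Zod validation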
|
||||
|
||||
/**
|
||||
* Update a single task by ID using the unified AI service.
|
||||
* @param {string} tasksPath - Path to the tasks.json file
|
||||
* @param {number} taskId - Task ID to update
|
||||
* @param {string} prompt - Prompt with new context
|
||||
* @param {boolean} [useResearch=false] - Whether to use the research AI role.
|
||||
* @param {Object} context - Context object containing session and mcpLog.
|
||||
* @param {Object} [context.session] - Session object from MCP server.
|
||||
* @param {Object} [context.mcpLog] - MCP logger object.
|
||||
* @param {string} [outputFormat='text'] - Output format ('text' or 'json').
|
||||
* @returns {Promise<Object|null>} - Updated task data or null if task wasn't updated/found.
|
||||
*/
|
||||
async function updateTaskById(
|
||||
tasksPath,
|
||||
taskId,
|
||||
prompt,
|
||||
useResearch = false,
|
||||
context = {},
|
||||
outputFormat = 'text'
|
||||
) {
|
||||
const { session, mcpLog } = context;
|
||||
const logFn = mcpLog || consoleLog;
|
||||
const isMCP = !!mcpLog;
|
||||
|
||||
// Use report helper for logging
|
||||
const report = (level, ...args) => {
|
||||
if (isMCP) {
|
||||
if (typeof logFn[level] === 'function') logFn[level](...args);
|
||||
else logFn.info(...args);
|
||||
} else if (!isSilentMode()) {
|
||||
logFn(level, ...args);
|
||||
}
|
||||
};
|
||||
|
||||
try {
|
||||
report('info', `Updating single task ${taskId} with prompt: "${prompt}"`);
|
||||
|
||||
// --- Input Validations (Keep existing) ---
|
||||
if (!Number.isInteger(taskId) || taskId <= 0)
|
||||
throw new Error(
|
||||
`Invalid task ID: ${taskId}. Task ID must be a positive integer.`
|
||||
);
|
||||
if (!prompt || typeof prompt !== 'string' || prompt.trim() === '')
|
||||
throw new Error('Prompt cannot be empty.');
|
||||
if (useResearch && !isApiKeySet('perplexity', session)) {
|
||||
report(
|
||||
'warn',
|
||||
'Perplexity research requested but API key not set. Falling back.'
|
||||
);
|
||||
if (outputFormat === 'text')
|
||||
console.log(
|
||||
chalk.yellow('Perplexity AI not available. Falling back to main AI.')
|
||||
);
|
||||
useResearch = false;
|
||||
}
|
||||
if (!fs.existsSync(tasksPath))
|
||||
throw new Error(`Tasks file not found: ${tasksPath}`);
|
||||
// --- End Input Validations ---
|
||||
|
||||
// --- Task Loading and Status Check (Keep existing) ---
|
||||
const data = readJSON(tasksPath);
|
||||
if (!data || !data.tasks)
|
||||
throw new Error(`No valid tasks found in ${tasksPath}.`);
|
||||
const taskIndex = data.tasks.findIndex((task) => task.id === taskId);
|
||||
if (taskIndex === -1) throw new Error(`Task with ID ${taskId} not found.`);
|
||||
const taskToUpdate = data.tasks[taskIndex];
|
||||
if (taskToUpdate.status === 'done' || taskToUpdate.status === 'completed') {
|
||||
report(
|
||||
'warn',
|
||||
`Task ${taskId} is already marked as done and cannot be updated`
|
||||
);
|
||||
|
||||
// Only show warning box for text output (CLI)
|
||||
if (outputFormat === 'text') {
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.yellow(
|
||||
`Task ${taskId} is already marked as ${taskToUpdate.status} and cannot be updated.`
|
||||
) +
|
||||
'\n\n' +
|
||||
chalk.white(
|
||||
'Completed tasks are locked to maintain consistency. To modify a completed task, you must first:'
|
||||
) +
|
||||
'\n' +
|
||||
chalk.white(
|
||||
'1. Change its status to "pending" or "in-progress"'
|
||||
) +
|
||||
'\n' +
|
||||
chalk.white('2. Then run the update-task command'),
|
||||
{ padding: 1, borderColor: 'yellow', borderStyle: 'round' }
|
||||
)
|
||||
);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
// --- End Task Loading ---
|
||||
|
||||
// --- Display Task Info (CLI Only - Keep existing) ---
|
||||
if (outputFormat === 'text') {
|
||||
// Show the task that will be updated
|
||||
const table = new Table({
|
||||
head: [
|
||||
chalk.cyan.bold('ID'),
|
||||
chalk.cyan.bold('Title'),
|
||||
chalk.cyan.bold('Status')
|
||||
],
|
||||
colWidths: [5, 60, 10]
|
||||
});
|
||||
|
||||
table.push([
|
||||
taskToUpdate.id,
|
||||
truncate(taskToUpdate.title, 57),
|
||||
getStatusWithColor(taskToUpdate.status)
|
||||
]);
|
||||
|
||||
console.log(
|
||||
boxen(chalk.white.bold(`Updating Task #${taskId}`), {
|
||||
padding: 1,
|
||||
borderColor: 'blue',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1, bottom: 0 }
|
||||
})
|
||||
);
|
||||
|
||||
console.log(table.toString());
|
||||
|
||||
// Display a message about how completed subtasks are handled
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.cyan.bold('How Completed Subtasks Are Handled:') +
|
||||
'\n\n' +
|
||||
chalk.white(
|
||||
'• Subtasks marked as "done" or "completed" will be preserved\n'
|
||||
) +
|
||||
chalk.white(
|
||||
'• New subtasks will build upon what has already been completed\n'
|
||||
) +
|
||||
chalk.white(
|
||||
'• If completed work needs revision, a new subtask will be created instead of modifying done items\n'
|
||||
) +
|
||||
chalk.white(
|
||||
'• This approach maintains a clear record of completed work and new requirements'
|
||||
),
|
||||
{
|
||||
padding: 1,
|
||||
borderColor: 'blue',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1, bottom: 1 }
|
||||
}
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
// --- Build Prompts (Keep EXACT original prompts) ---
|
||||
const systemPrompt = `You are an AI assistant helping to update a software development task based on new context.
|
||||
You will be given a task and a prompt describing changes or new implementation details.
|
||||
Your job is to update the task to reflect these changes, while preserving its basic structure.
|
||||
|
||||
Guidelines:
|
||||
1. VERY IMPORTANT: NEVER change the title of the task - keep it exactly as is
|
||||
2. Maintain the same ID, status, and dependencies unless specifically mentioned in the prompt
|
||||
3. Update the description, details, and test strategy to reflect the new information
|
||||
4. Do not change anything unnecessarily - just adapt what needs to change based on the prompt
|
||||
5. Return a complete valid JSON object representing the updated task
|
||||
6. VERY IMPORTANT: Preserve all subtasks marked as "done" or "completed" - do not modify their content
|
||||
7. For tasks with completed subtasks, build upon what has already been done rather than rewriting everything
|
||||
8. If an existing completed subtask needs to be changed/undone based on the new context, DO NOT modify it directly
|
||||
9. Instead, add a new subtask that clearly indicates what needs to be changed or replaced
|
||||
10. Use the existence of completed subtasks as an opportunity to make new subtasks more specific and targeted
|
||||
11. Ensure any new subtasks have unique IDs that don't conflict with existing ones
|
||||
|
||||
The changes described in the prompt should be thoughtfully applied to make the task more accurate and actionable.`;
|
||||
|
||||
const taskDataString = JSON.stringify(taskToUpdate, null, 2); // Use original task data
|
||||
const userPrompt = `Here is the task to update:\n${taskDataString}\n\nPlease update this task based on the following new context:\n${prompt}\n\nIMPORTANT: In the task JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.\n\nReturn only the updated task as a valid JSON object.`;
|
||||
// --- End Build Prompts ---
|
||||
|
||||
let updatedTask;
|
||||
let loadingIndicator = null;
|
||||
if (outputFormat === 'text') {
|
||||
loadingIndicator = startLoadingIndicator(
|
||||
useResearch ? 'Updating task with research...' : 'Updating task...'
|
||||
);
|
||||
}
|
||||
|
||||
let responseText = '';
|
||||
try {
|
||||
// --- Call Unified AI Service (generateTextService) ---
|
||||
const role = useResearch ? 'research' : 'main';
|
||||
report('info', `Using AI service with role: ${role}`);
|
||||
|
||||
responseText = await generateTextService({
|
||||
prompt: userPrompt,
|
||||
systemPrompt: systemPrompt,
|
||||
role,
|
||||
session
|
||||
});
|
||||
report('success', 'Successfully received text response from AI service');
|
||||
// --- End AI Service Call ---
|
||||
} catch (error) {
|
||||
// Catch errors from generateTextService
|
||||
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
||||
report('error', `Error during AI service call: ${error.message}`);
|
||||
if (error.message.includes('API key')) {
|
||||
report('error', 'Please ensure API keys are configured correctly.');
|
||||
}
|
||||
throw error; // Re-throw error
|
||||
} finally {
|
||||
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
||||
}
|
||||
|
||||
// --- Parse and Validate Response ---
|
||||
try {
|
||||
// Pass logFn and isMCP flag to the parser
|
||||
updatedTask = parseUpdatedTaskFromText(
|
||||
responseText,
|
||||
taskId,
|
||||
logFn,
|
||||
isMCP
|
||||
);
|
||||
} catch (parseError) {
|
||||
report(
|
||||
'error',
|
||||
`Failed to parse updated task from AI response: ${parseError.message}`
|
||||
);
|
||||
if (getDebugFlag(session)) {
|
||||
report('error', `Raw AI Response:\n${responseText}`);
|
||||
}
|
||||
throw new Error(
|
||||
`Failed to parse valid updated task from AI response: ${parseError.message}`
|
||||
);
|
||||
}
|
||||
// --- End Parse/Validate ---
|
||||
|
||||
// --- Task Validation/Correction (Keep existing logic) ---
|
||||
if (!updatedTask || typeof updatedTask !== 'object')
|
||||
throw new Error('Received invalid task object from AI.');
|
||||
if (!updatedTask.title || !updatedTask.description)
|
||||
throw new Error('Updated task missing required fields.');
|
||||
// Preserve ID if AI changed it
|
||||
if (updatedTask.id !== taskId) {
|
||||
report('warn', `AI changed task ID. Restoring original ID ${taskId}.`);
|
||||
updatedTask.id = taskId;
|
||||
}
|
||||
// Preserve status if AI changed it
|
||||
if (
|
||||
updatedTask.status !== taskToUpdate.status &&
|
||||
!prompt.toLowerCase().includes('status')
|
||||
) {
|
||||
report(
|
||||
'warn',
|
||||
`AI changed task status. Restoring original status '${taskToUpdate.status}'.`
|
||||
);
|
||||
updatedTask.status = taskToUpdate.status;
|
||||
}
|
||||
// Preserve completed subtasks (Keep existing logic)
|
||||
if (taskToUpdate.subtasks?.length > 0) {
|
||||
if (!updatedTask.subtasks) {
|
||||
report('warn', 'Subtasks removed by AI. Restoring original subtasks.');
|
||||
updatedTask.subtasks = taskToUpdate.subtasks;
|
||||
} else {
|
||||
const completedOriginal = taskToUpdate.subtasks.filter(
|
||||
(st) => st.status === 'done' || st.status === 'completed'
|
||||
);
|
||||
completedOriginal.forEach((compSub) => {
|
||||
const updatedSub = updatedTask.subtasks.find(
|
||||
(st) => st.id === compSub.id
|
||||
);
|
||||
if (
|
||||
!updatedSub ||
|
||||
JSON.stringify(updatedSub) !== JSON.stringify(compSub)
|
||||
) {
|
||||
report(
|
||||
'warn',
|
||||
`Completed subtask ${compSub.id} was modified or removed. Restoring.`
|
||||
);
|
||||
// Remove potentially modified version
|
||||
updatedTask.subtasks = updatedTask.subtasks.filter(
|
||||
(st) => st.id !== compSub.id
|
||||
);
|
||||
// Add back original
|
||||
updatedTask.subtasks.push(compSub);
|
||||
}
|
||||
});
|
||||
// Deduplicate just in case
|
||||
const subtaskIds = new Set();
|
||||
updatedTask.subtasks = updatedTask.subtasks.filter((st) => {
|
||||
if (!subtaskIds.has(st.id)) {
|
||||
subtaskIds.add(st.id);
|
||||
return true;
|
||||
}
|
||||
report('warn', `Duplicate subtask ID ${st.id} removed.`);
|
||||
return false;
|
||||
});
|
||||
}
|
||||
}
|
||||
// --- End Task Validation/Correction ---
|
||||
|
||||
// --- Update Task Data (Keep existing) ---
|
||||
data.tasks[taskIndex] = updatedTask;
|
||||
// --- End Update Task Data ---
|
||||
|
||||
// --- Write File and Generate (Keep existing) ---
|
||||
writeJSON(tasksPath, data);
|
||||
report('success', `Successfully updated task ${taskId}`);
|
||||
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
// --- End Write File ---
|
||||
|
||||
// --- Final CLI Output (Keep existing) ---
|
||||
if (outputFormat === 'text') {
|
||||
/* ... success boxen ... */
|
||||
}
|
||||
// --- End Final CLI Output ---
|
||||
|
||||
return updatedTask; // Return the updated task
|
||||
} catch (error) {
|
||||
// General error catch
|
||||
// --- General Error Handling (Keep existing) ---
|
||||
report('error', `Error updating task: ${error.message}`);
|
||||
if (outputFormat === 'text') {
|
||||
console.error(chalk.red(`Error: ${error.message}`));
|
||||
// ... helpful hints ...
|
||||
if (getDebugFlag(session)) console.error(error);
|
||||
process.exit(1);
|
||||
} else {
|
||||
throw error; // Re-throw for MCP
|
||||
}
|
||||
return null; // Indicate failure in CLI case if process doesn't exit
|
||||
// --- End General Error Handling ---
|
||||
}
|
||||
}
|
||||
|
||||
export default updateTaskById;
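
// Illustrative usage sketch (not part of the original file). The import path, tasks
// file path, ID and prompt below are hypothetical, inferred from the surrounding diff.
//
//   import updateTaskById from './task-manager/update-task-by-id.js';
//
//   const updated = await updateTaskById(
//     'tasks/tasks.json',                    // tasksPath
//     4,                                     // taskId to update
//     'Switch the cache layer to Redis',     // prompt with new context
//     false,                                 // useResearch
//     {},                                    // context (no mcpLog -> CLI mode)
//     'text'                                 // outputFormat
//   );
//   // Resolves with the updated task object, or null if the task was skipped (e.g. already done).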
|
||||
445
scripts/modules/task-manager/update-tasks.js
Normal file
445
scripts/modules/task-manager/update-tasks.js
Normal file
@@ -0,0 +1,445 @@
import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';
import { z } from 'zod'; // Keep Zod for post-parsing validation

import {
	log as consoleLog,
	readJSON,
	writeJSON,
	truncate,
	isSilentMode
} from '../utils.js';

import {
	getStatusWithColor,
	startLoadingIndicator,
	stopLoadingIndicator
} from '../ui.js';

import { getDebugFlag } from '../config-manager.js';
import generateTaskFiles from './generate-task-files.js';
import { generateTextService } from '../ai-services-unified.js';

// Zod schema for validating the structure of tasks AFTER parsing
const updatedTaskSchema = z
	.object({
		id: z.number().int(),
		title: z.string(),
		description: z.string(),
		status: z.string(),
		dependencies: z.array(z.union([z.number().int(), z.string()])),
		priority: z.string().optional(),
		details: z.string().optional(),
		testStrategy: z.string().optional(),
		subtasks: z.array(z.any()).optional() // Keep subtasks flexible for now
	})
	.strip(); // Allow potential extra fields during parsing if needed, then validate structure
const updatedTaskArraySchema = z.array(updatedTaskSchema);

/**
|
||||
* Parses an array of task objects from AI's text response.
|
||||
* @param {string} text - Response text from AI.
|
||||
* @param {number} expectedCount - Expected number of tasks.
|
||||
* @param {Function | Object} logFn - The logging function (consoleLog) or MCP log object.
|
||||
* @param {boolean} isMCP - Flag indicating if logFn is MCP logger.
|
||||
* @returns {Array} Parsed and validated tasks array.
|
||||
* @throws {Error} If parsing or validation fails.
|
||||
*/
|
||||
function parseUpdatedTasksFromText(text, expectedCount, logFn, isMCP) {
|
||||
// Helper for consistent logging inside parser
|
||||
const report = (level, ...args) => {
|
||||
if (isMCP) {
|
||||
if (typeof logFn[level] === 'function') logFn[level](...args);
|
||||
else logFn.info(...args);
|
||||
} else if (!isSilentMode()) {
|
||||
// Check silent mode for consoleLog
|
||||
consoleLog(level, ...args);
|
||||
}
|
||||
};
|
||||
|
||||
report(
|
||||
'info',
|
||||
'Attempting to parse updated tasks array from text response...'
|
||||
);
|
||||
if (!text || text.trim() === '')
|
||||
throw new Error('AI response text is empty.');
|
||||
|
||||
let cleanedResponse = text.trim();
|
||||
const originalResponseForDebug = cleanedResponse;
|
||||
|
||||
// Extract from Markdown code block first
|
||||
const codeBlockMatch = cleanedResponse.match(
|
||||
/```(?:json)?\s*([\s\S]*?)\s*```/
|
||||
);
|
||||
if (codeBlockMatch) {
|
||||
cleanedResponse = codeBlockMatch[1].trim();
|
||||
report('info', 'Extracted JSON content from Markdown code block.');
|
||||
} else {
|
||||
// If no code block, find first '[' and last ']' for the array
|
||||
const firstBracket = cleanedResponse.indexOf('[');
|
||||
const lastBracket = cleanedResponse.lastIndexOf(']');
|
||||
if (firstBracket !== -1 && lastBracket > firstBracket) {
|
||||
cleanedResponse = cleanedResponse.substring(
|
||||
firstBracket,
|
||||
lastBracket + 1
|
||||
);
|
||||
report('info', 'Extracted content between first [ and last ].');
|
||||
} else {
|
||||
report(
|
||||
'warn',
|
||||
'Response does not appear to contain a JSON array structure. Parsing raw response.'
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Attempt to parse the array
|
||||
let parsedTasks;
|
||||
try {
|
||||
parsedTasks = JSON.parse(cleanedResponse);
|
||||
} catch (parseError) {
|
||||
report('error', `Failed to parse JSON array: ${parseError.message}`);
|
||||
report(
|
||||
'error',
|
||||
`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`
|
||||
);
|
||||
report(
|
||||
'error',
|
||||
`Original Raw Response (first 500 chars): ${originalResponseForDebug.substring(0, 500)}`
|
||||
);
|
||||
throw new Error(
|
||||
`Failed to parse JSON response array: ${parseError.message}`
|
||||
);
|
||||
}
|
||||
|
||||
// Validate Array structure
|
||||
if (!Array.isArray(parsedTasks)) {
|
||||
report(
|
||||
'error',
|
||||
`Parsed content is not an array. Type: ${typeof parsedTasks}`
|
||||
);
|
||||
report(
|
||||
'error',
|
||||
`Parsed content sample: ${JSON.stringify(parsedTasks).substring(0, 200)}`
|
||||
);
|
||||
throw new Error('Parsed AI response is not a valid JSON array.');
|
||||
}
|
||||
|
||||
report('info', `Successfully parsed ${parsedTasks.length} potential tasks.`);
|
||||
if (expectedCount && parsedTasks.length !== expectedCount) {
|
||||
report(
|
||||
'warn',
|
||||
`Expected ${expectedCount} tasks, but parsed ${parsedTasks.length}.`
|
||||
);
|
||||
}
|
||||
|
||||
// Validate each task object using Zod
|
||||
const validationResult = updatedTaskArraySchema.safeParse(parsedTasks);
|
||||
if (!validationResult.success) {
|
||||
report('error', 'Parsed task array failed Zod validation.');
|
||||
validationResult.error.errors.forEach((err) => {
|
||||
report('error', ` - Path '${err.path.join('.')}': ${err.message}`);
|
||||
});
|
||||
throw new Error(
|
||||
`AI response failed task structure validation: ${validationResult.error.message}`
|
||||
);
|
||||
}
|
||||
|
||||
report('info', 'Successfully validated task structure.');
|
||||
// Return the validated data, potentially filtering/adjusting length if needed
|
||||
return validationResult.data.slice(
|
||||
0,
|
||||
expectedCount || validationResult.data.length
|
||||
);
|
||||
}
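
// Illustrative note (not part of the original file): when the model returns a
// different number of tasks than requested, this parser only logs a warning and
// then trims the validated result to expectedCount via slice(). A hypothetical example:
//
//   // AI returned 3 tasks but 2 were requested -> a 'warn' log is emitted and
//   // only the first 2 validated tasks are returned.
//   const tasks = parseUpdatedTasksFromText(responseWithThreeTasks, 2, console.log, false);
//   // tasks.length === 2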
|
||||
|
||||
/**
|
||||
* Update tasks based on new context using the unified AI service.
|
||||
* @param {string} tasksPath - Path to the tasks.json file
|
||||
* @param {number} fromId - Task ID to start updating from
|
||||
* @param {string} prompt - Prompt with new context
|
||||
* @param {boolean} [useResearch=false] - Whether to use the research AI role.
|
||||
* @param {Object} context - Context object containing session and mcpLog.
|
||||
* @param {Object} [context.session] - Session object from MCP server.
|
||||
* @param {Object} [context.mcpLog] - MCP logger object.
|
||||
* @param {string} [outputFormat='text'] - Output format ('text' or 'json').
|
||||
*/
|
||||
async function updateTasks(
|
||||
tasksPath,
|
||||
fromId,
|
||||
prompt,
|
||||
useResearch = false,
|
||||
context = {},
|
||||
outputFormat = 'text' // Default to text for CLI
|
||||
) {
|
||||
const { session, mcpLog } = context;
|
||||
// Use mcpLog if available, otherwise use the imported consoleLog function
|
||||
const logFn = mcpLog || consoleLog;
|
||||
// Flag to easily check which logger type we have
|
||||
const isMCP = !!mcpLog;
|
||||
|
||||
if (isMCP)
|
||||
logFn.info(`updateTasks called with context: session=${!!session}`);
|
||||
else logFn('info', `updateTasks called`); // CLI log
|
||||
|
||||
try {
|
||||
if (isMCP) logFn.info(`Updating tasks from ID ${fromId}`);
|
||||
else
|
||||
logFn(
|
||||
'info',
|
||||
`Updating tasks from ID ${fromId} with prompt: "${prompt}"`
|
||||
);
|
||||
|
||||
// --- Task Loading/Filtering (Unchanged) ---
|
||||
const data = readJSON(tasksPath);
|
||||
if (!data || !data.tasks)
|
||||
throw new Error(`No valid tasks found in ${tasksPath}`);
|
||||
const tasksToUpdate = data.tasks.filter(
|
||||
(task) => task.id >= fromId && task.status !== 'done'
|
||||
);
|
||||
if (tasksToUpdate.length === 0) {
|
||||
if (isMCP)
|
||||
logFn.info(`No tasks to update (ID >= ${fromId} and not 'done').`);
|
||||
else
|
||||
logFn('info', `No tasks to update (ID >= ${fromId} and not 'done').`);
|
||||
if (outputFormat === 'text') console.log(/* yellow message */);
|
||||
return; // Nothing to do
|
||||
}
|
||||
// --- End Task Loading/Filtering ---
|
||||
|
||||
// --- Display Tasks to Update (CLI Only - Unchanged) ---
|
||||
if (outputFormat === 'text') {
|
||||
// Show the tasks that will be updated
|
||||
const table = new Table({
|
||||
head: [
|
||||
chalk.cyan.bold('ID'),
|
||||
chalk.cyan.bold('Title'),
|
||||
chalk.cyan.bold('Status')
|
||||
],
|
||||
colWidths: [5, 60, 10]
|
||||
});
|
||||
|
||||
tasksToUpdate.forEach((task) => {
|
||||
table.push([
|
||||
task.id,
|
||||
truncate(task.title, 57),
|
||||
getStatusWithColor(task.status)
|
||||
]);
|
||||
});
|
||||
|
||||
console.log(
|
||||
boxen(chalk.white.bold(`Updating ${tasksToUpdate.length} tasks`), {
|
||||
padding: 1,
|
||||
borderColor: 'blue',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1, bottom: 0 }
|
||||
})
|
||||
);
|
||||
|
||||
console.log(table.toString());
|
||||
|
||||
// Display a message about how completed subtasks are handled
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.cyan.bold('How Completed Subtasks Are Handled:') +
|
||||
'\n\n' +
|
||||
chalk.white(
|
||||
'• Subtasks marked as "done" or "completed" will be preserved\n'
|
||||
) +
|
||||
chalk.white(
|
||||
'• New subtasks will build upon what has already been completed\n'
|
||||
) +
|
||||
chalk.white(
|
||||
'• If completed work needs revision, a new subtask will be created instead of modifying done items\n'
|
||||
) +
|
||||
chalk.white(
|
||||
'• This approach maintains a clear record of completed work and new requirements'
|
||||
),
|
||||
{
|
||||
padding: 1,
|
||||
borderColor: 'blue',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1, bottom: 1 }
|
||||
}
|
||||
)
|
||||
);
|
||||
}
|
||||
// --- End Display Tasks ---
|
||||
|
||||
// --- Build Prompts (Unchanged Core Logic) ---
|
||||
// Keep the original system prompt logic
|
||||
const systemPrompt = `You are an AI assistant helping to update software development tasks based on new context.
|
||||
You will be given a set of tasks and a prompt describing changes or new implementation details.
|
||||
Your job is to update the tasks to reflect these changes, while preserving their basic structure.
|
||||
|
||||
Guidelines:
|
||||
1. Maintain the same IDs, statuses, and dependencies unless specifically mentioned in the prompt
|
||||
2. Update titles, descriptions, details, and test strategies to reflect the new information
|
||||
3. Do not change anything unnecessarily - just adapt what needs to change based on the prompt
|
||||
4. You should return ALL the tasks in order, not just the modified ones
|
||||
5. Return a complete valid JSON object with the updated tasks array
|
||||
6. VERY IMPORTANT: Preserve all subtasks marked as "done" or "completed" - do not modify their content
|
||||
7. For tasks with completed subtasks, build upon what has already been done rather than rewriting everything
|
||||
8. If an existing completed subtask needs to be changed/undone based on the new context, DO NOT modify it directly
|
||||
9. Instead, add a new subtask that clearly indicates what needs to be changed or replaced
|
||||
10. Use the existence of completed subtasks as an opportunity to make new subtasks more specific and targeted
|
||||
|
||||
The changes described in the prompt should be applied to ALL tasks in the list.`;
|
||||
|
||||
// Keep the original user prompt logic
|
||||
const taskDataString = JSON.stringify(tasksToUpdate, null, 2);
|
||||
const userPrompt = `Here are the tasks to update:\n${taskDataString}\n\nPlease update these tasks based on the following new context:\n${prompt}\n\nIMPORTANT: In the tasks JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.\n\nReturn only the updated tasks as a valid JSON array.`;
|
||||
// --- End Build Prompts ---
|
||||
|
||||
let loadingIndicator = null;
|
||||
if (outputFormat === 'text') {
|
||||
loadingIndicator = startLoadingIndicator(
|
||||
'Calling AI service to update tasks...'
|
||||
);
|
||||
}
|
||||
|
||||
let responseText = '';
|
||||
let updatedTasks;
|
||||
|
||||
try {
|
||||
// --- Call Unified AI Service ---
|
||||
const role = useResearch ? 'research' : 'main';
|
||||
if (isMCP) logFn.info(`Using AI service with role: ${role}`);
|
||||
else logFn('info', `Using AI service with role: ${role}`);
|
||||
|
||||
responseText = await generateTextService({
|
||||
prompt: userPrompt,
|
||||
systemPrompt: systemPrompt,
|
||||
role,
|
||||
session
|
||||
});
|
||||
if (isMCP) logFn.info('Successfully received text response');
|
||||
else
|
||||
logFn('success', 'Successfully received text response via AI service');
|
||||
// --- End AI Service Call ---
|
||||
} catch (error) {
|
||||
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
||||
if (isMCP) logFn.error(`Error during AI service call: ${error.message}`);
|
||||
else logFn('error', `Error during AI service call: ${error.message}`);
|
||||
if (error.message.includes('API key')) {
|
||||
if (isMCP)
|
||||
logFn.error(
|
||||
'Please ensure API keys are configured correctly in .env or mcp.json.'
|
||||
);
|
||||
else
|
||||
logFn(
|
||||
'error',
|
||||
'Please ensure API keys are configured correctly in .env or mcp.json.'
|
||||
);
|
||||
}
|
||||
throw error; // Re-throw error
|
||||
} finally {
|
||||
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
||||
}
|
||||
|
||||
// --- Parse and Validate Response ---
|
||||
try {
|
||||
updatedTasks = parseUpdatedTasksFromText(
|
||||
responseText,
|
||||
tasksToUpdate.length,
|
||||
logFn,
|
||||
isMCP
|
||||
);
|
||||
} catch (parseError) {
|
||||
if (isMCP)
|
||||
logFn.error(
|
||||
`Failed to parse updated tasks from AI response: ${parseError.message}`
|
||||
);
|
||||
else
|
||||
logFn(
|
||||
'error',
|
||||
`Failed to parse updated tasks from AI response: ${parseError.message}`
|
||||
);
|
||||
if (getDebugFlag(session)) {
|
||||
if (isMCP) logFn.error(`Raw AI Response:\n${responseText}`);
|
||||
else logFn('error', `Raw AI Response:\n${responseText}`);
|
||||
}
|
||||
throw new Error(
|
||||
`Failed to parse valid updated tasks from AI response: ${parseError.message}`
|
||||
);
|
||||
}
|
||||
// --- End Parse/Validate ---
|
||||
|
||||
// --- Update Tasks Data (Unchanged) ---
|
||||
if (!Array.isArray(updatedTasks)) {
|
||||
// Should be caught by parser, but extra check
|
||||
throw new Error('Parsed AI response for updated tasks was not an array.');
|
||||
}
|
||||
if (isMCP)
|
||||
logFn.info(`Received ${updatedTasks.length} updated tasks from AI.`);
|
||||
else
|
||||
logFn('info', `Received ${updatedTasks.length} updated tasks from AI.`);
|
||||
// Create a map for efficient lookup
|
||||
const updatedTasksMap = new Map(
|
||||
updatedTasks.map((task) => [task.id, task])
|
||||
);
|
||||
|
||||
// Iterate through the original data and update based on the map
|
||||
let actualUpdateCount = 0;
|
||||
data.tasks.forEach((task, index) => {
|
||||
if (updatedTasksMap.has(task.id)) {
|
||||
// Only update if the task was part of the set sent to AI
|
||||
data.tasks[index] = updatedTasksMap.get(task.id);
|
||||
actualUpdateCount++;
|
||||
}
|
||||
});
|
||||
if (isMCP)
|
||||
logFn.info(
|
||||
`Applied updates to ${actualUpdateCount} tasks in the dataset.`
|
||||
);
|
||||
else
|
||||
logFn(
|
||||
'info',
|
||||
`Applied updates to ${actualUpdateCount} tasks in the dataset.`
|
||||
);
|
||||
// --- End Update Tasks Data ---
|
||||
|
||||
// --- Write File and Generate (Unchanged) ---
|
||||
writeJSON(tasksPath, data);
|
||||
if (isMCP)
|
||||
logFn.info(
|
||||
`Successfully updated ${actualUpdateCount} tasks in ${tasksPath}`
|
||||
);
|
||||
else
|
||||
logFn(
|
||||
'success',
|
||||
`Successfully updated ${actualUpdateCount} tasks in ${tasksPath}`
|
||||
);
|
||||
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
// --- End Write File ---
|
||||
|
||||
// --- Final CLI Output (Unchanged) ---
|
||||
if (outputFormat === 'text') {
|
||||
console.log(
|
||||
boxen(chalk.green(`Successfully updated ${actualUpdateCount} tasks`), {
|
||||
padding: 1,
|
||||
borderColor: 'green',
|
||||
borderStyle: 'round'
|
||||
})
|
||||
);
|
||||
}
|
||||
// --- End Final CLI Output ---
|
||||
} catch (error) {
|
||||
// --- General Error Handling (Unchanged) ---
|
||||
if (isMCP) logFn.error(`Error updating tasks: ${error.message}`);
|
||||
else logFn('error', `Error updating tasks: ${error.message}`);
|
||||
if (outputFormat === 'text') {
|
||||
console.error(chalk.red(`Error: ${error.message}`));
|
||||
if (getDebugFlag(session)) {
|
||||
console.error(error);
|
||||
}
|
||||
process.exit(1);
|
||||
} else {
|
||||
throw error; // Re-throw for MCP/programmatic callers
|
||||
}
|
||||
// --- End General Error Handling ---
|
||||
}
|
||||
}
|
||||
|
||||
export default updateTasks;
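
// Illustrative usage sketch (not part of the original file). The import path, tasks
// file path and ID are hypothetical; only tasks with id >= fromId whose status is not
// 'done' are sent to the AI for rewriting.
//
//   import updateTasks from './task-manager/update-tasks.js';
//
//   await updateTasks(
//     'tasks/tasks.json',                          // tasksPath
//     5,                                           // fromId: update task 5 and later
//     'The API now uses GraphQL instead of REST',  // prompt with new context
//     false,                                       // useResearch
//     {},                                          // context (CLI mode, no mcpLog)
//     'text'                                       // outputFormat
//   );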
|
||||
File diff suppressed because it is too large
@@ -6,22 +6,60 @@
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import chalk from 'chalk';
|
||||
// Import specific config getters needed here
|
||||
import { getLogLevel, getDebugFlag } from './config-manager.js';
|
||||
|
||||
// Global silent mode flag
|
||||
let silentMode = false;
|
||||
|
||||
// Configuration and constants
|
||||
const CONFIG = {
|
||||
model: process.env.MODEL || 'claude-3-7-sonnet-20250219',
|
||||
maxTokens: parseInt(process.env.MAX_TOKENS || '4000'),
|
||||
temperature: parseFloat(process.env.TEMPERATURE || '0.7'),
|
||||
debug: process.env.DEBUG === 'true',
|
||||
logLevel: process.env.LOG_LEVEL || 'info',
|
||||
defaultSubtasks: parseInt(process.env.DEFAULT_SUBTASKS || '3'),
|
||||
defaultPriority: process.env.DEFAULT_PRIORITY || 'medium',
|
||||
projectName: process.env.PROJECT_NAME || 'Task Master',
|
||||
projectVersion: '1.5.0' // Hardcoded version - ALWAYS use this value, ignore environment variable
|
||||
};
|
||||
// --- Environment Variable Resolution Utility ---
|
||||
/**
 * Resolves an environment variable by checking process.env first, then session.env.
 * @param {string} varName - The name of the environment variable.
 * @param {Object|null} session - The MCP session object (optional).
 * @returns {string|undefined} The value of the environment variable or undefined if not found.
 */
function resolveEnvVariable(varName, session) {
	// Ensure session and session.env exist before attempting access
	const sessionValue =
		session && session.env ? session.env[varName] : undefined;
	return process.env[varName] ?? sessionValue;
}
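
// Illustrative example (not part of the original file) of the precedence this helper
// implements: process.env wins, session.env is the fallback. Key values are hypothetical.
//
//   process.env.ANTHROPIC_API_KEY = 'sk-from-shell';
//   const session = { env: { ANTHROPIC_API_KEY: 'sk-from-mcp', PERPLEXITY_API_KEY: 'pplx-from-mcp' } };
//   resolveEnvVariable('ANTHROPIC_API_KEY', session);  // 'sk-from-shell'  (process.env first)
//   resolveEnvVariable('PERPLEXITY_API_KEY', session); // 'pplx-from-mcp' (falls back to session.env)
//   resolveEnvVariable('MISSING_KEY', session);        // undefined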
|
||||
|
||||
// --- Project Root Finding Utility ---
|
||||
/**
|
||||
* Finds the project root directory by searching upwards from a given starting point
|
||||
* for a marker file or directory (e.g., 'package.json', '.git').
|
||||
* @param {string} [startPath=process.cwd()] - The directory to start searching from.
|
||||
* @param {string[]} [markers=['package.json', '.git', '.taskmasterconfig']] - Marker files/dirs to look for.
|
||||
* @returns {string|null} The path to the project root directory, or null if not found.
|
||||
*/
|
||||
function findProjectRoot(
|
||||
startPath = process.cwd(),
|
||||
markers = ['package.json', '.git', '.taskmasterconfig']
|
||||
) {
|
||||
let currentPath = path.resolve(startPath);
|
||||
while (true) {
|
||||
for (const marker of markers) {
|
||||
if (fs.existsSync(path.join(currentPath, marker))) {
|
||||
return currentPath;
|
||||
}
|
||||
}
|
||||
const parentPath = path.dirname(currentPath);
|
||||
if (parentPath === currentPath) {
|
||||
// Reached the filesystem root
|
||||
return null;
|
||||
}
|
||||
currentPath = parentPath;
|
||||
}
|
||||
}
|
||||
|
||||
// --- Dynamic Configuration Function --- (REMOVED)
|
||||
/*
|
||||
function getConfig(session = null) {
|
||||
// ... implementation removed ...
|
||||
}
|
||||
*/
|
||||
|
||||
// Set up logging based on log level
|
||||
const LOG_LEVELS = {
|
||||
@@ -73,6 +111,9 @@ function log(level, ...args) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Get log level dynamically from config-manager
|
||||
const configLevel = getLogLevel() || 'info'; // Use getter
|
||||
|
||||
// Use text prefixes instead of emojis
|
||||
const prefixes = {
|
||||
debug: chalk.gray('[DEBUG]'),
|
||||
@@ -84,7 +125,6 @@ function log(level, ...args) {
|
||||
|
||||
// Ensure level exists, default to info if not
|
||||
const currentLevel = LOG_LEVELS.hasOwnProperty(level) ? level : 'info';
|
||||
const configLevel = CONFIG.logLevel || 'info'; // Ensure configLevel has a default
|
||||
|
||||
// Check log level configuration
|
||||
if (
|
||||
@@ -106,12 +146,15 @@ function log(level, ...args) {
|
||||
* @returns {Object|null} Parsed JSON data or null if error occurs
|
||||
*/
|
||||
function readJSON(filepath) {
|
||||
// Get debug flag dynamically from config-manager
|
||||
const isDebug = getDebugFlag();
|
||||
try {
|
||||
const rawData = fs.readFileSync(filepath, 'utf8');
|
||||
return JSON.parse(rawData);
|
||||
} catch (error) {
|
||||
log('error', `Error reading JSON file ${filepath}:`, error.message);
|
||||
if (CONFIG.debug) {
|
||||
if (isDebug) {
|
||||
// Use dynamic debug flag
|
||||
// Use log utility for debug output too
|
||||
log('error', 'Full error details:', error);
|
||||
}
|
||||
@@ -125,6 +168,8 @@ function readJSON(filepath) {
|
||||
* @param {Object} data - Data to write
|
||||
*/
|
||||
function writeJSON(filepath, data) {
|
||||
// Get debug flag dynamically from config-manager
|
||||
const isDebug = getDebugFlag();
|
||||
try {
|
||||
const dir = path.dirname(filepath);
|
||||
if (!fs.existsSync(dir)) {
|
||||
@@ -133,7 +178,8 @@ function writeJSON(filepath, data) {
|
||||
fs.writeFileSync(filepath, JSON.stringify(data, null, 2), 'utf8');
|
||||
} catch (error) {
|
||||
log('error', `Error writing JSON file ${filepath}:`, error.message);
|
||||
if (CONFIG.debug) {
|
||||
if (isDebug) {
|
||||
// Use dynamic debug flag
|
||||
// Use log utility for debug output too
|
||||
log('error', 'Full error details:', error);
|
||||
}
|
||||
@@ -156,6 +202,8 @@ function sanitizePrompt(prompt) {
|
||||
* @returns {Object|null} The parsed complexity report or null if not found
|
||||
*/
|
||||
function readComplexityReport(customPath = null) {
|
||||
// Get debug flag dynamically from config-manager
|
||||
const isDebug = getDebugFlag();
|
||||
try {
|
||||
const reportPath =
|
||||
customPath ||
|
||||
@@ -168,6 +216,11 @@ function readComplexityReport(customPath = null) {
|
||||
return JSON.parse(reportData);
|
||||
} catch (error) {
|
||||
log('warn', `Could not read complexity report: ${error.message}`);
|
||||
// Optionally log full error in debug mode
|
||||
if (isDebug) {
|
||||
// Use dynamic debug flag
|
||||
log('error', 'Full error details:', error);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
@@ -246,28 +299,37 @@ function formatTaskId(id) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Finds a task by ID in the tasks array
|
||||
* Finds a task by ID in the tasks array. Optionally filters subtasks by status.
|
||||
* @param {Array} tasks - The tasks array
|
||||
* @param {string|number} taskId - The task ID to find
|
||||
* @param {Object|null} complexityReport - Optional pre-loaded complexity report
|
||||
* @returns {Object|null} The task object or null if not found
|
||||
* @param {string} [statusFilter] - Optional status to filter subtasks by
|
||||
* @returns {{task: Object|null, originalSubtaskCount: number|null}} The task object (potentially with filtered subtasks) and the original subtask count if filtered, or nulls if not found.
|
||||
*/
|
||||
function findTaskById(tasks, taskId, complexityReport = null) {
|
||||
function findTaskById(
|
||||
tasks,
|
||||
taskId,
|
||||
complexityReport = null,
|
||||
statusFilter = null
|
||||
) {
|
||||
if (!taskId || !tasks || !Array.isArray(tasks)) {
|
||||
return null;
|
||||
return { task: null, originalSubtaskCount: null };
|
||||
}
|
||||
|
||||
let taskResult = null;
|
||||
let originalSubtaskCount = null;
|
||||
|
||||
// Check if it's a subtask ID (e.g., "1.2")
|
||||
if (typeof taskId === 'string' && taskId.includes('.')) {
|
||||
// If looking for a subtask, statusFilter doesn't apply directly here.
|
||||
const [parentId, subtaskId] = taskId
|
||||
.split('.')
|
||||
.map((id) => parseInt(id, 10));
|
||||
const parentTask = tasks.find((t) => t.id === parentId);
|
||||
|
||||
if (!parentTask || !parentTask.subtasks) {
|
||||
return null;
|
||||
return { task: null, originalSubtaskCount: null };
|
||||
}
|
||||
|
||||
const subtask = parentTask.subtasks.find((st) => st.id === subtaskId);
|
||||
@@ -279,19 +341,47 @@ function findTaskById(tasks, taskId, complexityReport = null) {
|
||||
status: parentTask.status
|
||||
};
|
||||
subtask.isSubtask = true;
|
||||
taskResult = subtask;
|
||||
}
|
||||
} else {
|
||||
const id = parseInt(taskId, 10);
|
||||
taskResult = tasks.find((t) => t.id === id) || null;
|
||||
|
||||
// If we found a task, check for complexity data
|
||||
if (subtask && complexityReport) {
|
||||
addComplexityToTask(subtask, complexityReport);
|
||||
}
|
||||
|
||||
taskResult = subtask;
|
||||
}
|
||||
|
||||
// If we found a task, check for complexity data
|
||||
// Find the main task
|
||||
const id = parseInt(taskId, 10);
|
||||
const task = tasks.find((t) => t.id === id) || null;
|
||||
|
||||
// If task not found, return nulls
|
||||
if (!task) {
|
||||
return { task: null, originalSubtaskCount: null };
|
||||
}
|
||||
|
||||
// If task found and statusFilter provided, filter its subtasks
|
||||
	if (statusFilter && task.subtasks && Array.isArray(task.subtasks)) {
		// Record the full count before filtering (assign to the outer variable;
		// re-declaring it with const here would shadow it and break the return value)
		originalSubtaskCount = task.subtasks.length;
		// Clone the task to avoid modifying the original array
		const filteredTask = { ...task };
		filteredTask.subtasks = task.subtasks.filter(
			(subtask) =>
				subtask.status &&
				subtask.status.toLowerCase() === statusFilter.toLowerCase()
		);

		taskResult = filteredTask;
	}
|
||||
|
||||
// If task found and complexityReport provided, add complexity data
|
||||
if (taskResult && complexityReport) {
|
||||
addComplexityToTask(taskResult, complexityReport);
|
||||
}
|
||||
|
||||
return taskResult;
|
||||
// Return the found task and original subtask count
|
||||
return { task: taskResult, originalSubtaskCount };
|
||||
}
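
// Illustrative example (not part of the original file) of the new return shape and
// the optional statusFilter. Task data below is hypothetical.
//
//   const tasks = [
//     { id: 1, title: 'Build CLI', status: 'pending', subtasks: [
//       { id: 1, title: 'Parse args', status: 'done' },
//       { id: 2, title: 'Add help text', status: 'pending' }
//     ] }
//   ];
//   const { task, originalSubtaskCount } = findTaskById(tasks, 1, null, 'pending');
//   // task.subtasks contains only the 'pending' subtask; originalSubtaskCount === 2
//   const { task: missing } = findTaskById(tasks, 99);
//   // missing === null (and originalSubtaskCount === null)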
|
||||
|
||||
/**
|
||||
@@ -418,6 +508,21 @@ function detectCamelCaseFlags(args) {
|
||||
// Export all utility functions and configuration
|
||||
export {
|
||||
CONFIG,
|
||||
// CONFIG, <-- Already Removed
|
||||
// getConfig <-- Removing now
|
||||
LOG_LEVELS,
|
||||
log,
|
||||
readJSON,
|
||||
writeJSON,
|
||||
sanitizePrompt,
|
||||
readComplexityReport,
|
||||
findTaskInComplexityReport,
|
||||
taskExists,
|
||||
formatTaskId,
|
||||
findTaskById,
|
||||
truncate,
|
||||
findCycles,
|
||||
toKebabCase,
|
||||
detectCamelCaseFlags,
|
||||
disableSilentMode,
|
||||
enableSilentMode,
|
||||
@@ -436,5 +541,9 @@ export {
|
||||
taskExists,
|
||||
toKebabCase,
|
||||
truncate,
|
||||
writeJSON
|
||||
writeJSON,
|
||||
resolveEnvVariable,
|
||||
getTaskManager,
|
||||
findProjectRoot
|
||||
// getConfig <-- Removed
|
||||
};
|
||||
|
||||
@@ -1,211 +0,0 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
/**
|
||||
* This script prepares the package for publication to NPM.
|
||||
* It ensures all necessary files are included and properly configured.
|
||||
*
|
||||
* Additional options:
|
||||
* --patch: Increment patch version (default)
|
||||
* --minor: Increment minor version
|
||||
* --major: Increment major version
|
||||
* --version=x.y.z: Set specific version
|
||||
*/
|
||||
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import { execSync } from 'child_process';
|
||||
import { fileURLToPath } from 'url';
|
||||
import { dirname } from 'path';
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = dirname(__filename);
|
||||
|
||||
// Define colors for console output
|
||||
const COLORS = {
|
||||
reset: '\x1b[0m',
|
||||
bright: '\x1b[1m',
|
||||
dim: '\x1b[2m',
|
||||
red: '\x1b[31m',
|
||||
green: '\x1b[32m',
|
||||
yellow: '\x1b[33m',
|
||||
blue: '\x1b[34m',
|
||||
magenta: '\x1b[35m',
|
||||
cyan: '\x1b[36m'
|
||||
};
|
||||
|
||||
// Parse command line arguments
|
||||
const args = process.argv.slice(2);
|
||||
const versionBump = args.includes('--major')
|
||||
? 'major'
|
||||
: args.includes('--minor')
|
||||
? 'minor'
|
||||
: 'patch';
|
||||
|
||||
// Check for explicit version
|
||||
const versionArg = args.find((arg) => arg.startsWith('--version='));
|
||||
const explicitVersion = versionArg ? versionArg.split('=')[1] : null;
|
||||
|
||||
// Log function with color support
|
||||
function log(level, ...args) {
|
||||
const prefix = {
|
||||
info: `${COLORS.blue}[INFO]${COLORS.reset}`,
|
||||
warn: `${COLORS.yellow}[WARN]${COLORS.reset}`,
|
||||
error: `${COLORS.red}[ERROR]${COLORS.reset}`,
|
||||
success: `${COLORS.green}[SUCCESS]${COLORS.reset}`
|
||||
}[level.toLowerCase()];
|
||||
|
||||
console.log(prefix, ...args);
|
||||
}
|
||||
|
||||
// Function to check if a file exists
|
||||
function fileExists(filePath) {
|
||||
return fs.existsSync(filePath);
|
||||
}
|
||||
|
||||
// Function to ensure a file is executable
|
||||
function ensureExecutable(filePath) {
|
||||
try {
|
||||
fs.chmodSync(filePath, '755');
|
||||
log('info', `Made ${filePath} executable`);
|
||||
} catch (error) {
|
||||
log('error', `Failed to make ${filePath} executable:`, error.message);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Function to sync template files
|
||||
function syncTemplateFiles() {
|
||||
// We no longer need to sync files since we're using them directly
|
||||
log(
|
||||
'info',
|
||||
'Template syncing has been deprecated - using source files directly'
|
||||
);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Function to increment version
|
||||
function incrementVersion(currentVersion, type = 'patch') {
|
||||
const [major, minor, patch] = currentVersion.split('.').map(Number);
|
||||
|
||||
switch (type) {
|
||||
case 'major':
|
||||
return `${major + 1}.0.0`;
|
||||
case 'minor':
|
||||
return `${major}.${minor + 1}.0`;
|
||||
case 'patch':
|
||||
default:
|
||||
return `${major}.${minor}.${patch + 1}`;
|
||||
}
|
||||
}
|
||||
|
||||
// Main function to prepare the package
|
||||
function preparePackage() {
|
||||
const rootDir = path.join(__dirname, '..');
|
||||
log('info', `Preparing package in ${rootDir}`);
|
||||
|
||||
// Update version in package.json
|
||||
const packageJsonPath = path.join(rootDir, 'package.json');
|
||||
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
|
||||
const currentVersion = packageJson.version;
|
||||
|
||||
let newVersion;
|
||||
if (explicitVersion) {
|
||||
newVersion = explicitVersion;
|
||||
log(
|
||||
'info',
|
||||
`Setting version to specified ${newVersion} (was ${currentVersion})`
|
||||
);
|
||||
} else {
|
||||
newVersion = incrementVersion(currentVersion, versionBump);
|
||||
log(
|
||||
'info',
|
||||
`Incrementing ${versionBump} version to ${newVersion} (was ${currentVersion})`
|
||||
);
|
||||
}
|
||||
|
||||
packageJson.version = newVersion;
|
||||
fs.writeFileSync(packageJsonPath, JSON.stringify(packageJson, null, 2));
|
||||
log('success', `Updated package.json version to ${newVersion}`);
|
||||
|
||||
// Check for required files
|
||||
const requiredFiles = [
|
||||
'package.json',
|
||||
'README-task-master.md',
|
||||
'index.js',
|
||||
'scripts/init.js',
|
||||
'scripts/dev.js',
|
||||
'assets/env.example',
|
||||
'assets/gitignore',
|
||||
'assets/example_prd.txt',
|
||||
'assets/scripts_README.md',
|
||||
'.cursor/rules/dev_workflow.mdc',
|
||||
'.cursor/rules/taskmaster.mdc',
|
||||
'.cursor/rules/cursor_rules.mdc',
|
||||
'.cursor/rules/self_improve.mdc'
|
||||
];
|
||||
|
||||
let allFilesExist = true;
|
||||
for (const file of requiredFiles) {
|
||||
const filePath = path.join(rootDir, file);
|
||||
if (!fileExists(filePath)) {
|
||||
log('error', `Required file ${file} does not exist`);
|
||||
allFilesExist = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (!allFilesExist) {
|
||||
log(
|
||||
'error',
|
||||
'Some required files are missing. Package preparation failed.'
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Ensure scripts are executable
|
||||
const executableScripts = ['scripts/init.js', 'scripts/dev.js'];
|
||||
|
||||
let allScriptsExecutable = true;
|
||||
for (const script of executableScripts) {
|
||||
const scriptPath = path.join(rootDir, script);
|
||||
if (!ensureExecutable(scriptPath)) {
|
||||
allScriptsExecutable = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (!allScriptsExecutable) {
|
||||
log(
|
||||
'warn',
|
||||
'Some scripts could not be made executable. This may cause issues.'
|
||||
);
|
||||
}
|
||||
|
||||
// Run npm pack to test package creation
|
||||
try {
|
||||
log('info', 'Running npm pack to test package creation...');
|
||||
const output = execSync('npm pack --dry-run', { cwd: rootDir }).toString();
|
||||
log('info', output);
|
||||
} catch (error) {
|
||||
log('error', 'Failed to run npm pack:', error.message);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Make scripts executable
|
||||
log('info', 'Making scripts executable...');
|
||||
try {
|
||||
execSync('chmod +x scripts/init.js', { stdio: 'ignore' });
|
||||
log('info', 'Made scripts/init.js executable');
|
||||
execSync('chmod +x scripts/dev.js', { stdio: 'ignore' });
|
||||
log('info', 'Made scripts/dev.js executable');
|
||||
} catch (error) {
|
||||
log('error', 'Failed to make scripts executable:', error.message);
|
||||
}
|
||||
|
||||
log('success', `Package preparation completed successfully! 🎉`);
|
||||
log('success', `Version updated to ${newVersion}`);
|
||||
log('info', 'You can now publish the package with:');
|
||||
log('info', ' npm publish');
|
||||
}
|
||||
|
||||
// Run the preparation
|
||||
preparePackage();
|
||||
@@ -1,203 +1,259 @@
|
||||
{
|
||||
"meta": {
|
||||
"generatedAt": "2025-03-24T20:01:35.986Z",
|
||||
"tasksAnalyzed": 24,
|
||||
"generatedAt": "2025-04-25T02:29:42.258Z",
|
||||
"tasksAnalyzed": 31,
|
||||
"thresholdScore": 5,
|
||||
"projectName": "Your Project Name",
|
||||
"projectName": "Task Master",
|
||||
"usedResearch": false
|
||||
},
|
||||
"complexityAnalysis": [
|
||||
{
|
||||
"taskId": 1,
|
||||
"taskTitle": "Implement Task Data Structure",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Break down the implementation of the core tasks.json data structure into subtasks that cover schema design, model implementation, validation, file operations, and error handling. For each subtask, include specific technical requirements and acceptance criteria.",
|
||||
"reasoning": "This task requires designing a foundational data structure that will be used throughout the system. It involves schema design, validation logic, and file system operations, which together represent moderate to high complexity. The task is critical as many other tasks depend on it."
|
||||
},
|
||||
{
|
||||
"taskId": 2,
|
||||
"taskTitle": "Develop Command Line Interface Foundation",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Divide the CLI foundation implementation into subtasks covering Commander.js setup, help documentation creation, console output formatting, and global options handling. Each subtask should specify implementation details and how it integrates with the overall CLI structure.",
|
||||
"reasoning": "Setting up the CLI foundation requires integrating Commander.js, implementing various command-line options, and establishing the output formatting system. The complexity is moderate as it involves creating the interface layer that users will interact with."
|
||||
},
|
||||
{
|
||||
"taskId": 3,
|
||||
"taskTitle": "Implement Basic Task Operations",
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Break down the implementation of basic task operations into subtasks covering CRUD operations, status management, dependency handling, and priority management. Each subtask should detail the specific operations, validation requirements, and error cases to handle.",
|
||||
"reasoning": "This task encompasses multiple operations (create, read, update, delete) along with status changes, dependency management, and priority handling. It represents high complexity due to the breadth of functionality and the need to ensure data integrity across operations."
|
||||
},
|
||||
{
|
||||
"taskId": 4,
|
||||
"taskTitle": "Create Task File Generation System",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Divide the task file generation system into subtasks covering template creation, file generation logic, bi-directional synchronization, and file organization. Each subtask should specify the technical approach, edge cases to handle, and integration points with the task data structure.",
|
||||
"reasoning": "Implementing file generation with bi-directional synchronization presents significant complexity due to the need to maintain consistency between individual files and the central tasks.json. The system must handle updates in either direction and resolve potential conflicts."
|
||||
},
|
||||
{
|
||||
"taskId": 5,
|
||||
"taskTitle": "Integrate Anthropic Claude API",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Break down the Claude API integration into subtasks covering authentication setup, prompt template creation, response handling, and error management with retries. Each subtask should detail the specific implementation approach, including security considerations and performance optimizations.",
|
||||
"reasoning": "Integrating with the Claude API involves setting up authentication, creating effective prompts, and handling responses and errors. The complexity is moderate, focusing on establishing a reliable connection to the external service with proper error handling and retry logic."
|
||||
},
|
||||
{
|
||||
"taskId": 6,
|
||||
"taskTitle": "Build PRD Parsing System",
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Divide the PRD parsing system into subtasks covering file reading, prompt engineering, content-to-task conversion, dependency inference, priority assignment, and handling large documents. Each subtask should specify the AI interaction approach, data transformation steps, and validation requirements.",
|
||||
"reasoning": "Parsing PRDs into structured tasks requires sophisticated prompt engineering and intelligent processing of unstructured text. The complexity is high due to the need to accurately extract tasks, infer dependencies, and handle potentially large documents with varying formats."
|
||||
},
|
||||
{
|
||||
"taskId": 7,
|
||||
"taskTitle": "Implement Task Expansion with Claude",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Break down the task expansion functionality into subtasks covering prompt creation for subtask generation, expansion workflow implementation, parent-child relationship management, and regeneration mechanisms. Each subtask should detail the AI interaction patterns, data structures, and user experience considerations.",
|
||||
"reasoning": "Task expansion involves complex AI interactions to generate meaningful subtasks and manage their relationships with parent tasks. The complexity comes from creating effective prompts that produce useful subtasks and implementing a smooth workflow for users to generate and refine these subtasks."
|
||||
},
|
||||
{
|
||||
"taskId": 8,
|
||||
"taskTitle": "Develop Implementation Drift Handling",
|
||||
"complexityScore": 9,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Divide the implementation drift handling into subtasks covering change detection, task rewriting based on new context, dependency chain updates, work preservation, and update suggestion analysis. Each subtask should specify the algorithms, heuristics, and AI prompts needed to effectively manage implementation changes.",
|
||||
"reasoning": "This task involves the complex challenge of updating future tasks based on changes in implementation. It requires sophisticated analysis of completed work, understanding how it affects pending tasks, and intelligently updating those tasks while preserving dependencies. This represents high complexity due to the need for context-aware AI reasoning."
|
||||
},
|
||||
{
|
||||
"taskId": 9,
|
||||
"taskTitle": "Integrate Perplexity API",
|
||||
"complexityScore": 5,
|
||||
"recommendedSubtasks": 3,
|
||||
"expansionPrompt": "Break down the Perplexity API integration into subtasks covering authentication setup, research-oriented prompt creation, response handling, and fallback mechanisms. Each subtask should detail the implementation approach, integration with existing systems, and quality comparison metrics.",
|
||||
"reasoning": "Similar to the Claude integration but slightly less complex, this task focuses on connecting to the Perplexity API for research capabilities. The complexity is moderate, involving API authentication, prompt templates, and response handling with fallback mechanisms to Claude."
|
||||
},
|
||||
{
|
||||
"taskId": 10,
|
||||
"taskTitle": "Create Research-Backed Subtask Generation",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Divide the research-backed subtask generation into subtasks covering domain-specific prompt creation, context enrichment from research, knowledge incorporation, and detailed subtask generation. Each subtask should specify the approach for leveraging research data and integrating it into the generation process.",
|
||||
"reasoning": "This task builds on previous work to enhance subtask generation with research capabilities. The complexity comes from effectively incorporating research results into the generation process and creating domain-specific prompts that produce high-quality, detailed subtasks with best practices."
|
||||
},
|
||||
{
|
||||
"taskId": 11,
|
||||
"taskTitle": "Implement Batch Operations",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Break down the batch operations functionality into subtasks covering multi-task status updates, bulk subtask generation, task filtering/querying, and batch prioritization. Each subtask should detail the command interface, implementation approach, and performance considerations for handling multiple tasks.",
|
||||
"reasoning": "Implementing batch operations requires extending existing functionality to work with multiple tasks simultaneously. The complexity is moderate, focusing on efficient processing of task sets, filtering capabilities, and maintaining data consistency across bulk operations."
},
{
"taskId": 12,
"taskTitle": "Develop Project Initialization System",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the project initialization system into subtasks covering project templating, interactive setup wizard, environment configuration, directory structure creation, and example generation. Each subtask should specify the user interaction flow, template design, and integration with existing components.",
"reasoning": "Creating a project initialization system involves setting up templates, an interactive wizard, and generating initial files and directories. The complexity is moderate, focusing on providing a smooth setup experience for new projects with appropriate defaults and configuration."
},
{
"taskId": 13,
"taskTitle": "Create Cursor Rules Implementation",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Break down the Cursor rules implementation into subtasks covering documentation creation (dev_workflow.mdc, cursor_rules.mdc, self_improve.mdc), directory structure setup, and integration documentation. Each subtask should detail the specific content to include and how it enables effective AI interaction.",
"reasoning": "This task focuses on creating documentation and rules for Cursor AI integration. The complexity is moderate, involving the creation of structured documentation files that define how AI should interact with the system and setting up the appropriate directory structure."
},
{
"taskId": 14,
"taskTitle": "Develop Agent Workflow Guidelines",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Divide the agent workflow guidelines into subtasks covering task discovery documentation, selection guidelines, implementation guidance, verification procedures, and prioritization rules. Each subtask should specify the specific guidance to provide and how it enables effective agent workflows.",
"reasoning": "Creating comprehensive guidelines for AI agents involves documenting workflows, selection criteria, and implementation guidance. The complexity is moderate, focusing on clear documentation that helps agents interact effectively with the task system."
},
{
"taskId": 15,
"taskTitle": "Optimize Agent Integration with Cursor and dev.js Commands",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the agent integration optimization into subtasks covering existing pattern documentation, Cursor-dev.js command integration enhancement, workflow documentation improvement, and feature additions. Each subtask should specify the specific improvements to make and how they enhance agent interaction.",
"reasoning": "This task involves enhancing and documenting existing agent interaction patterns with Cursor and dev.js commands. The complexity is moderate, focusing on improving integration between different components and ensuring agents can effectively utilize the system's capabilities."
},
{
"taskId": 16,
"taskTitle": "Create Configuration Management System",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the configuration management system into subtasks covering environment variable handling, .env file support, configuration validation, defaults with overrides, and secure API key handling. Each subtask should specify the implementation approach, security considerations, and user experience for configuration.",
"reasoning": "Implementing robust configuration management involves handling environment variables, .env files, validation, and secure storage of sensitive information. The complexity is moderate, focusing on creating a flexible system that works across different environments with appropriate security measures."
},
{
"taskId": 17,
"taskTitle": "Implement Comprehensive Logging System",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Break down the logging system implementation into subtasks covering log level configuration, output destination management, specialized logging (commands, APIs, errors), and performance metrics. Each subtask should detail the implementation approach, configuration options, and integration with existing components.",
"reasoning": "Creating a comprehensive logging system involves implementing multiple log levels, configurable destinations, and specialized logging for different components. The complexity is moderate, focusing on providing useful information for debugging and monitoring while maintaining performance."
},
{
"taskId": 18,
"taskTitle": "Create Comprehensive User Documentation",
"complexityScore": 7,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the user documentation creation into subtasks covering README with installation instructions, command reference, configuration guide, example workflows, troubleshooting guides, and advanced usage. Each subtask should specify the content to include, format, and organization to ensure comprehensive coverage.",
"reasoning": "Creating comprehensive documentation requires covering installation, usage, configuration, examples, and troubleshooting across multiple components. The complexity is moderate to high due to the breadth of functionality to document and the need to make it accessible to different user levels."
},
{
"taskId": 19,
"taskTitle": "Implement Error Handling and Recovery",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the error handling implementation into subtasks covering consistent error formatting, helpful error messages, API error handling with retries, file system error recovery, validation errors, and system state recovery. Each subtask should detail the specific error types to handle, recovery strategies, and user communication approach.",
"reasoning": "Implementing robust error handling across the entire system represents high complexity due to the variety of error types, the need for meaningful messages, and the implementation of recovery mechanisms. This task is critical for system reliability and user experience."
},
{
"taskId": 20,
"taskTitle": "Create Token Usage Tracking and Cost Management",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the token tracking and cost management into subtasks covering usage tracking implementation, configurable limits, reporting features, cost estimation, caching for optimization, and usage alerts. Each subtask should specify the implementation approach, data storage, and user interface for monitoring and managing usage.",
"reasoning": "Implementing token usage tracking involves monitoring API calls, calculating costs, implementing limits, and optimizing usage through caching. The complexity is moderate to high, focusing on providing users with visibility into their API consumption and tools to manage costs."
},
{
"taskId": 21,
"taskTitle": "Refactor dev.js into Modular Components",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the refactoring of dev.js into subtasks covering module design (commands.js, ai-services.js, task-manager.js, ui.js, utils.js), entry point restructuring, dependency management, error handling standardization, and documentation. Each subtask should detail the specific code to extract, interfaces to define, and integration points between modules.",
"reasoning": "Refactoring a monolithic file into modular components represents high complexity due to the need to identify appropriate boundaries, manage dependencies between modules, and ensure all functionality is preserved. This requires deep understanding of the existing codebase and careful restructuring."
},
{
"taskId": 22,
"taskTitle": "Create Comprehensive Test Suite for Task Master CLI",
"complexityScore": 9,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the test suite creation into subtasks covering unit test implementation, integration test development, end-to-end test creation, mocking setup, and CI integration. Each subtask should specify the testing approach, coverage goals, test data preparation, and specific functionality to test.",
"reasoning": "Developing a comprehensive test suite represents high complexity due to the need to cover unit, integration, and end-to-end tests across all functionality, implement appropriate mocking, and ensure good test coverage. This requires significant test engineering and understanding of the entire system."
},
{
"taskId": 23,
"taskTitle": "Implement MCP (Model Context Protocol) Server Functionality for Task Master",
"complexityScore": 9,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the MCP server implementation into subtasks covering core server module creation, endpoint implementation (/context, /models, /execute), context management system, authentication mechanisms, and performance optimization. Each subtask should detail the API design, data structures, and integration with existing Task Master functionality.",
"reasoning": "Implementing an MCP server represents high complexity due to the need to create a RESTful API with multiple endpoints, manage context data efficiently, handle authentication, and ensure compatibility with the MCP specification. This requires significant API design and server-side development work."
},
{
"taskId": 24,
"taskTitle": "Implement AI-Powered Test Generation Command",
"complexityScore": 9,
"recommendedSubtasks": 10,
"expansionPrompt": "Break down the implementation of an AI-powered test generation command into granular steps, covering CLI integration, task retrieval, AI prompt construction, API integration, test file formatting, error handling, documentation, and comprehensive testing (unit, integration, error cases, and manual verification).",
"reasoning": "This task involves advanced CLI development, deep integration with external AI APIs, dynamic prompt engineering, file system operations, error handling, and extensive testing. It requires orchestrating multiple subsystems and ensuring robust, user-friendly output. The cognitive and technical demands are high, justifying a high complexity score and a need for further decomposition into at least 10 subtasks to manage risk and ensure quality.[1][3][4][5]"
},
{
"taskId": 26,
"taskTitle": "Implement Context Foundation for AI Operations",
"complexityScore": 7,
"recommendedSubtasks": 8,
"expansionPrompt": "Expand the context foundation implementation into detailed subtasks for CLI flag integration, file reading utilities, error handling, context formatting, command handler updates, documentation, and comprehensive testing for both functionality and error scenarios.",
"reasoning": "This task introduces foundational context management across multiple commands, requiring careful CLI design, file I/O, error handling, and integration with AI prompt construction. While less complex than full AI-powered features, it still spans several modules and requires robust validation, suggesting a moderate-to-high complexity and a need for further breakdown.[1][3][4]"
},
{
"taskId": 27,
"taskTitle": "Implement Context Enhancements for AI Operations",
"complexityScore": 8,
"recommendedSubtasks": 10,
"expansionPrompt": "Decompose the context enhancement task into subtasks for code context extraction, task history integration, PRD summarization, context formatting, token optimization, error handling, and comprehensive testing for each new context type.",
"reasoning": "This phase builds on the foundation to add sophisticated context extraction (code, history, PRD), requiring advanced parsing, summarization, and prompt engineering. The need to optimize for token limits and maintain performance across large codebases increases both technical and cognitive complexity, warranting a high score and further subtask expansion.[1][3][4][5]"
},
{
"taskId": 28,
"taskTitle": "Implement Advanced ContextManager System",
"complexityScore": 10,
"recommendedSubtasks": 12,
"expansionPrompt": "Expand the ContextManager implementation into subtasks for class design, context source integration, optimization algorithms, caching, token management, command interface updates, AI service integration, performance monitoring, logging, and comprehensive testing (unit, integration, performance, and user experience).",
"reasoning": "This is a highly complex architectural task involving advanced class design, optimization algorithms, dynamic context prioritization, caching, and integration with multiple AI services. It requires deep system knowledge, careful performance considerations, and robust error handling, making it one of the most complex tasks in the set and justifying a large number of subtasks.[1][3][4][5]"
},
{
"taskId": 32,
"taskTitle": "Implement \"learn\" Command for Automatic Cursor Rule Generation",
"complexityScore": 9,
"recommendedSubtasks": 15,
"expansionPrompt": "Break down the 'learn' command implementation into subtasks for file structure setup, path utilities, chat history analysis, rule management, AI integration, error handling, performance optimization, CLI integration, logging, and comprehensive testing.",
"reasoning": "This task requires orchestrating file system operations, parsing complex chat and code histories, managing rule templates, integrating with AI for pattern extraction, and ensuring robust error handling and performance. The breadth and depth of required functionality, along with the need for both automatic and manual triggers, make this a highly complex task needing extensive decomposition.[1][3][4][5]"
},
{
"taskId": 35,
"taskTitle": "Integrate Grok3 API for Research Capabilities",
"complexityScore": 7,
"recommendedSubtasks": 8,
"expansionPrompt": "Expand the Grok3 API integration into subtasks for API client development, service layer updates, payload/response adaptation, error handling, configuration management, UI updates, backward compatibility, and documentation/testing.",
"reasoning": "This migration task involves replacing a core external API, adapting to new request/response formats, updating configuration and UI, and ensuring backward compatibility. While not as cognitively complex as some AI tasks, the risk and breadth of impact across the system justify a moderate-to-high complexity and further breakdown.[1][3][4]"
},
{
"taskId": 36,
"taskTitle": "Add Ollama Support for AI Services as Claude Alternative",
"complexityScore": 7,
"recommendedSubtasks": 8,
"expansionPrompt": "Decompose the Ollama integration into subtasks for service class implementation, configuration, model selection, prompt formatting, error handling, fallback logic, documentation, and comprehensive testing.",
"reasoning": "Adding a local AI provider requires interface compatibility, configuration management, error handling, and fallback logic, as well as user documentation. The technical complexity is moderate-to-high, especially in ensuring seamless switching and robust error handling, warranting further subtasking.[1][3][4]"
},
{
"taskId": 37,
"taskTitle": "Add Gemini Support for Main AI Services as Claude Alternative",
"complexityScore": 7,
"recommendedSubtasks": 8,
"expansionPrompt": "Expand Gemini integration into subtasks for service class creation, authentication, prompt/response mapping, configuration, error handling, streaming support, documentation, and comprehensive testing.",
"reasoning": "Integrating a new cloud AI provider involves authentication, API adaptation, configuration, and ensuring feature parity. The complexity is similar to other provider integrations, requiring careful planning and multiple subtasks for robust implementation and testing.[1][3][4]"
},
{
"taskId": 40,
"taskTitle": "Implement 'plan' Command for Task Implementation Planning",
"complexityScore": 6,
"recommendedSubtasks": 6,
"expansionPrompt": "Break down the 'plan' command implementation into subtasks for CLI integration, task/subtask retrieval, AI prompt construction, plan formatting, error handling, and testing.",
"reasoning": "This task involves AI prompt engineering, CLI integration, and content formatting, but is more focused and less technically demanding than full AI service or context management features. It still requires careful error handling and testing, suggesting a moderate complexity and a handful of subtasks.[1][3][4]"
},
{
"taskId": 41,
"taskTitle": "Implement Visual Task Dependency Graph in Terminal",
"complexityScore": 8,
"recommendedSubtasks": 10,
"expansionPrompt": "Expand the visual dependency graph implementation into subtasks for CLI command setup, graph layout algorithms, ASCII/Unicode rendering, color coding, circular dependency detection, filtering, accessibility, performance optimization, documentation, and testing.",
"reasoning": "Rendering complex dependency graphs in the terminal with color coding, layout optimization, and accessibility features is technically challenging and requires careful algorithm design and robust error handling. The need for performance optimization and user-friendly output increases the complexity, justifying a high score and further subtasking.[1][3][4][5]"
},
{
"taskId": 42,
"taskTitle": "Implement MCP-to-MCP Communication Protocol",
"complexityScore": 10,
"recommendedSubtasks": 12,
"expansionPrompt": "Break down the MCP-to-MCP protocol implementation into subtasks for protocol definition, adapter pattern, client module, reference integration, mode support, core module updates, configuration, documentation, error handling, security, and comprehensive testing.",
"reasoning": "Designing and implementing a standardized communication protocol with dynamic mode switching, adapter patterns, and robust error handling is architecturally complex. It requires deep system understanding, security considerations, and extensive testing, making it one of the most complex tasks and requiring significant decomposition.[1][3][4][5]"
},
{
"taskId": 43,
"taskTitle": "Add Research Flag to Add-Task Command",
"complexityScore": 5,
"recommendedSubtasks": 5,
"expansionPrompt": "Expand the research flag implementation into subtasks for CLI parser updates, subtask generation logic, parent linking, help documentation, and testing.",
"reasoning": "This is a focused feature addition involving CLI parsing, subtask generation, and documentation. While it requires some integration with AI or templating logic, the scope is well-defined and less complex than architectural or multi-module tasks, suggesting a moderate complexity and a handful of subtasks.[1][3][4]"
},
{
"taskId": 44,
"taskTitle": "Implement Task Automation with Webhooks and Event Triggers",
"complexityScore": 9,
"recommendedSubtasks": 10,
"expansionPrompt": "Decompose the webhook and event trigger system into subtasks for event system design, webhook registration, trigger definition, incoming/outgoing webhook handling, authentication, rate limiting, CLI management, payload templating, logging, and comprehensive testing.",
"reasoning": "Building a robust automation system with webhooks and event triggers involves designing an event system, secure webhook handling, trigger logic, CLI management, and error handling. The breadth and integration requirements make this a highly complex task needing extensive breakdown.[1][3][4][5]"
},
{
"taskId": 45,
"taskTitle": "Implement GitHub Issue Import Feature",
"complexityScore": 7,
"recommendedSubtasks": 8,
"expansionPrompt": "Expand the GitHub issue import feature into subtasks for CLI flag parsing, URL extraction, API integration, data mapping, authentication, error handling, override logic, documentation, and testing.",
"reasoning": "This task involves external API integration, data mapping, authentication, error handling, and user override logic. While not as complex as architectural changes, it still requires careful planning and multiple subtasks for robust implementation and testing.[1][3][4]"
},
{
"taskId": 46,
"taskTitle": "Implement ICE Analysis Command for Task Prioritization",
"complexityScore": 7,
"recommendedSubtasks": 8,
"expansionPrompt": "Break down the ICE analysis command into subtasks for scoring algorithm development, LLM prompt engineering, report generation, CLI rendering, integration with complexity reports, sorting/filtering, error handling, and testing.",
"reasoning": "Implementing a prioritization command with LLM-based scoring, report generation, and CLI rendering involves moderate technical and cognitive complexity, especially in ensuring accurate and actionable outputs. It requires several subtasks for robust implementation and validation.[1][3][4]"
},
{
"taskId": 47,
"taskTitle": "Enhance Task Suggestion Actions Card Workflow",
"complexityScore": 7,
"recommendedSubtasks": 8,
"expansionPrompt": "Expand the workflow enhancement into subtasks for UI redesign, phase management logic, interactive elements, progress tracking, context addition, task management integration, accessibility, and comprehensive testing.",
"reasoning": "Redesigning a multi-phase workflow with interactive UI elements, progress tracking, and context management involves both UI/UX and logic complexity. The need for seamless transitions and robust state management increases the complexity, warranting further breakdown.[1][3][4]"
},
{
"taskId": 48,
"taskTitle": "Refactor Prompts into Centralized Structure",
"complexityScore": 6,
"recommendedSubtasks": 6,
"expansionPrompt": "Break down the prompt refactoring into subtasks for directory setup, prompt extraction, import updates, naming conventions, documentation, and regression testing.",
"reasoning": "This is a codebase refactoring task focused on maintainability and organization. While it touches many files, the technical complexity is moderate, but careful planning and testing are needed to avoid regressions, suggesting a moderate complexity and several subtasks.[1][3][4]"
},
{
"taskId": 49,
"taskTitle": "Implement Code Quality Analysis Command",
"complexityScore": 8,
"recommendedSubtasks": 10,
"expansionPrompt": "Expand the code quality analysis command into subtasks for pattern recognition, best practice verification, AI integration, recommendation generation, task integration, CLI development, configuration, error handling, documentation, and comprehensive testing.",
"reasoning": "This task involves static code analysis, AI integration for best practice checks, recommendation generation, and task creation workflows. The technical and cognitive demands are high, requiring robust validation and integration, justifying a high complexity and multiple subtasks.[1][3][4][5]"
},
{
"taskId": 50,
"taskTitle": "Implement Test Coverage Tracking System by Task",
"complexityScore": 9,
"recommendedSubtasks": 12,
"expansionPrompt": "Break down the test coverage tracking system into subtasks for data structure design, coverage parsing, mapping algorithms, CLI commands, LLM-powered test generation, MCP integration, visualization, workflow integration, error handling, documentation, and comprehensive testing.",
"reasoning": "Mapping test coverage to tasks, integrating with coverage tools, generating targeted tests, and visualizing coverage requires advanced data modeling, parsing, AI integration, and workflow design. The breadth and depth of this system make it highly complex and in need of extensive decomposition.[1][3][4][5]"
},
{
"taskId": 51,
"taskTitle": "Implement Perplexity Research Command",
"complexityScore": 7,
"recommendedSubtasks": 8,
"expansionPrompt": "Expand the Perplexity research command into subtasks for API client development, context extraction, CLI interface, result formatting, caching, error handling, documentation, and comprehensive testing.",
"reasoning": "This task involves external API integration, context extraction, CLI development, result formatting, caching, and error handling. The technical complexity is moderate-to-high, especially in ensuring robust and user-friendly output, suggesting multiple subtasks.[1][3][4]"
},
{
"taskId": 52,
"taskTitle": "Implement Task Suggestion Command for CLI",
"complexityScore": 6,
"recommendedSubtasks": 6,
"expansionPrompt": "Break down the task suggestion command into subtasks for task snapshot collection, context extraction, AI suggestion generation, interactive CLI interface, error handling, and testing.",
"reasoning": "This is a focused feature involving AI suggestion generation and interactive CLI elements. While it requires careful context management and error handling, the scope is well-defined and less complex than architectural or multi-module tasks, suggesting a moderate complexity and several subtasks.[1][3][4]"
},
{
"taskId": 53,
"taskTitle": "Implement Subtask Suggestion Feature for Parent Tasks",
"complexityScore": 6,
"recommendedSubtasks": 6,
"expansionPrompt": "Expand the subtask suggestion feature into subtasks for parent task validation, context gathering, AI suggestion logic, interactive CLI interface, subtask linking, and testing.",
"reasoning": "Similar to the task suggestion command, this feature is focused but requires robust context management, AI integration, and interactive CLI handling. The complexity is moderate, warranting several subtasks for a robust implementation.[1][3][4]"
},
{
"taskId": 54,
"taskTitle": "Add Research Flag to Add-Task Command",
"complexityScore": 5,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the research flag enhancement into subtasks for CLI parser updates, research invocation, user interaction, task creation flow integration, and testing.",
"reasoning": "This is a focused enhancement involving CLI parsing, research invocation, and user interaction. The technical complexity is moderate, with a clear scope and integration points, suggesting a handful of subtasks.[1][3][4]"
},
{
"taskId": 55,
"taskTitle": "Implement Positional Arguments Support for CLI Commands",
"complexityScore": 6,
"recommendedSubtasks": 6,
"expansionPrompt": "Expand positional argument support into subtasks for parser updates, argument mapping, help documentation, error handling, backward compatibility, and comprehensive testing.",
"reasoning": "Upgrading CLI parsing to support positional arguments requires careful mapping, error handling, documentation, and regression testing to maintain backward compatibility. The complexity is moderate, suggesting several subtasks.[1][3][4]"
},
{
"taskId": 56,
"taskTitle": "Refactor Task-Master Files into Node Module Structure",
"complexityScore": 8,
"recommendedSubtasks": 10,
"expansionPrompt": "Break down the refactoring into subtasks for directory setup, file migration, import path updates, build script adjustments, compatibility checks, documentation, regression testing, and rollback planning.",
"reasoning": "This is a high-risk, broad refactoring affecting many files and build processes. It requires careful planning, incremental changes, and extensive testing to avoid regressions, justifying a high complexity and multiple subtasks.[1][3][4][5]"
},
{
"taskId": 57,
"taskTitle": "Enhance Task-Master CLI User Experience and Interface",
"complexityScore": 7,
"recommendedSubtasks": 8,
"expansionPrompt": "Expand the CLI UX enhancement into subtasks for log management, visual design, interactive elements, output formatting, help/documentation, accessibility, performance optimization, and comprehensive testing.",
"reasoning": "Improving CLI UX involves log management, visual enhancements, interactive elements, and accessibility, requiring both technical and design skills. The breadth of improvements and need for robust testing increase the complexity, suggesting multiple subtasks.[1][3][4]"
},
{
"taskId": 58,
"taskTitle": "Implement Elegant Package Update Mechanism for Task-Master",
"complexityScore": 7,
"recommendedSubtasks": 8,
"expansionPrompt": "Break down the update mechanism into subtasks for version detection, update command implementation, file management, configuration migration, notification system, rollback logic, documentation, and comprehensive testing.",
"reasoning": "Implementing a robust update mechanism involves version management, file operations, configuration migration, rollback planning, and user communication. The technical and operational complexity is moderate-to-high, requiring multiple subtasks.[1][3][4]"
},
{
"taskId": 59,
"taskTitle": "Remove Manual Package.json Modifications and Implement Automatic Dependency Management",
"complexityScore": 6,
"recommendedSubtasks": 6,
"expansionPrompt": "Expand the dependency management refactor into subtasks for code audit, removal of manual modifications, npm dependency updates, initialization command updates, documentation, and regression testing.",
"reasoning": "This is a focused refactoring to align with npm best practices. While it touches installation and configuration logic, the technical complexity is moderate, with a clear scope and manageable risk, suggesting several subtasks.[1][3][4]"
},
{
"taskId": 60,
"taskTitle": "Implement Mentor System with Round-Table Discussion Feature",
"complexityScore": 9,
"recommendedSubtasks": 12,
"expansionPrompt": "Break down the mentor system implementation into subtasks for mentor management, round-table simulation, CLI integration, AI personality simulation, task integration, output formatting, error handling, documentation, and comprehensive testing.",
"reasoning": "This task involves designing a new system for mentor management, simulating multi-personality AI discussions, integrating with tasks, and ensuring robust CLI and output handling. The breadth and novelty of the feature, along with the need for robust simulation and integration, make it highly complex and in need of extensive decomposition.[1][3][4][5]"
},
{
"taskId": 61,
"taskTitle": "Implement Flexible AI Model Management",
"complexityScore": 10,
"recommendedSubtasks": 15,
"expansionPrompt": "Expand the AI model management implementation into subtasks for configuration management, CLI command parsing, provider module development, unified service abstraction, environment variable handling, documentation, integration testing, migration planning, and cleanup of legacy code.",
"reasoning": "This is a major architectural overhaul involving configuration management, CLI design, multi-provider integration, abstraction layers, environment variable handling, documentation, and migration. The technical and organizational complexity is extremely high, requiring extensive decomposition and careful coordination.[1][3][4][5]"
},
{
"taskId": 62,
"taskTitle": "Add --simple Flag to Update Commands for Direct Text Input",
"complexityScore": 5,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the --simple flag implementation into subtasks for CLI parser updates, update logic modification, timestamp formatting, display logic, documentation, and testing.",
"reasoning": "This is a focused feature addition involving CLI parsing, conditional logic, timestamp formatting, and display updates. The technical complexity is moderate, with a clear scope and manageable risk, suggesting a handful of subtasks.[1][3][4]"
}
]
}

@@ -158,7 +158,7 @@ async function runTests() {

try {
const smallResult = execSync(
`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${smallPRDPath} --tasks=5`,
`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${smallPRDPath} --num-tasks=5`,
{
stdio: 'inherit'
}
@@ -179,7 +179,7 @@ async function runTests() {

try {
const mediumResult = execSync(
`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${mediumPRDPath} --tasks=15`,
`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${mediumPRDPath} --num-tasks=15`,
{
stdio: 'inherit'
}
@@ -200,7 +200,7 @@ async function runTests() {

try {
const largeResult = execSync(
`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${largePRDPath} --tasks=25`,
`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${largePRDPath} --num-tasks=25`,
{
stdio: 'inherit'
}