fix merge conflicts to prep for merge with branch next

- Enhance E2E testing and LLM analysis reporting:
  - Add --analyze-log flag to run_e2e.sh to re-run LLM analysis on existing logs.
  - Add test:e2e and analyze-log scripts to package.json for easier execution.

- Correct display errors and dependency validation output:
  - Update chalk usage in add-task.js to use bracket notation (chalk[color]) compatible with v5, resolving 'chalk.keyword is not a function' error.
  - Modify fix-dependencies command output to show red failure box with issue count instead of green success box when validation fails.

- Refactor interactive model setup:
  - Verify inclusion of 'No change' option during interactive model setup flow (task-master models --setup).

- Update model definitions:
  - Add max_tokens field for gpt-4o in supported-models.json.

- Remove unused scripts:
  - Delete prepare-package.js and rule-transformer.test.js.

Release candidate
This commit is contained in:
Eyal Toledano
2025-04-29 01:54:42 -04:00
48 changed files with 4744 additions and 1237 deletions

View File

@@ -10,7 +10,6 @@ import boxen from 'boxen';
import fs from 'fs';
import https from 'https';
import inquirer from 'inquirer';
import Table from 'cli-table3';
import { log, readJSON } from './utils.js';
import {
@@ -45,9 +44,9 @@ import {
getDebugFlag,
getConfig,
writeConfig,
ConfigurationError, // Import the custom error
getAllProviders,
isConfigFilePresent
ConfigurationError,
isConfigFilePresent,
getAvailableModels
} from './config-manager.js';
import {
@@ -71,8 +70,8 @@ import {
getAvailableModelsList,
setModel,
getApiKeyStatusReport
} from './task-manager/models.js'; // Import new core functions
import { findProjectRoot } from './utils.js'; // Import findProjectRoot
} from './task-manager/models.js';
import { findProjectRoot } from './utils.js';
/**
* Runs the interactive setup process for model configuration.
@@ -88,6 +87,22 @@ async function runInteractiveSetup(projectRoot) {
process.exit(1);
}
const currentConfigResult = await getModelConfiguration({ projectRoot });
const currentModels = currentConfigResult.success
? currentConfigResult.data.activeModels
: { main: null, research: null, fallback: null };
// Handle potential config load failure gracefully for the setup flow
if (
!currentConfigResult.success &&
currentConfigResult.error?.code !== 'CONFIG_MISSING'
) {
console.warn(
chalk.yellow(
`Warning: Could not load current model configuration: ${currentConfigResult.error?.message || 'Unknown error'}. Proceeding with defaults.`
)
);
}
// Helper function to fetch OpenRouter models (duplicated for CLI context)
function fetchOpenRouterModelsCLI() {
return new Promise((resolve) => {
@@ -131,93 +146,108 @@ async function runInteractiveSetup(projectRoot) {
});
}
// Get available models - pass projectRoot
const availableModelsResult = await getAvailableModelsList({ projectRoot });
if (!availableModelsResult.success) {
console.error(
chalk.red(
`Error fetching available models: ${availableModelsResult.error?.message || 'Unknown error'}`
)
);
process.exit(1);
}
const availableModelsForSetup = availableModelsResult.data.models;
// Get current config - pass projectRoot
const currentConfigResult = await getModelConfiguration({ projectRoot });
// Allow setup even if current config fails (might be first time run)
const currentModels = currentConfigResult.success
? currentConfigResult.data?.activeModels
: { main: {}, research: {}, fallback: {} };
if (
!currentConfigResult.success &&
currentConfigResult.error?.code !== 'CONFIG_MISSING'
) {
// Log error if it's not just a missing file
console.error(
chalk.red(
`Warning: Could not fetch current configuration: ${currentConfigResult.error?.message || 'Unknown error'}`
)
);
}
console.log(chalk.cyan.bold('\nInteractive Model Setup:'));
// Helper to get choices and default index for a role
const getPromptData = (role, allowNone = false) => {
// Filter models FIRST based on allowed roles
const filteredModels = availableModelsForSetup
.filter((model) => !model.modelId.startsWith('[')) // Filter out placeholders
.filter((model) => model.allowedRoles?.includes(role)); // Filter by allowed role
const currentModel = currentModels[role]; // Use the fetched data
const allModelsRaw = getAvailableModels(); // Get all available models
// THEN map the filtered models to the choice format
const roleChoices = filteredModels.map((model) => ({
name: `${model.provider} / ${model.modelId}`,
value: { provider: model.provider, id: model.modelId }
}));
// Manually group models by provider
const modelsByProvider = allModelsRaw.reduce((acc, model) => {
if (!acc[model.provider]) {
acc[model.provider] = [];
}
acc[model.provider].push(model);
return acc;
}, {});
let choices = []; // Initialize choices array
let defaultIndex = -1;
const currentModelId = currentModels[role]?.modelId;
const cancelOption = { name: '⏹ Cancel Model Setup', value: '__CANCEL__' }; // Symbol updated
const noChangeOption = currentModel?.modelId
? {
name: `∘ No change to current ${role} model (${currentModel.modelId})`, // Symbol updated
value: '__NO_CHANGE__'
}
: null;
// --- Add Custom/Cancel Options --- //
const customOpenRouterOption = {
name: 'OpenRouter (Enter Custom ID)',
name: '* Custom OpenRouter model', // Symbol updated
value: '__CUSTOM_OPENROUTER__'
};
const customOllamaOption = {
name: 'Ollama (Enter Custom ID)',
value: '__CUSTOM_OLLAMA__'
};
const cancelOption = { name: 'Cancel setup', value: '__CANCEL__' };
// Find the index of the current model within the role-specific choices *before* adding custom options
const currentChoiceIndex = roleChoices.findIndex(
(c) => c.value.id === currentModelId
);
let choices = [];
let defaultIndex = 0; // Default to 'Cancel'
// Filter and format models allowed for this role using the manually grouped data
const roleChoices = Object.entries(modelsByProvider)
.map(([provider, models]) => {
const providerModels = models
.filter((m) => m.allowed_roles.includes(role))
.map((m) => ({
name: `${provider} / ${m.id} ${
m.cost_per_1m_tokens
? chalk.gray(
`($${m.cost_per_1m_tokens.input.toFixed(2)} input | $${m.cost_per_1m_tokens.output.toFixed(2)} output)`
)
: ''
}`,
value: { id: m.id, provider },
short: `${provider}/${m.id}`
}));
if (providerModels.length > 0) {
return [...providerModels];
}
return null;
})
.filter(Boolean)
.flat();
// Find the index of the currently selected model for setting the default
let currentChoiceIndex = -1;
if (currentModel?.modelId && currentModel?.provider) {
currentChoiceIndex = roleChoices.findIndex(
(choice) =>
typeof choice.value === 'object' &&
choice.value.id === currentModel.modelId &&
choice.value.provider === currentModel.provider
);
}
// Construct final choices list based on whether 'None' is allowed
const commonPrefix = [cancelOption];
if (noChangeOption) {
commonPrefix.push(noChangeOption); // Add if it exists
}
commonPrefix.push(customOpenRouterOption);
let prefixLength = commonPrefix.length; // Initial prefix length
if (allowNone) {
choices = [
cancelOption,
customOpenRouterOption,
customOllamaOption,
...commonPrefix,
new inquirer.Separator(),
{ name: 'None (disable)', value: null },
{ name: 'None (disable)', value: null }, // Symbol updated
new inquirer.Separator(),
...roleChoices
];
// Adjust default index for extra options (Cancel, CustomOR, CustomOllama, Sep1, None, Sep2)
defaultIndex = currentChoiceIndex !== -1 ? currentChoiceIndex + 6 : 4; // Default to 'None' if no current model matched
// Adjust default index: Prefix + Sep1 + None + Sep2 (+3)
const noneOptionIndex = prefixLength + 1;
defaultIndex =
currentChoiceIndex !== -1
? currentChoiceIndex + prefixLength + 3 // Offset by prefix and separators
: noneOptionIndex; // Default to 'None' if no current model matched
} else {
choices = [
cancelOption,
customOpenRouterOption,
customOllamaOption,
...commonPrefix,
new inquirer.Separator(),
...roleChoices
...roleChoices,
new inquirer.Separator()
];
// Adjust default index for extra options (Cancel, CustomOR, CustomOllama, Sep)
defaultIndex = currentChoiceIndex !== -1 ? currentChoiceIndex + 4 : 0; // Default to 'Cancel' if no current model matched
// Adjust default index: Prefix + Sep (+1)
defaultIndex =
currentChoiceIndex !== -1
? currentChoiceIndex + prefixLength + 1 // Offset by prefix and separator
: noChangeOption
? 1
: 0; // Default to 'No Change' if present, else 'Cancel'
}
// Ensure defaultIndex is valid within the final choices array length
@@ -274,9 +304,16 @@ async function runInteractiveSetup(projectRoot) {
console.log(
chalk.yellow(`\nSetup canceled during ${role} model selection.`)
);
setupSuccess = false; // Also mark success as false on cancel
return false; // Indicate cancellation
}
// Handle the new 'No Change' option
if (selectedValue === '__NO_CHANGE__') {
console.log(chalk.gray(`No change selected for ${role} model.`));
return true; // Indicate success, continue setup
}
let modelIdToSet = null;
let providerHint = null;
let isCustomSelection = false;
@@ -310,21 +347,6 @@ async function runInteractiveSetup(projectRoot) {
setupSuccess = false;
return true; // Continue setup, but mark as failed
}
} else if (selectedValue === '__CUSTOM_OLLAMA__') {
isCustomSelection = true;
const { customId } = await inquirer.prompt([
{
type: 'input',
name: 'customId',
message: `Enter the custom Ollama Model ID for the ${role} role:`
}
]);
if (!customId) {
console.log(chalk.yellow('No custom ID entered. Skipping role.'));
return true;
}
modelIdToSet = customId;
providerHint = 'ollama';
} else if (
selectedValue &&
typeof selectedValue === 'object' &&
@@ -406,26 +428,29 @@ async function runInteractiveSetup(projectRoot) {
!(await handleSetModel(
'main',
answers.mainModel,
currentModels.main?.modelId
currentModels.main?.modelId // <--- Now 'currentModels' is defined
))
)
return;
) {
return false; // Explicitly return false if cancelled
}
if (
!(await handleSetModel(
'research',
answers.researchModel,
currentModels.research?.modelId
currentModels.research?.modelId // <--- Now 'currentModels' is defined
))
)
return;
) {
return false; // Explicitly return false if cancelled
}
if (
!(await handleSetModel(
'fallback',
answers.fallbackModel,
currentModels.fallback?.modelId
currentModels.fallback?.modelId // <--- Now 'currentModels' is defined
))
)
return;
) {
return false; // Explicitly return false if cancelled
}
if (setupSuccess && setupConfigModified) {
console.log(chalk.green.bold('\nModel setup complete!'));
@@ -438,6 +463,7 @@ async function runInteractiveSetup(projectRoot) {
)
);
}
return true; // Indicate setup flow completed (not cancelled)
// Let the main command flow continue to display results
}
@@ -475,6 +501,10 @@ function registerCommands(programInstance) {
.option('-o, --output <file>', 'Output file path', 'tasks/tasks.json')
.option('-n, --num-tasks <number>', 'Number of tasks to generate', '10')
.option('-f, --force', 'Skip confirmation when overwriting existing tasks')
.option(
'--append',
'Append new tasks to existing tasks.json instead of overwriting'
)
.action(async (file, options) => {
// Use input option if file argument not provided
const inputFile = file || options.input;
@@ -482,10 +512,11 @@ function registerCommands(programInstance) {
const numTasks = parseInt(options.numTasks, 10);
const outputPath = options.output;
const force = options.force || false;
const append = options.append || false;
// Helper function to check if tasks.json exists and confirm overwrite
async function confirmOverwriteIfNeeded() {
if (fs.existsSync(outputPath) && !force) {
if (fs.existsSync(outputPath) && !force && !append) {
const shouldContinue = await confirmTaskOverwrite(outputPath);
if (!shouldContinue) {
console.log(chalk.yellow('Operation cancelled by user.'));
@@ -504,7 +535,7 @@ function registerCommands(programInstance) {
if (!(await confirmOverwriteIfNeeded())) return;
console.log(chalk.blue(`Generating ${numTasks} tasks...`));
await parsePRD(defaultPrdPath, outputPath, numTasks);
await parsePRD(defaultPrdPath, outputPath, numTasks, { append });
return;
}
@@ -525,17 +556,21 @@ function registerCommands(programInstance) {
' -i, --input <file> Path to the PRD file (alternative to positional argument)\n' +
' -o, --output <file> Output file path (default: "tasks/tasks.json")\n' +
' -n, --num-tasks <number> Number of tasks to generate (default: 10)\n' +
' -f, --force Skip confirmation when overwriting existing tasks\n\n' +
' -f, --force Skip confirmation when overwriting existing tasks\n' +
' --append Append new tasks to existing tasks.json instead of overwriting\n\n' +
chalk.cyan('Example:') +
'\n' +
' task-master parse-prd requirements.txt --num-tasks 15\n' +
' task-master parse-prd --input=requirements.txt\n' +
' task-master parse-prd --force\n\n' +
' task-master parse-prd --force\n' +
' task-master parse-prd requirements_v2.txt --append\n\n' +
chalk.yellow('Note: This command will:') +
'\n' +
' 1. Look for a PRD file at scripts/prd.txt by default\n' +
' 2. Use the file specified by --input or positional argument if provided\n' +
' 3. Generate tasks from the PRD and overwrite any existing tasks.json file',
' 3. Generate tasks from the PRD and either:\n' +
' - Overwrite any existing tasks.json file (default)\n' +
' - Append to existing tasks.json if --append is used',
{ padding: 1, borderColor: 'blue', borderStyle: 'round' }
)
);
@@ -547,8 +582,11 @@ function registerCommands(programInstance) {
console.log(chalk.blue(`Parsing PRD file: ${inputFile}`));
console.log(chalk.blue(`Generating ${numTasks} tasks...`));
if (append) {
console.log(chalk.blue('Appending to existing tasks...'));
}
await parsePRD(inputFile, outputPath, numTasks);
await parsePRD(inputFile, outputPath, numTasks, { append });
});
// update command
@@ -1781,6 +1819,7 @@ function registerCommands(programInstance) {
programInstance
.command('remove-task')
.description('Remove one or more tasks or subtasks permanently')
.description('Remove one or more tasks or subtasks permanently')
.option(
'-i, --id <ids>',
'ID(s) of the task(s) or subtask(s) to remove (e.g., "5", "5.2", or "5,6.1,7")'
@@ -1995,6 +2034,11 @@ function registerCommands(programInstance) {
`Note: The following IDs were not found initially and were skipped: ${nonExistentIds.join(', ')}`
)
);
// Exit with error if any removals failed
if (successfulRemovals.length === 0) {
process.exit(1);
}
}
} catch (error) {
console.error(
@@ -2085,15 +2129,33 @@ Examples:
process.exit(1);
}
// --- Handle Interactive Setup ---
if (options.setup) {
// Assume runInteractiveSetup is defined elsewhere in this file
await runInteractiveSetup(projectRoot);
// No return here, flow continues to display results below
// Determine the primary action based on flags
const isSetup = options.setup;
const isSetOperation =
options.setMain || options.setResearch || options.setFallback;
// --- Execute Action ---
if (isSetup) {
// Action 1: Run Interactive Setup
console.log(chalk.blue('Starting interactive model setup...')); // Added feedback
try {
await runInteractiveSetup(projectRoot);
// runInteractiveSetup logs its own completion/error messages
} catch (setupError) {
console.error(
chalk.red('\\nInteractive setup failed unexpectedly:'),
setupError.message
);
}
// --- IMPORTANT: Exit after setup ---
return; // Stop execution here
}
// --- Handle Direct Set Operations (only if not running setup) ---
else {
let modelUpdated = false;
if (isSetOperation) {
// Action 2: Perform Direct Set Operations
let updateOccurred = false; // Track if any update actually happened
if (options.setMain) {
const result = await setModel('main', options.setMain, {
projectRoot,
@@ -2105,13 +2167,13 @@ Examples:
});
if (result.success) {
console.log(chalk.green(`${result.data.message}`));
if (result.data.warning) {
if (result.data.warning)
console.log(chalk.yellow(result.data.warning));
}
modelUpdated = true;
updateOccurred = true;
} else {
console.error(chalk.red(`❌ Error: ${result.error.message}`));
// Optionally exit or provide more specific feedback
console.error(
chalk.red(`❌ Error setting main model: ${result.error.message}`)
);
}
}
if (options.setResearch) {
@@ -2125,12 +2187,15 @@ Examples:
});
if (result.success) {
console.log(chalk.green(`${result.data.message}`));
if (result.data.warning) {
if (result.data.warning)
console.log(chalk.yellow(result.data.warning));
}
modelUpdated = true;
updateOccurred = true;
} else {
console.error(chalk.red(`❌ Error: ${result.error.message}`));
console.error(
chalk.red(
`❌ Error setting research model: ${result.error.message}`
)
);
}
}
if (options.setFallback) {
@@ -2144,42 +2209,47 @@ Examples:
});
if (result.success) {
console.log(chalk.green(`${result.data.message}`));
if (result.data.warning) {
if (result.data.warning)
console.log(chalk.yellow(result.data.warning));
}
modelUpdated = true;
updateOccurred = true;
} else {
console.error(chalk.red(`❌ Error: ${result.error.message}`));
console.error(
chalk.red(
`❌ Error setting fallback model: ${result.error.message}`
)
);
}
}
// If only set flags were used, we still proceed to display the results
}
// --- Always Display Status After Setup or Set ---
// Optional: Add a final confirmation if any update occurred
if (updateOccurred) {
console.log(chalk.blue('\nModel configuration updated.'));
} else {
console.log(
chalk.yellow(
'\nNo model configuration changes were made (or errors occurred).'
)
);
}
// --- IMPORTANT: Exit after set operations ---
return; // Stop execution here
}
// Action 3: Display Full Status (Only runs if no setup and no set flags)
console.log(chalk.blue('Fetching current model configuration...')); // Added feedback
const configResult = await getModelConfiguration({ projectRoot });
// Fetch available models *before* displaying config to use for formatting
const availableResult = await getAvailableModelsList({ projectRoot });
const apiKeyStatusResult = await getApiKeyStatusReport({ projectRoot }); // Fetch API key status
const apiKeyStatusResult = await getApiKeyStatusReport({ projectRoot });
// 1. Display Active Models
if (!configResult.success) {
// If config is missing AFTER setup attempt, it might indicate an issue saving.
if (options.setup && configResult.error?.code === 'CONFIG_MISSING') {
console.error(
chalk.red(
`❌ Error: Configuration file still missing after setup attempt. Check file permissions.`
)
);
} else {
console.error(
chalk.red(
`❌ Error fetching configuration: ${configResult.error.message}`
)
);
}
// Attempt to display other info even if config fails
console.error(
chalk.red(
`❌ Error fetching configuration: ${configResult.error.message}`
)
);
} else {
// Pass available models list for SWE score formatting
displayModelConfiguration(
configResult.data,
availableResult.data?.models || []
@@ -2199,7 +2269,6 @@ Examples:
// 3. Display Other Available Models (Filtered)
if (availableResult.success) {
// Filter out models that are already actively configured and placeholders
const activeIds = configResult.success
? [
configResult.data.activeModels.main.modelId,
@@ -2208,9 +2277,9 @@ Examples:
].filter(Boolean)
: [];
const displayableAvailable = availableResult.data.models.filter(
(m) => !activeIds.includes(m.modelId) && !m.modelId.startsWith('[') // Exclude placeholders like [ollama-any]
(m) => !activeIds.includes(m.modelId) && !m.modelId.startsWith('[')
);
displayAvailableModels(displayableAvailable); // This function now includes the "Next Steps" box
displayAvailableModels(displayableAvailable);
} else {
console.error(
chalk.yellow(
@@ -2220,7 +2289,7 @@ Examples:
}
// 4. Conditional Hint if Config File is Missing
const configExists = isConfigFilePresent(projectRoot); // Re-check after potential setup/writes
const configExists = isConfigFilePresent(projectRoot);
if (!configExists) {
console.log(
chalk.yellow(
@@ -2228,6 +2297,8 @@ Examples:
)
);
}
// --- IMPORTANT: Exit after displaying status ---
return; // Stop execution here
});
return programInstance;

View File

@@ -179,18 +179,20 @@ async function addDependency(tasksPath, taskId, dependencyId) {
);
// Display a more visually appealing success message
console.log(
boxen(
chalk.green(`Successfully added dependency:\n\n`) +
`Task ${chalk.bold(formattedTaskId)} now depends on ${chalk.bold(formattedDependencyId)}`,
{
padding: 1,
borderColor: 'green',
borderStyle: 'round',
margin: { top: 1 }
}
)
);
if (!isSilentMode()) {
console.log(
boxen(
chalk.green(`Successfully added dependency:\n\n`) +
`Task ${chalk.bold(formattedTaskId)} now depends on ${chalk.bold(formattedDependencyId)}`,
{
padding: 1,
borderColor: 'green',
borderStyle: 'round',
margin: { top: 1 }
}
)
);
}
// Generate updated task files
await generateTaskFiles(tasksPath, 'tasks');
@@ -353,11 +355,13 @@ function isCircularDependency(tasks, taskId, chain = []) {
// Find the task or subtask
let task = null;
let parentIdForSubtask = null;
// Check if this is a subtask reference (e.g., "1.2")
if (taskIdStr.includes('.')) {
const [parentId, subtaskId] = taskIdStr.split('.').map(Number);
const parentTask = tasks.find((t) => t.id === parentId);
parentIdForSubtask = parentId; // Store parent ID if it's a subtask
if (parentTask && parentTask.subtasks) {
task = parentTask.subtasks.find((st) => st.id === subtaskId);
@@ -377,10 +381,18 @@ function isCircularDependency(tasks, taskId, chain = []) {
}
// Check each dependency recursively
const newChain = [...chain, taskId];
return task.dependencies.some((depId) =>
isCircularDependency(tasks, depId, newChain)
);
const newChain = [...chain, taskIdStr]; // Use taskIdStr for consistency
return task.dependencies.some((depId) => {
let normalizedDepId = String(depId);
// Normalize relative subtask dependencies
if (typeof depId === 'number' && parentIdForSubtask !== null) {
// If the current task is a subtask AND the dependency is a number,
// assume it refers to a sibling subtask.
normalizedDepId = `${parentIdForSubtask}.${depId}`;
}
// Pass the normalized ID to the recursive call
return isCircularDependency(tasks, normalizedDepId, newChain);
});
}
/**
@@ -579,118 +591,43 @@ async function validateDependenciesCommand(tasksPath, options = {}) {
`Analyzing dependencies for ${taskCount} tasks and ${subtaskCount} subtasks...`
);
// Track validation statistics
const stats = {
nonExistentDependenciesRemoved: 0,
selfDependenciesRemoved: 0,
tasksFixed: 0,
subtasksFixed: 0
};
// Create a custom logger instead of reassigning the imported log function
const warnings = [];
const customLogger = function (level, ...args) {
if (level === 'warn') {
warnings.push(args.join(' '));
// Count the type of fix based on the warning message
const msg = args.join(' ');
if (msg.includes('self-dependency')) {
stats.selfDependenciesRemoved++;
} else if (msg.includes('invalid')) {
stats.nonExistentDependenciesRemoved++;
}
// Count if it's a task or subtask being fixed
if (msg.includes('from subtask')) {
stats.subtasksFixed++;
} else if (msg.includes('from task')) {
stats.tasksFixed++;
}
}
// Call the original log function
return log(level, ...args);
};
// Run validation with custom logger
try {
// Temporarily save validateTaskDependencies function with normal log
const originalValidateTaskDependencies = validateTaskDependencies;
// Directly call the validation function
const validationResult = validateTaskDependencies(data.tasks);
// Create patched version that uses customLogger
const patchedValidateTaskDependencies = (tasks, tasksPath) => {
// Temporarily redirect log calls in this scope
const originalLog = log;
const logProxy = function (...args) {
return customLogger(...args);
};
if (!validationResult.valid) {
log(
'error',
`Dependency validation failed. Found ${validationResult.issues.length} issue(s):`
);
validationResult.issues.forEach((issue) => {
let errorMsg = ` [${issue.type.toUpperCase()}] Task ${issue.taskId}: ${issue.message}`;
if (issue.dependencyId) {
errorMsg += ` (Dependency: ${issue.dependencyId})`;
}
log('error', errorMsg); // Log each issue as an error
});
// Call the original function in a context where log calls are intercepted
const result = (() => {
// Use Function.prototype.bind to create a new function that has logProxy available
// Pass isCircularDependency explicitly to make it available
return Function(
'tasks',
'tasksPath',
'log',
'customLogger',
'isCircularDependency',
'taskExists',
`return (${originalValidateTaskDependencies.toString()})(tasks, tasksPath);`
)(
tasks,
tasksPath,
logProxy,
customLogger,
isCircularDependency,
taskExists
);
})();
// Optionally exit if validation fails, depending on desired behavior
// process.exit(1); // Uncomment if validation failure should stop the process
return result;
};
const changesDetected = patchedValidateTaskDependencies(
data.tasks,
tasksPath
);
// Create a detailed report
if (changesDetected) {
log('success', 'Invalid dependencies were removed from tasks.json');
// Show detailed stats in a nice box - only if not in silent mode
// Display summary box even on failure, showing issues found
if (!isSilentMode()) {
console.log(
boxen(
chalk.green(`Dependency Validation Results:\n\n`) +
chalk.red(`Dependency Validation FAILED\n\n`) +
`${chalk.cyan('Tasks checked:')} ${taskCount}\n` +
`${chalk.cyan('Subtasks checked:')} ${subtaskCount}\n` +
`${chalk.cyan('Non-existent dependencies removed:')} ${stats.nonExistentDependenciesRemoved}\n` +
`${chalk.cyan('Self-dependencies removed:')} ${stats.selfDependenciesRemoved}\n` +
`${chalk.cyan('Tasks fixed:')} ${stats.tasksFixed}\n` +
`${chalk.cyan('Subtasks fixed:')} ${stats.subtasksFixed}`,
`${chalk.red('Issues found:')} ${validationResult.issues.length}`, // Display count from result
{
padding: 1,
borderColor: 'green',
borderColor: 'red',
borderStyle: 'round',
margin: { top: 1, bottom: 1 }
}
)
);
// Show all warnings in a collapsible list if there are many
if (warnings.length > 0) {
console.log(chalk.yellow('\nDetailed fixes:'));
warnings.forEach((warning) => {
console.log(` ${warning}`);
});
}
}
// Regenerate task files to reflect the changes
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
log('info', 'Task files regenerated to reflect dependency changes');
} else {
log(
'success',

View File

@@ -0,0 +1,315 @@
/**
* Rule Transformer Module
* Handles conversion of Cursor rules to Roo rules
*
* This module procedurally generates .roo/rules files from .cursor/rules files,
* eliminating the need to maintain both sets of files manually.
*/
import fs from 'fs';
import path from 'path';
import { log } from './utils.js';
// Configuration for term conversions - centralized for easier future updates
// Each entry pairs a `from` RegExp with a `to` replacement (a string or a
// replacer function); both forms are consumed via String.prototype.replace
// by the transform helpers below.
const conversionConfig = {
	// Product and brand name replacements
	brandTerms: [
		// NOTE(review): rules apply in array order, so this generic rule
		// rewrites the bare domain before the bracketed/href/paren variants
		// below can match — presumably intentional, but verify the ordering
		// if link formatting ever regresses.
		{ from: /cursor\.so/g, to: 'roocode.com' },
		{ from: /\[cursor\.so\]/g, to: '[roocode.com]' },
		{ from: /href="https:\/\/cursor\.so/g, to: 'href="https://roocode.com' },
		{ from: /\(https:\/\/cursor\.so/g, to: '(https://roocode.com' },
		{
			// Case-aware whole-word replacement: exactly 'Cursor' -> 'Roo Code',
			// any other casing (cursor, CURSOR, ...) -> 'roo'.
			from: /\bcursor\b/gi,
			to: (match) => (match === 'Cursor' ? 'Roo Code' : 'roo')
		},
		// Catches 'Cursor' embedded in longer words (e.g. 'Cursors'), which
		// the word-bounded rule above leaves untouched.
		{ from: /Cursor/g, to: 'Roo Code' }
	],

	// File extension replacements
	fileExtensions: [{ from: /\.mdc\b/g, to: '.md' }],

	// Documentation URL replacements
	docUrls: [
		{
			// Preserves the path portion of the URL; only the host changes.
			from: /https:\/\/docs\.cursor\.com\/[^\s)'"]+/g,
			to: (match) => match.replace('docs.cursor.com', 'docs.roocode.com')
		},
		{ from: /https:\/\/docs\.roo\.com\//g, to: 'https://docs.roocode.com/' }
	],

	// Tool references - direct replacements (Cursor tool name -> Roo tool
	// name; identity entries keep already-correct names stable when they
	// appear in the alternation regex built by replaceToolReferences).
	toolNames: {
		search: 'search_files',
		read_file: 'read_file',
		edit_file: 'apply_diff',
		create_file: 'write_to_file',
		run_command: 'execute_command',
		terminal_command: 'execute_command',
		use_mcp: 'use_mcp_tool',
		switch_mode: 'switch_mode'
	},

	// Tool references in context - more specific replacements
	toolContexts: [
		{ from: /\bsearch tool\b/g, to: 'search_files tool' },
		{ from: /\bedit_file tool\b/g, to: 'apply_diff tool' },
		{ from: /\buse the search\b/g, to: 'use the search_files' },
		{ from: /\bThe edit_file\b/g, to: 'The apply_diff' },
		{ from: /\brun_command executes\b/g, to: 'execute_command executes' },
		{ from: /\buse_mcp connects\b/g, to: 'use_mcp_tool connects' },
		// Additional contextual patterns for flexibility
		{ from: /\bCursor search\b/g, to: 'Roo Code search_files' },
		{ from: /\bCursor edit\b/g, to: 'Roo Code apply_diff' },
		{ from: /\bCursor create\b/g, to: 'Roo Code write_to_file' },
		{ from: /\bCursor run\b/g, to: 'Roo Code execute_command' }
	],

	// Tool group and category names
	toolGroups: [
		{ from: /\bSearch tools\b/g, to: 'Read Group tools' },
		{ from: /\bEdit tools\b/g, to: 'Edit Group tools' },
		{ from: /\bRun tools\b/g, to: 'Command Group tools' },
		{ from: /\bMCP servers\b/g, to: 'MCP Group tools' },
		{ from: /\bSearch Group\b/g, to: 'Read Group' },
		{ from: /\bEdit Group\b/g, to: 'Edit Group' },
		{ from: /\bRun Group\b/g, to: 'Command Group' }
	],

	// File references in markdown links
	fileReferences: {
		pathPattern: /\[(.+?)\]\(mdc:\.cursor\/rules\/(.+?)\.mdc\)/g,
		// `fileMap` (declared below) is resolved at call time, so the
		// forward reference is safe: replacement() only runs after the
		// module has finished evaluating.
		replacement: (match, text, filePath) => {
			// Get the base filename
			const baseName = path.basename(filePath, '.mdc');
			// Get the new filename (either from mapping or by replacing extension)
			const newFileName = fileMap[`${baseName}.mdc`] || `${baseName}.md`;
			// Return the updated link
			return `[${text}](mdc:.roo/rules/${newFileName})`;
		}
	}
};

// File name mapping (specific files with naming changes)
const fileMap = {
	'cursor_rules.mdc': 'roo_rules.md',
	'dev_workflow.mdc': 'dev_workflow.md',
	'self_improve.mdc': 'self_improve.md',
	'taskmaster.mdc': 'taskmaster.md'
	// Add other mappings as needed
};
/**
 * Replace basic Cursor terms with Roo equivalents.
 *
 * Applies, in order, the brand-term rules and then the file-extension
 * rules from `conversionConfig` to the given text.
 *
 * @param {string} content - Raw rule-file text to transform.
 * @returns {string} The text with brand terms and extensions rewritten.
 */
function replaceBasicTerms(content) {
	let result = content;

	// String.prototype.replace accepts either a string or a replacer
	// function as its second argument, so a single call handles both
	// pattern kinds. (The original branched on `typeof pattern.to ===
	// 'function'` with two identical bodies — a dead conditional.)
	conversionConfig.brandTerms.forEach((pattern) => {
		result = result.replace(pattern.from, pattern.to);
	});

	// Apply file extension replacements (.mdc -> .md)
	conversionConfig.fileExtensions.forEach((pattern) => {
		result = result.replace(pattern.from, pattern.to);
	});

	return result;
}
/**
 * Replace Cursor tool references with Roo tool equivalents.
 *
 * Three passes: direct tool-name substitution via a single alternation
 * regex, then contextual phrase fixes, then tool-group/category renames —
 * all driven by `conversionConfig`.
 *
 * @param {string} content - Raw rule-file text to transform.
 * @returns {string} The text with tool references rewritten.
 */
function replaceToolReferences(content) {
	const { toolNames, toolContexts, toolGroups } = conversionConfig;

	// One word-bounded alternation covering every known tool name, so
	// partial identifiers are left alone.
	const directNamePattern = new RegExp(
		`\\b(${Object.keys(toolNames).join('|')})\\b`,
		'g'
	);

	// Pass 1: direct name substitutions (unknown names pass through).
	let output = content.replace(
		directNamePattern,
		(match, name) => toolNames[name] || name
	);

	// Pass 2: phrase-level replacements that need surrounding context.
	for (const { from, to } of toolContexts) {
		output = output.replace(from, to);
	}

	// Pass 3: tool group and category renames.
	for (const { from, to } of toolGroups) {
		output = output.replace(from, to);
	}

	return output;
}
/**
 * Update documentation URLs to point to Roo documentation.
 *
 * Applies the `docUrls` rules from `conversionConfig` in order.
 *
 * @param {string} content - Raw rule-file text to transform.
 * @returns {string} The text with documentation URLs rewritten.
 */
function updateDocReferences(content) {
	let result = content;
	// replace() handles both string and function replacements natively, so
	// no typeof branch is needed (the original's two branches were
	// identical — a dead conditional).
	conversionConfig.docUrls.forEach((pattern) => {
		result = result.replace(pattern.from, pattern.to);
	});
	return result;
}
/**
 * Rewrite markdown links of the form `[text](mdc:.cursor/rules/x.mdc)` so
 * they point at the corresponding `.roo/rules/*.md` file, using the
 * pattern and replacer configured in `conversionConfig.fileReferences`.
 *
 * @param {string} content - Raw rule-file text to transform.
 * @returns {string} The text with rule-file links rewritten.
 */
function updateFileReferences(content) {
	const linkRules = conversionConfig.fileReferences;
	return content.replace(linkRules.pathPattern, linkRules.replacement);
}
/**
 * Main transformation function that applies all conversions.
 *
 * Runs the structured passes (brand terms, tool references, doc URLs,
 * file links), then a defensive failsafe pass that re-applies the most
 * critical substitutions to catch contexts the structured rules missed.
 *
 * @param {string} content - Raw Cursor rule-file text.
 * @returns {string} Fully converted Roo rule-file text.
 */
function transformCursorToRooRules(content) {
	// Apply all transformations in appropriate order
	let result = content;
	result = replaceBasicTerms(result);
	result = replaceToolReferences(result);
	result = updateDocReferences(result);
	result = updateFileReferences(result);

	// Failsafe pass to catch any variations the structured rules missed.

	// 1. Handle cursor.so in any possible context
	result = result.replace(/cursor\.so/gi, 'roocode.com');
	// Edge case: URL with different formatting
	result = result.replace(/cursor\s*\.\s*so/gi, 'roocode.com');
	result = result.replace(/https?:\/\/cursor\.so/gi, 'https://roocode.com');
	result = result.replace(
		/https?:\/\/www\.cursor\.so/gi,
		'https://www.roocode.com'
	);

	// 2. Handle tool references.
	// FIX: the original used /search/g with no word boundaries, which
	// corrupted text already converted by replaceToolReferences
	// ('search_files' -> 'search_files_files') and mangled unrelated words
	// ('research' -> 'research_files'). \b...\b restricts the match to the
	// standalone word; 'search' inside 'search_files' is not re-matched
	// because '_' is a word character, so there is no boundary after 'h'.
	result = result.replace(/\bsearch\b/g, 'search_files');
	result = result.replace(/\bedit_file\b/gi, 'apply_diff');
	// Case-insensitive catch for capitalized variants ('Search tool') that
	// the case-sensitive rule above leaves untouched.
	result = result.replace(/\bsearch tool\b/gi, 'search_files tool');
	result = result.replace(/\bSearch Tool\b/g, 'Search_Files Tool');

	// 3. Handle basic terms (with case handling)
	result = result.replace(/\bcursor\b/gi, (match) =>
		match.charAt(0) === 'C' ? 'Roo Code' : 'roo'
	);
	result = result.replace(/Cursor/g, 'Roo Code');
	result = result.replace(/CURSOR/g, 'ROO CODE');

	// 4. Handle file extensions
	result = result.replace(/\.mdc\b/g, '.md');

	// 5. Handle any missed URL patterns
	result = result.replace(/docs\.cursor\.com/gi, 'docs.roocode.com');
	result = result.replace(/docs\.roo\.com/gi, 'docs.roocode.com');

	return result;
}
/**
 * Convert a single Cursor rule file to Roo rule format.
 * Reads the source file, runs the full transformation pipeline, creates the
 * target directory if needed, and writes the result.
 * @param {string} sourcePath - Path to the Cursor .mdc rule file.
 * @param {string} targetPath - Destination path for the Roo .md rule file.
 * @returns {boolean} true on success, false if any step failed (error logged).
 */
function convertCursorRuleToRooRule(sourcePath, targetPath) {
	const sourceName = path.basename(sourcePath);
	const targetName = path.basename(targetPath);
	try {
		log('info', `Converting Cursor rule ${sourceName} to Roo rule ${targetName}`);

		// Read and transform in one pass.
		const transformedContent = transformCursorToRooRules(
			fs.readFileSync(sourcePath, 'utf8')
		);

		// recursive:true is a no-op when the directory already exists.
		fs.mkdirSync(path.dirname(targetPath), { recursive: true });
		fs.writeFileSync(targetPath, transformedContent);

		log('success', `Successfully converted ${sourceName} to ${targetName}`);
		return true;
	} catch (error) {
		log(
			'error',
			`Failed to convert rule file ${sourceName}: ${error.message}`
		);
		return false;
	}
}
/**
 * Process all Cursor rules in a project and convert them to Roo rules.
 * Scans <projectDir>/.cursor/rules for .mdc files, converts each into
 * <projectDir>/.roo/rules (creating the directory if needed), and tallies
 * the outcomes.
 * @param {string} projectDir - Root directory of the project.
 * @returns {{success: number, failed: number}} Conversion counts.
 */
function convertAllCursorRulesToRooRules(projectDir) {
	const cursorRulesDir = path.join(projectDir, '.cursor', 'rules');
	const rooRulesDir = path.join(projectDir, '.roo', 'rules');

	// Nothing to do if the project has no Cursor rules.
	if (!fs.existsSync(cursorRulesDir)) {
		log('warn', `Cursor rules directory not found: ${cursorRulesDir}`);
		return { success: 0, failed: 0 };
	}

	// Ensure Roo rules directory exists
	if (!fs.existsSync(rooRulesDir)) {
		fs.mkdirSync(rooRulesDir, { recursive: true });
		log('info', `Created Roo rules directory: ${rooRulesDir}`);
	}

	const counts = { success: 0, failed: 0 };

	// Only .mdc files are rule files; everything else is ignored.
	const ruleFiles = fs
		.readdirSync(cursorRulesDir)
		.filter((file) => file.endsWith('.mdc'));

	for (const file of ruleFiles) {
		const sourcePath = path.join(cursorRulesDir, file);
		// Target name comes from the explicit mapping when present,
		// otherwise just swap the extension.
		const targetFilename = fileMap[file] || file.replace('.mdc', '.md');
		const targetPath = path.join(rooRulesDir, targetFilename);

		if (convertCursorRuleToRooRule(sourcePath, targetPath)) {
			counts.success += 1;
		} else {
			counts.failed += 1;
		}
	}

	log(
		'info',
		`Rule conversion complete: ${counts.success} successful, ${counts.failed} failed`
	);
	return counts;
}
export { convertAllCursorRulesToRooRules, convertCursorRuleToRooRule };

View File

@@ -34,7 +34,8 @@
"id": "gpt-4o",
"swe_score": 0.332,
"cost_per_1m_tokens": { "input": 2.5, "output": 10.0 },
"allowed_roles": ["main", "fallback"]
"allowed_roles": ["main", "fallback"],
"max_tokens": 16384
},
{
"id": "o1",

View File

@@ -215,6 +215,7 @@ async function addTask(
// Determine the service role based on the useResearch flag
const serviceRole = useResearch ? 'research' : 'main';
report('DEBUG: Calling generateObjectService...', 'debug');
// Call the unified AI service
const aiGeneratedTaskData = await generateObjectService({
role: serviceRole, // <-- Use the determined role
@@ -225,14 +226,20 @@ async function addTask(
prompt: userPrompt,
reportProgress // Pass progress reporter if available
});
report('DEBUG: generateObjectService returned successfully.', 'debug');
report('Successfully generated task data from AI.', 'success');
taskData = aiGeneratedTaskData; // Assign the validated object
} catch (error) {
report(
`DEBUG: generateObjectService caught error: ${error.message}`,
'debug'
);
report(`Error generating task with AI: ${error.message}`, 'error');
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
throw error; // Re-throw error after logging
} finally {
report('DEBUG: generateObjectService finally block reached.', 'debug');
if (loadingIndicator) stopLoadingIndicator(loadingIndicator); // Ensure indicator stops
}
// --- End Refactored AI Interaction ---
@@ -254,13 +261,17 @@ async function addTask(
// Add the task to the tasks array
data.tasks.push(newTask);
report('DEBUG: Writing tasks.json...', 'debug');
// Write the updated tasks to the file
writeJSON(tasksPath, data);
report('DEBUG: tasks.json written.', 'debug');
// Generate markdown task files
report('Generating task files...', 'info');
report('DEBUG: Calling generateTaskFiles...', 'debug');
// Pass mcpLog if available to generateTaskFiles
await generateTaskFiles(tasksPath, path.dirname(tasksPath), { mcpLog });
report('DEBUG: generateTaskFiles finished.', 'debug');
// Show success message - only for text output (CLI)
if (outputFormat === 'text') {
@@ -305,7 +316,7 @@ async function addTask(
chalk.white(`Status: ${getStatusWithColor(newTask.status)}`) +
'\n' +
chalk.white(
`Priority: ${chalk.keyword(getPriorityColor(newTask.priority))(newTask.priority)}`
`Priority: ${chalk[getPriorityColor(newTask.priority)](newTask.priority)}`
) +
'\n' +
(numericDependencies.length > 0
@@ -332,6 +343,7 @@ async function addTask(
}
// Return the new task ID
report(`DEBUG: Returning new task ID: ${newTaskId}`, 'debug');
return newTaskId;
} catch (error) {
// Stop any loading indicator on error

View File

@@ -427,7 +427,7 @@ function displayHelp() {
commands: [
{
name: 'parse-prd',
args: '--input=<file.txt> [--tasks=10]',
args: '--input=<file.txt> [--num-tasks=10]',
desc: 'Generate tasks from a PRD document'
},
{
@@ -1953,7 +1953,7 @@ function displayAvailableModels(availableModels) {
) +
'\n' +
chalk.cyan(
`5. Use custom models: ${chalk.yellow('task-master models --custom --set-main|research|fallback <model_id>')}`
`5. Use custom ollama/openrouter models: ${chalk.yellow('task-master models --openrouter|ollama --set-main|research|fallback <model_id>')}`
),
{
padding: 1,