Merge branch 'next' of github.com:eyaltoledano/claude-task-master into add-complexity-score-to-task
This commit is contained in:
@@ -180,9 +180,9 @@ function copyTemplateFile(templateName, targetPath, replacements = {}) {
|
||||
|
||||
// Map template names to their actual source paths
|
||||
switch (templateName) {
|
||||
case 'scripts_README.md':
|
||||
sourcePath = path.join(__dirname, '..', 'assets', 'scripts_README.md');
|
||||
break;
|
||||
// case 'scripts_README.md':
|
||||
// sourcePath = path.join(__dirname, '..', 'assets', 'scripts_README.md');
|
||||
// break;
|
||||
case 'dev_workflow.mdc':
|
||||
sourcePath = path.join(
|
||||
__dirname,
|
||||
@@ -219,8 +219,8 @@ function copyTemplateFile(templateName, targetPath, replacements = {}) {
|
||||
'self_improve.mdc'
|
||||
);
|
||||
break;
|
||||
case 'README-task-master.md':
|
||||
sourcePath = path.join(__dirname, '..', 'README-task-master.md');
|
||||
// case 'README-task-master.md':
|
||||
// sourcePath = path.join(__dirname, '..', 'README-task-master.md');
|
||||
break;
|
||||
case 'windsurfrules':
|
||||
sourcePath = path.join(__dirname, '..', 'assets', '.windsurfrules');
|
||||
@@ -351,18 +351,18 @@ async function initializeProject(options = {}) {
|
||||
}
|
||||
|
||||
// Debug logging only if not in silent mode
|
||||
if (!isSilentMode()) {
|
||||
console.log('===== DEBUG: INITIALIZE PROJECT OPTIONS RECEIVED =====');
|
||||
console.log('Full options object:', JSON.stringify(options));
|
||||
console.log('options.yes:', options.yes);
|
||||
console.log('==================================================');
|
||||
}
|
||||
// if (!isSilentMode()) {
|
||||
// console.log('===== DEBUG: INITIALIZE PROJECT OPTIONS RECEIVED =====');
|
||||
// console.log('Full options object:', JSON.stringify(options));
|
||||
// console.log('options.yes:', options.yes);
|
||||
// console.log('==================================================');
|
||||
// }
|
||||
|
||||
const skipPrompts = options.yes || (options.name && options.description);
|
||||
|
||||
if (!isSilentMode()) {
|
||||
console.log('Skip prompts determined:', skipPrompts);
|
||||
}
|
||||
// if (!isSilentMode()) {
|
||||
// console.log('Skip prompts determined:', skipPrompts);
|
||||
// }
|
||||
|
||||
if (skipPrompts) {
|
||||
if (!isSilentMode()) {
|
||||
@@ -565,12 +565,12 @@ function createProjectStructure(addAliases, dryRun) {
|
||||
path.join(targetDir, 'scripts', 'example_prd.txt')
|
||||
);
|
||||
|
||||
// Create main README.md
|
||||
copyTemplateFile(
|
||||
'README-task-master.md',
|
||||
path.join(targetDir, 'README-task-master.md'),
|
||||
replacements
|
||||
);
|
||||
// // Create main README.md
|
||||
// copyTemplateFile(
|
||||
// 'README-task-master.md',
|
||||
// path.join(targetDir, 'README-task-master.md'),
|
||||
// replacements
|
||||
// );
|
||||
|
||||
// Initialize git repository if git is available
|
||||
try {
|
||||
@@ -761,21 +761,22 @@ function setupMCPConfiguration(targetDir) {
|
||||
const newMCPServer = {
|
||||
'task-master-ai': {
|
||||
command: 'npx',
|
||||
args: ['-y', 'task-master-mcp'],
|
||||
args: ['-y', '--package=task-master-ai', 'task-master-ai'],
|
||||
env: {
|
||||
ANTHROPIC_API_KEY: 'YOUR_ANTHROPIC_API_KEY',
|
||||
PERPLEXITY_API_KEY: 'YOUR_PERPLEXITY_API_KEY',
|
||||
MODEL: 'claude-3-7-sonnet-20250219',
|
||||
PERPLEXITY_MODEL: 'sonar-pro',
|
||||
MAX_TOKENS: '64000',
|
||||
TEMPERATURE: '0.2',
|
||||
DEFAULT_SUBTASKS: '5',
|
||||
DEFAULT_PRIORITY: 'medium'
|
||||
ANTHROPIC_API_KEY: 'ANTHROPIC_API_KEY_HERE',
|
||||
PERPLEXITY_API_KEY: 'PERPLEXITY_API_KEY_HERE',
|
||||
OPENAI_API_KEY: 'OPENAI_API_KEY_HERE',
|
||||
GOOGLE_API_KEY: 'GOOGLE_API_KEY_HERE',
|
||||
XAI_API_KEY: 'XAI_API_KEY_HERE',
|
||||
OPENROUTER_API_KEY: 'OPENROUTER_API_KEY_HERE',
|
||||
MISTRAL_API_KEY: 'MISTRAL_API_KEY_HERE',
|
||||
AZURE_OPENAI_API_KEY: 'AZURE_OPENAI_API_KEY_HERE',
|
||||
OLLAMA_API_KEY: 'OLLAMA_API_KEY_HERE'
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Check if mcp.json already exists
|
||||
// Check if mcp.json already existsimage.png
|
||||
if (fs.existsSync(mcpJsonPath)) {
|
||||
log(
|
||||
'info',
|
||||
@@ -795,14 +796,14 @@ function setupMCPConfiguration(targetDir) {
|
||||
(server) =>
|
||||
server.args &&
|
||||
server.args.some(
|
||||
(arg) => typeof arg === 'string' && arg.includes('task-master-mcp')
|
||||
(arg) => typeof arg === 'string' && arg.includes('task-master-ai')
|
||||
)
|
||||
);
|
||||
|
||||
if (hasMCPString) {
|
||||
log(
|
||||
'info',
|
||||
'Found existing task-master-mcp configuration in mcp.json, leaving untouched'
|
||||
'Found existing task-master-ai MCP configuration in mcp.json, leaving untouched'
|
||||
);
|
||||
return; // Exit early, don't modify the existing configuration
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ import {
|
||||
getFallbackModelId,
|
||||
getParametersForRole
|
||||
} from './config-manager.js';
|
||||
import { log, resolveEnvVariable } from './utils.js';
|
||||
import { log, resolveEnvVariable, findProjectRoot } from './utils.js';
|
||||
|
||||
import * as anthropic from '../../src/ai-providers/anthropic.js';
|
||||
import * as perplexity from '../../src/ai-providers/perplexity.js';
|
||||
@@ -136,10 +136,11 @@ function _extractErrorMessage(error) {
|
||||
* Internal helper to resolve the API key for a given provider.
|
||||
* @param {string} providerName - The name of the provider (lowercase).
|
||||
* @param {object|null} session - Optional MCP session object.
|
||||
* @param {string|null} projectRoot - Optional project root path for .env fallback.
|
||||
* @returns {string|null} The API key or null if not found/needed.
|
||||
* @throws {Error} If a required API key is missing.
|
||||
*/
|
||||
function _resolveApiKey(providerName, session) {
|
||||
function _resolveApiKey(providerName, session, projectRoot = null) {
|
||||
const keyMap = {
|
||||
openai: 'OPENAI_API_KEY',
|
||||
anthropic: 'ANTHROPIC_API_KEY',
|
||||
@@ -163,10 +164,10 @@ function _resolveApiKey(providerName, session) {
|
||||
);
|
||||
}
|
||||
|
||||
const apiKey = resolveEnvVariable(envVarName, session);
|
||||
const apiKey = resolveEnvVariable(envVarName, session, projectRoot);
|
||||
if (!apiKey) {
|
||||
throw new Error(
|
||||
`Required API key ${envVarName} for provider '${providerName}' is not set in environment or session.`
|
||||
`Required API key ${envVarName} for provider '${providerName}' is not set in environment, session, or .env file.`
|
||||
);
|
||||
}
|
||||
return apiKey;
|
||||
@@ -241,27 +242,35 @@ async function _attemptProviderCallWithRetries(
|
||||
* Base logic for unified service functions.
|
||||
* @param {string} serviceType - Type of service ('generateText', 'streamText', 'generateObject').
|
||||
* @param {object} params - Original parameters passed to the service function.
|
||||
* @param {string} [params.projectRoot] - Optional project root path.
|
||||
* @returns {Promise<any>} Result from the underlying provider call.
|
||||
*/
|
||||
async function _unifiedServiceRunner(serviceType, params) {
|
||||
const {
|
||||
role: initialRole,
|
||||
session,
|
||||
projectRoot,
|
||||
systemPrompt,
|
||||
prompt,
|
||||
schema,
|
||||
objectName,
|
||||
...restApiParams
|
||||
} = params;
|
||||
log('info', `${serviceType}Service called`, { role: initialRole });
|
||||
log('info', `${serviceType}Service called`, {
|
||||
role: initialRole,
|
||||
projectRoot
|
||||
});
|
||||
|
||||
// Determine the effective project root (passed in or detected)
|
||||
const effectiveProjectRoot = projectRoot || findProjectRoot();
|
||||
|
||||
let sequence;
|
||||
if (initialRole === 'main') {
|
||||
sequence = ['main', 'fallback', 'research'];
|
||||
} else if (initialRole === 'fallback') {
|
||||
sequence = ['fallback', 'research'];
|
||||
} else if (initialRole === 'research') {
|
||||
sequence = ['research', 'fallback'];
|
||||
sequence = ['research', 'fallback', 'main'];
|
||||
} else if (initialRole === 'fallback') {
|
||||
sequence = ['fallback', 'main', 'research'];
|
||||
} else {
|
||||
log(
|
||||
'warn',
|
||||
@@ -281,16 +290,16 @@ async function _unifiedServiceRunner(serviceType, params) {
|
||||
log('info', `New AI service call with role: ${currentRole}`);
|
||||
|
||||
// 1. Get Config: Provider, Model, Parameters for the current role
|
||||
// Call individual getters based on the current role
|
||||
// Pass effectiveProjectRoot to config getters
|
||||
if (currentRole === 'main') {
|
||||
providerName = getMainProvider();
|
||||
modelId = getMainModelId();
|
||||
providerName = getMainProvider(effectiveProjectRoot);
|
||||
modelId = getMainModelId(effectiveProjectRoot);
|
||||
} else if (currentRole === 'research') {
|
||||
providerName = getResearchProvider();
|
||||
modelId = getResearchModelId();
|
||||
providerName = getResearchProvider(effectiveProjectRoot);
|
||||
modelId = getResearchModelId(effectiveProjectRoot);
|
||||
} else if (currentRole === 'fallback') {
|
||||
providerName = getFallbackProvider();
|
||||
modelId = getFallbackModelId();
|
||||
providerName = getFallbackProvider(effectiveProjectRoot);
|
||||
modelId = getFallbackModelId(effectiveProjectRoot);
|
||||
} else {
|
||||
log(
|
||||
'error',
|
||||
@@ -314,7 +323,8 @@ async function _unifiedServiceRunner(serviceType, params) {
|
||||
continue;
|
||||
}
|
||||
|
||||
roleParams = getParametersForRole(currentRole);
|
||||
// Pass effectiveProjectRoot to getParametersForRole
|
||||
roleParams = getParametersForRole(currentRole, effectiveProjectRoot);
|
||||
|
||||
// 2. Get Provider Function Set
|
||||
providerFnSet = PROVIDER_FUNCTIONS[providerName?.toLowerCase()];
|
||||
@@ -345,7 +355,12 @@ async function _unifiedServiceRunner(serviceType, params) {
|
||||
}
|
||||
|
||||
// 3. Resolve API Key (will throw if required and missing)
|
||||
apiKey = _resolveApiKey(providerName?.toLowerCase(), session);
|
||||
// Pass effectiveProjectRoot to _resolveApiKey
|
||||
apiKey = _resolveApiKey(
|
||||
providerName?.toLowerCase(),
|
||||
session,
|
||||
effectiveProjectRoot
|
||||
);
|
||||
|
||||
// 4. Construct Messages Array
|
||||
const messages = [];
|
||||
@@ -443,6 +458,7 @@ async function _unifiedServiceRunner(serviceType, params) {
|
||||
* @param {object} params - Parameters for the service call.
|
||||
* @param {string} params.role - The initial client role ('main', 'research', 'fallback').
|
||||
* @param {object} [params.session=null] - Optional MCP session object.
|
||||
* @param {string} [params.projectRoot=null] - Optional project root path for .env fallback.
|
||||
* @param {string} params.prompt - The prompt for the AI.
|
||||
* @param {string} [params.systemPrompt] - Optional system prompt.
|
||||
* // Other specific generateText params can be included here.
|
||||
@@ -459,6 +475,7 @@ async function generateTextService(params) {
|
||||
* @param {object} params - Parameters for the service call.
|
||||
* @param {string} params.role - The initial client role ('main', 'research', 'fallback').
|
||||
* @param {object} [params.session=null] - Optional MCP session object.
|
||||
* @param {string} [params.projectRoot=null] - Optional project root path for .env fallback.
|
||||
* @param {string} params.prompt - The prompt for the AI.
|
||||
* @param {string} [params.systemPrompt] - Optional system prompt.
|
||||
* // Other specific streamText params can be included here.
|
||||
@@ -475,6 +492,7 @@ async function streamTextService(params) {
|
||||
* @param {object} params - Parameters for the service call.
|
||||
* @param {string} params.role - The initial client role ('main', 'research', 'fallback').
|
||||
* @param {object} [params.session=null] - Optional MCP session object.
|
||||
* @param {string} [params.projectRoot=null] - Optional project root path for .env fallback.
|
||||
* @param {import('zod').ZodSchema} params.schema - The Zod schema for the expected object.
|
||||
* @param {string} params.prompt - The prompt for the AI.
|
||||
* @param {string} [params.systemPrompt] - Optional system prompt.
|
||||
|
||||
@@ -10,6 +10,7 @@ import boxen from 'boxen';
|
||||
import fs from 'fs';
|
||||
import https from 'https';
|
||||
import inquirer from 'inquirer';
|
||||
import ora from 'ora'; // Import ora
|
||||
|
||||
import { log, readJSON } from './utils.js';
|
||||
import {
|
||||
@@ -514,80 +515,111 @@ function registerCommands(programInstance) {
|
||||
const outputPath = options.output;
|
||||
const force = options.force || false;
|
||||
const append = options.append || false;
|
||||
let useForce = false;
|
||||
let useAppend = false;
|
||||
|
||||
// Helper function to check if tasks.json exists and confirm overwrite
|
||||
async function confirmOverwriteIfNeeded() {
|
||||
if (fs.existsSync(outputPath) && !force && !append) {
|
||||
const shouldContinue = await confirmTaskOverwrite(outputPath);
|
||||
if (!shouldContinue) {
|
||||
console.log(chalk.yellow('Operation cancelled by user.'));
|
||||
if (fs.existsSync(outputPath) && !useForce && !useAppend) {
|
||||
const overwrite = await confirmTaskOverwrite(outputPath);
|
||||
if (!overwrite) {
|
||||
log('info', 'Operation cancelled.');
|
||||
return false;
|
||||
}
|
||||
// If user confirms 'y', we should set useForce = true for the parsePRD call
|
||||
// Only overwrite if not appending
|
||||
useForce = true;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// If no input file specified, check for default PRD location
|
||||
if (!inputFile) {
|
||||
if (fs.existsSync(defaultPrdPath)) {
|
||||
console.log(chalk.blue(`Using default PRD file: ${defaultPrdPath}`));
|
||||
let spinner;
|
||||
|
||||
// Check for existing tasks.json before proceeding
|
||||
if (!(await confirmOverwriteIfNeeded())) return;
|
||||
try {
|
||||
if (!inputFile) {
|
||||
if (fs.existsSync(defaultPrdPath)) {
|
||||
console.log(
|
||||
chalk.blue(`Using default PRD file path: ${defaultPrdPath}`)
|
||||
);
|
||||
if (!(await confirmOverwriteIfNeeded())) return;
|
||||
|
||||
console.log(chalk.blue(`Generating ${numTasks} tasks...`));
|
||||
await parsePRD(defaultPrdPath, outputPath, numTasks, { append });
|
||||
console.log(chalk.blue(`Generating ${numTasks} tasks...`));
|
||||
spinner = ora('Parsing PRD and generating tasks...').start();
|
||||
await parsePRD(defaultPrdPath, outputPath, numTasks, {
|
||||
useAppend,
|
||||
useForce
|
||||
});
|
||||
spinner.succeed('Tasks generated successfully!');
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(
|
||||
chalk.yellow(
|
||||
'No PRD file specified and default PRD file not found at scripts/prd.txt.'
|
||||
)
|
||||
);
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.white.bold('Parse PRD Help') +
|
||||
'\n\n' +
|
||||
chalk.cyan('Usage:') +
|
||||
'\n' +
|
||||
` task-master parse-prd <prd-file.txt> [options]\n\n` +
|
||||
chalk.cyan('Options:') +
|
||||
'\n' +
|
||||
' -i, --input <file> Path to the PRD file (alternative to positional argument)\n' +
|
||||
' -o, --output <file> Output file path (default: "tasks/tasks.json")\n' +
|
||||
' -n, --num-tasks <number> Number of tasks to generate (default: 10)\n' +
|
||||
' -f, --force Skip confirmation when overwriting existing tasks\n' +
|
||||
' --append Append new tasks to existing tasks.json instead of overwriting\n\n' +
|
||||
chalk.cyan('Example:') +
|
||||
'\n' +
|
||||
' task-master parse-prd requirements.txt --num-tasks 15\n' +
|
||||
' task-master parse-prd --input=requirements.txt\n' +
|
||||
' task-master parse-prd --force\n' +
|
||||
' task-master parse-prd requirements_v2.txt --append\n\n' +
|
||||
chalk.yellow('Note: This command will:') +
|
||||
'\n' +
|
||||
' 1. Look for a PRD file at scripts/prd.txt by default\n' +
|
||||
' 2. Use the file specified by --input or positional argument if provided\n' +
|
||||
' 3. Generate tasks from the PRD and either:\n' +
|
||||
' - Overwrite any existing tasks.json file (default)\n' +
|
||||
' - Append to existing tasks.json if --append is used',
|
||||
{ padding: 1, borderColor: 'blue', borderStyle: 'round' }
|
||||
)
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(
|
||||
chalk.yellow(
|
||||
'No PRD file specified and default PRD file not found at scripts/prd.txt.'
|
||||
)
|
||||
);
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.white.bold('Parse PRD Help') +
|
||||
'\n\n' +
|
||||
chalk.cyan('Usage:') +
|
||||
'\n' +
|
||||
` task-master parse-prd <prd-file.txt> [options]\n\n` +
|
||||
chalk.cyan('Options:') +
|
||||
'\n' +
|
||||
' -i, --input <file> Path to the PRD file (alternative to positional argument)\n' +
|
||||
' -o, --output <file> Output file path (default: "tasks/tasks.json")\n' +
|
||||
' -n, --num-tasks <number> Number of tasks to generate (default: 10)\n' +
|
||||
' -f, --force Skip confirmation when overwriting existing tasks\n' +
|
||||
' --append Append new tasks to existing tasks.json instead of overwriting\n\n' +
|
||||
chalk.cyan('Example:') +
|
||||
'\n' +
|
||||
' task-master parse-prd requirements.txt --num-tasks 15\n' +
|
||||
' task-master parse-prd --input=requirements.txt\n' +
|
||||
' task-master parse-prd --force\n' +
|
||||
' task-master parse-prd requirements_v2.txt --append\n\n' +
|
||||
chalk.yellow('Note: This command will:') +
|
||||
'\n' +
|
||||
' 1. Look for a PRD file at scripts/prd.txt by default\n' +
|
||||
' 2. Use the file specified by --input or positional argument if provided\n' +
|
||||
' 3. Generate tasks from the PRD and either:\n' +
|
||||
' - Overwrite any existing tasks.json file (default)\n' +
|
||||
' - Append to existing tasks.json if --append is used',
|
||||
{ padding: 1, borderColor: 'blue', borderStyle: 'round' }
|
||||
)
|
||||
);
|
||||
return;
|
||||
if (!fs.existsSync(inputFile)) {
|
||||
console.error(
|
||||
chalk.red(`Error: Input PRD file not found: ${inputFile}`)
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
if (!(await confirmOverwriteIfNeeded())) return;
|
||||
|
||||
console.log(chalk.blue(`Parsing PRD file: ${inputFile}`));
|
||||
console.log(chalk.blue(`Generating ${numTasks} tasks...`));
|
||||
if (append) {
|
||||
console.log(chalk.blue('Appending to existing tasks...'));
|
||||
}
|
||||
|
||||
spinner = ora('Parsing PRD and generating tasks...').start();
|
||||
await parsePRD(inputFile, outputPath, numTasks, {
|
||||
append: useAppend,
|
||||
force: useForce
|
||||
});
|
||||
spinner.succeed('Tasks generated successfully!');
|
||||
} catch (error) {
|
||||
if (spinner) {
|
||||
spinner.fail(`Error parsing PRD: ${error.message}`);
|
||||
} else {
|
||||
console.error(chalk.red(`Error parsing PRD: ${error.message}`));
|
||||
}
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Check for existing tasks.json before proceeding with specified input file
|
||||
if (!(await confirmOverwriteIfNeeded())) return;
|
||||
|
||||
console.log(chalk.blue(`Parsing PRD file: ${inputFile}`));
|
||||
console.log(chalk.blue(`Generating ${numTasks} tasks...`));
|
||||
if (append) {
|
||||
console.log(chalk.blue('Appending to existing tasks...'));
|
||||
}
|
||||
|
||||
await parsePRD(inputFile, outputPath, numTasks, { append });
|
||||
});
|
||||
|
||||
// update command
|
||||
|
||||
@@ -345,6 +345,12 @@ function getDefaultSubtasks(explicitRoot = null) {
|
||||
return isNaN(parsedVal) ? DEFAULTS.global.defaultSubtasks : parsedVal;
|
||||
}
|
||||
|
||||
function getDefaultNumTasks(explicitRoot = null) {
|
||||
const val = getGlobalConfig(explicitRoot).defaultNumTasks;
|
||||
const parsedVal = parseInt(val, 10);
|
||||
return isNaN(parsedVal) ? DEFAULTS.global.defaultNumTasks : parsedVal;
|
||||
}
|
||||
|
||||
function getDefaultPriority(explicitRoot = null) {
|
||||
// Directly return value from config
|
||||
return getGlobalConfig(explicitRoot).defaultPriority;
|
||||
@@ -424,12 +430,13 @@ function getParametersForRole(role, explicitRoot = null) {
|
||||
|
||||
/**
|
||||
* Checks if the API key for a given provider is set in the environment.
|
||||
* Checks process.env first, then session.env if session is provided.
|
||||
* Checks process.env first, then session.env if session is provided, then .env file if projectRoot provided.
|
||||
* @param {string} providerName - The name of the provider (e.g., 'openai', 'anthropic').
|
||||
* @param {object|null} [session=null] - The MCP session object (optional).
|
||||
* @param {string|null} [projectRoot=null] - The project root directory (optional, for .env file check).
|
||||
* @returns {boolean} True if the API key is set, false otherwise.
|
||||
*/
|
||||
function isApiKeySet(providerName, session = null) {
|
||||
function isApiKeySet(providerName, session = null, projectRoot = null) {
|
||||
// Define the expected environment variable name for each provider
|
||||
if (providerName?.toLowerCase() === 'ollama') {
|
||||
return true; // Indicate key status is effectively "OK"
|
||||
@@ -454,7 +461,7 @@ function isApiKeySet(providerName, session = null) {
|
||||
}
|
||||
|
||||
const envVarName = keyMap[providerKey];
|
||||
const apiKeyValue = resolveEnvVariable(envVarName, session);
|
||||
const apiKeyValue = resolveEnvVariable(envVarName, session, projectRoot);
|
||||
|
||||
// Check if the key exists, is not empty, and is not a placeholder
|
||||
return (
|
||||
@@ -701,6 +708,7 @@ export {
|
||||
// Global setting getters (No env var overrides)
|
||||
getLogLevel,
|
||||
getDebugFlag,
|
||||
getDefaultNumTasks,
|
||||
getDefaultSubtasks,
|
||||
getDefaultPriority,
|
||||
getProjectName,
|
||||
|
||||
@@ -195,7 +195,7 @@ async function addDependency(tasksPath, taskId, dependencyId) {
|
||||
}
|
||||
|
||||
// Generate updated task files
|
||||
await generateTaskFiles(tasksPath, 'tasks');
|
||||
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
|
||||
log('info', 'Task files regenerated with updated dependencies.');
|
||||
} else {
|
||||
@@ -334,7 +334,7 @@ async function removeDependency(tasksPath, taskId, dependencyId) {
|
||||
}
|
||||
|
||||
// Regenerate task files
|
||||
await generateTaskFiles(tasksPath, 'tasks');
|
||||
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -13,20 +13,6 @@
|
||||
"cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 64000
|
||||
},
|
||||
{
|
||||
"id": "claude-3-5-haiku-20241022",
|
||||
"swe_score": 0.406,
|
||||
"cost_per_1m_tokens": { "input": 0.8, "output": 4.0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 64000
|
||||
},
|
||||
{
|
||||
"id": "claude-3-opus-20240229",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 15, "output": 75 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 64000
|
||||
}
|
||||
],
|
||||
"openai": [
|
||||
@@ -41,7 +27,7 @@
|
||||
"id": "o1",
|
||||
"swe_score": 0.489,
|
||||
"cost_per_1m_tokens": { "input": 15.0, "output": 60.0 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
"allowed_roles": ["main"]
|
||||
},
|
||||
{
|
||||
"id": "o3",
|
||||
@@ -53,7 +39,7 @@
|
||||
"id": "o3-mini",
|
||||
"swe_score": 0.493,
|
||||
"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"allowed_roles": ["main"],
|
||||
"max_tokens": 100000
|
||||
},
|
||||
{
|
||||
@@ -66,49 +52,49 @@
|
||||
"id": "o1-mini",
|
||||
"swe_score": 0.4,
|
||||
"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
"allowed_roles": ["main"]
|
||||
},
|
||||
{
|
||||
"id": "o1-pro",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 150.0, "output": 600.0 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
"allowed_roles": ["main"]
|
||||
},
|
||||
{
|
||||
"id": "gpt-4-5-preview",
|
||||
"swe_score": 0.38,
|
||||
"cost_per_1m_tokens": { "input": 75.0, "output": 150.0 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
"allowed_roles": ["main"]
|
||||
},
|
||||
{
|
||||
"id": "gpt-4-1-mini",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.4, "output": 1.6 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
"allowed_roles": ["main"]
|
||||
},
|
||||
{
|
||||
"id": "gpt-4-1-nano",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.1, "output": 0.4 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
"allowed_roles": ["main"]
|
||||
},
|
||||
{
|
||||
"id": "gpt-4o-mini",
|
||||
"swe_score": 0.3,
|
||||
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
|
||||
"allowed_roles": ["main", "fallback"]
|
||||
"allowed_roles": ["main"]
|
||||
},
|
||||
{
|
||||
"id": "gpt-4o-search-preview",
|
||||
"swe_score": 0.33,
|
||||
"cost_per_1m_tokens": { "input": 2.5, "output": 10.0 },
|
||||
"allowed_roles": ["main", "fallback", "research"]
|
||||
"allowed_roles": ["research"]
|
||||
},
|
||||
{
|
||||
"id": "gpt-4o-mini-search-preview",
|
||||
"swe_score": 0.3,
|
||||
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
|
||||
"allowed_roles": ["main", "fallback", "research"]
|
||||
"allowed_roles": ["research"]
|
||||
}
|
||||
],
|
||||
"google": [
|
||||
@@ -189,14 +175,6 @@
|
||||
"allowed_roles": ["main", "fallback", "research"],
|
||||
"max_tokens": 131072
|
||||
},
|
||||
{
|
||||
"id": "grok-3-mini",
|
||||
"name": "Grok 3 Mini",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.3, "output": 0.5 },
|
||||
"allowed_roles": ["main", "fallback", "research"],
|
||||
"max_tokens": 131072
|
||||
},
|
||||
{
|
||||
"id": "grok-3-fast",
|
||||
"name": "Grok 3 Fast",
|
||||
@@ -204,13 +182,6 @@
|
||||
"cost_per_1m_tokens": { "input": 5, "output": 25 },
|
||||
"allowed_roles": ["main", "fallback", "research"],
|
||||
"max_tokens": 131072
|
||||
},
|
||||
{
|
||||
"id": "grok-3-mini-fast",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.6, "output": 4 },
|
||||
"allowed_roles": ["main", "fallback", "research"],
|
||||
"max_tokens": 131072
|
||||
}
|
||||
],
|
||||
"ollama": [
|
||||
@@ -283,7 +254,7 @@
|
||||
"id": "deepseek/deepseek-chat-v3-0324",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.27, "output": 1.1 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"allowed_roles": ["main"],
|
||||
"max_tokens": 64000
|
||||
},
|
||||
{
|
||||
@@ -312,14 +283,14 @@
|
||||
"id": "google/gemini-2.5-flash-preview",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"allowed_roles": ["main"],
|
||||
"max_tokens": 65535
|
||||
},
|
||||
{
|
||||
"id": "google/gemini-2.5-flash-preview:thinking",
|
||||
"swe_score": 0,
|
||||
"cost_per_1m_tokens": { "input": 0.15, "output": 3.5 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"allowed_roles": ["main"],
|
||||
"max_tokens": 65535
|
||||
},
|
||||
{
|
||||
|
||||
@@ -10,7 +10,7 @@ import {
|
||||
startLoadingIndicator,
|
||||
stopLoadingIndicator
|
||||
} from '../ui.js';
|
||||
import { log, readJSON, writeJSON, truncate } from '../utils.js';
|
||||
import { readJSON, writeJSON, log as consoleLog, truncate } from '../utils.js';
|
||||
import { generateObjectService } from '../ai-services-unified.js';
|
||||
import { getDefaultPriority } from '../config-manager.js';
|
||||
import generateTaskFiles from './generate-task-files.js';
|
||||
@@ -42,19 +42,41 @@ const AiTaskDataSchema = z.object({
|
||||
* @param {Object} customEnv - Custom environment variables (optional) - Note: AI params override deprecated
|
||||
* @param {Object} manualTaskData - Manual task data (optional, for direct task creation without AI)
|
||||
* @param {boolean} useResearch - Whether to use the research model (passed to unified service)
|
||||
* @param {Object} context - Context object containing session and potentially projectRoot
|
||||
* @param {string} [context.projectRoot] - Project root path (for MCP/env fallback)
|
||||
* @returns {number} The new task ID
|
||||
*/
|
||||
async function addTask(
|
||||
tasksPath,
|
||||
prompt,
|
||||
dependencies = [],
|
||||
priority = getDefaultPriority(), // Keep getter for default priority
|
||||
{ reportProgress, mcpLog, session } = {},
|
||||
outputFormat = 'text',
|
||||
// customEnv = null, // Removed as AI param overrides are deprecated
|
||||
priority = null,
|
||||
context = {},
|
||||
outputFormat = 'text', // Default to text for CLI
|
||||
manualTaskData = null,
|
||||
useResearch = false // <-- Add useResearch parameter
|
||||
useResearch = false
|
||||
) {
|
||||
const { session, mcpLog, projectRoot } = context;
|
||||
const isMCP = !!mcpLog;
|
||||
|
||||
// Create a consistent logFn object regardless of context
|
||||
const logFn = isMCP
|
||||
? mcpLog // Use MCP logger if provided
|
||||
: {
|
||||
// Create a wrapper around consoleLog for CLI
|
||||
info: (...args) => consoleLog('info', ...args),
|
||||
warn: (...args) => consoleLog('warn', ...args),
|
||||
error: (...args) => consoleLog('error', ...args),
|
||||
debug: (...args) => consoleLog('debug', ...args),
|
||||
success: (...args) => consoleLog('success', ...args)
|
||||
};
|
||||
|
||||
const effectivePriority = priority || getDefaultPriority(projectRoot);
|
||||
|
||||
logFn.info(
|
||||
`Adding new task with prompt: "${prompt}", Priority: ${effectivePriority}, Dependencies: ${dependencies.join(', ') || 'None'}, Research: ${useResearch}, ProjectRoot: ${projectRoot}`
|
||||
);
|
||||
|
||||
let loadingIndicator = null;
|
||||
|
||||
// Create custom reporter that checks for MCP log
|
||||
@@ -62,7 +84,7 @@ async function addTask(
|
||||
if (mcpLog) {
|
||||
mcpLog[level](message);
|
||||
} else if (outputFormat === 'text') {
|
||||
log(level, message);
|
||||
consoleLog(level, message);
|
||||
}
|
||||
};
|
||||
|
||||
@@ -220,11 +242,11 @@ async function addTask(
|
||||
const aiGeneratedTaskData = await generateObjectService({
|
||||
role: serviceRole, // <-- Use the determined role
|
||||
session: session, // Pass session for API key resolution
|
||||
projectRoot: projectRoot, // <<< Pass projectRoot here
|
||||
schema: AiTaskDataSchema, // Pass the Zod schema
|
||||
objectName: 'newTaskData', // Name for the object
|
||||
systemPrompt: systemPrompt,
|
||||
prompt: userPrompt,
|
||||
reportProgress // Pass progress reporter if available
|
||||
prompt: userPrompt
|
||||
});
|
||||
report('DEBUG: generateObjectService returned successfully.', 'debug');
|
||||
|
||||
@@ -254,7 +276,7 @@ async function addTask(
|
||||
testStrategy: taskData.testStrategy || '',
|
||||
status: 'pending',
|
||||
dependencies: numericDependencies, // Use validated numeric dependencies
|
||||
priority: priority,
|
||||
priority: effectivePriority,
|
||||
subtasks: [] // Initialize with empty subtasks array
|
||||
};
|
||||
|
||||
|
||||
@@ -46,6 +46,7 @@ Do not include any explanatory text, markdown formatting, or code block markers
|
||||
* @param {string} options.output - Path to report output file
|
||||
* @param {string|number} [options.threshold] - Complexity threshold
|
||||
* @param {boolean} [options.research] - Use research role
|
||||
* @param {string} [options.projectRoot] - Project root path (for MCP/env fallback).
|
||||
* @param {Object} [options._filteredTasksData] - Pre-filtered task data (internal use)
|
||||
* @param {number} [options._originalTaskCount] - Original task count (internal use)
|
||||
* @param {Object} context - Context object, potentially containing session and mcpLog
|
||||
@@ -59,6 +60,7 @@ async function analyzeTaskComplexity(options, context = {}) {
|
||||
const outputPath = options.output || 'scripts/task-complexity-report.json';
|
||||
const thresholdScore = parseFloat(options.threshold || '5');
|
||||
const useResearch = options.research || false;
|
||||
const projectRoot = options.projectRoot;
|
||||
|
||||
const outputFormat = mcpLog ? 'json' : 'text';
|
||||
|
||||
@@ -209,15 +211,13 @@ async function analyzeTaskComplexity(options, context = {}) {
|
||||
const role = useResearch ? 'research' : 'main';
|
||||
reportLog(`Using AI service with role: ${role}`, 'info');
|
||||
|
||||
// *** CHANGED: Use generateTextService ***
|
||||
fullResponse = await generateTextService({
|
||||
prompt,
|
||||
systemPrompt,
|
||||
role,
|
||||
session
|
||||
// No schema or objectName needed
|
||||
session,
|
||||
projectRoot
|
||||
});
|
||||
// *** End Service Call Change ***
|
||||
|
||||
reportLog(
|
||||
'Successfully received text response via AI service',
|
||||
|
||||
@@ -3,7 +3,7 @@ import chalk from 'chalk';
|
||||
import boxen from 'boxen';
|
||||
import Table from 'cli-table3';
|
||||
|
||||
import { log, readJSON, writeJSON, truncate } from '../utils.js';
|
||||
import { log, readJSON, writeJSON, truncate, isSilentMode } from '../utils.js';
|
||||
import { displayBanner } from '../ui.js';
|
||||
import generateTaskFiles from './generate-task-files.js';
|
||||
|
||||
@@ -22,14 +22,16 @@ function clearSubtasks(tasksPath, taskIds) {
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
console.log(
|
||||
boxen(chalk.white.bold('Clearing Subtasks'), {
|
||||
padding: 1,
|
||||
borderColor: 'blue',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1, bottom: 1 }
|
||||
})
|
||||
);
|
||||
if (!isSilentMode()) {
|
||||
console.log(
|
||||
boxen(chalk.white.bold('Clearing Subtasks'), {
|
||||
padding: 1,
|
||||
borderColor: 'blue',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1, bottom: 1 }
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
// Handle multiple task IDs (comma-separated)
|
||||
const taskIdArray = taskIds.split(',').map((id) => id.trim());
|
||||
@@ -85,59 +87,65 @@ function clearSubtasks(tasksPath, taskIds) {
|
||||
writeJSON(tasksPath, data);
|
||||
|
||||
// Show summary table
|
||||
console.log(
|
||||
boxen(chalk.white.bold('Subtask Clearing Summary:'), {
|
||||
padding: { left: 2, right: 2, top: 0, bottom: 0 },
|
||||
margin: { top: 1, bottom: 0 },
|
||||
borderColor: 'blue',
|
||||
borderStyle: 'round'
|
||||
})
|
||||
);
|
||||
console.log(summaryTable.toString());
|
||||
if (!isSilentMode()) {
|
||||
console.log(
|
||||
boxen(chalk.white.bold('Subtask Clearing Summary:'), {
|
||||
padding: { left: 2, right: 2, top: 0, bottom: 0 },
|
||||
margin: { top: 1, bottom: 0 },
|
||||
borderColor: 'blue',
|
||||
borderStyle: 'round'
|
||||
})
|
||||
);
|
||||
console.log(summaryTable.toString());
|
||||
}
|
||||
|
||||
// Regenerate task files to reflect changes
|
||||
log('info', 'Regenerating task files...');
|
||||
generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
|
||||
// Success message
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.green(
|
||||
`Successfully cleared subtasks from ${chalk.bold(clearedCount)} task(s)`
|
||||
),
|
||||
{
|
||||
padding: 1,
|
||||
borderColor: 'green',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1 }
|
||||
}
|
||||
)
|
||||
);
|
||||
if (!isSilentMode()) {
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.green(
|
||||
`Successfully cleared subtasks from ${chalk.bold(clearedCount)} task(s)`
|
||||
),
|
||||
{
|
||||
padding: 1,
|
||||
borderColor: 'green',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1 }
|
||||
}
|
||||
)
|
||||
);
|
||||
|
||||
// Next steps suggestion
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.white.bold('Next Steps:') +
|
||||
'\n\n' +
|
||||
`${chalk.cyan('1.')} Run ${chalk.yellow('task-master expand --id=<id>')} to generate new subtasks\n` +
|
||||
`${chalk.cyan('2.')} Run ${chalk.yellow('task-master list --with-subtasks')} to verify changes`,
|
||||
{
|
||||
// Next steps suggestion
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.white.bold('Next Steps:') +
|
||||
'\n\n' +
|
||||
`${chalk.cyan('1.')} Run ${chalk.yellow('task-master expand --id=<id>')} to generate new subtasks\n` +
|
||||
`${chalk.cyan('2.')} Run ${chalk.yellow('task-master list --with-subtasks')} to verify changes`,
|
||||
{
|
||||
padding: 1,
|
||||
borderColor: 'cyan',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1 }
|
||||
}
|
||||
)
|
||||
);
|
||||
}
|
||||
} else {
|
||||
if (!isSilentMode()) {
|
||||
console.log(
|
||||
boxen(chalk.yellow('No subtasks were cleared'), {
|
||||
padding: 1,
|
||||
borderColor: 'cyan',
|
||||
borderColor: 'yellow',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1 }
|
||||
}
|
||||
)
|
||||
);
|
||||
} else {
|
||||
console.log(
|
||||
boxen(chalk.yellow('No subtasks were cleared'), {
|
||||
padding: 1,
|
||||
borderColor: 'yellow',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1 }
|
||||
})
|
||||
);
|
||||
})
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -503,7 +503,8 @@ async function expandTask(
|
||||
prompt: promptContent,
|
||||
systemPrompt: systemPrompt, // Use the determined system prompt
|
||||
role,
|
||||
session
|
||||
session,
|
||||
projectRoot
|
||||
});
|
||||
logger.info(
|
||||
'Successfully received text response from AI service',
|
||||
|
||||
@@ -77,7 +77,7 @@ function fetchOpenRouterModels() {
|
||||
* @returns {Object} RESTful response with current model configuration
|
||||
*/
|
||||
async function getModelConfiguration(options = {}) {
|
||||
const { mcpLog, projectRoot } = options;
|
||||
const { mcpLog, projectRoot, session } = options;
|
||||
|
||||
const report = (level, ...args) => {
|
||||
if (mcpLog && typeof mcpLog[level] === 'function') {
|
||||
@@ -125,12 +125,16 @@ async function getModelConfiguration(options = {}) {
|
||||
const fallbackModelId = getFallbackModelId(projectRoot);
|
||||
|
||||
// Check API keys
|
||||
const mainCliKeyOk = isApiKeySet(mainProvider);
|
||||
const mainCliKeyOk = isApiKeySet(mainProvider, session, projectRoot);
|
||||
const mainMcpKeyOk = getMcpApiKeyStatus(mainProvider, projectRoot);
|
||||
const researchCliKeyOk = isApiKeySet(researchProvider);
|
||||
const researchCliKeyOk = isApiKeySet(
|
||||
researchProvider,
|
||||
session,
|
||||
projectRoot
|
||||
);
|
||||
const researchMcpKeyOk = getMcpApiKeyStatus(researchProvider, projectRoot);
|
||||
const fallbackCliKeyOk = fallbackProvider
|
||||
? isApiKeySet(fallbackProvider)
|
||||
? isApiKeySet(fallbackProvider, session, projectRoot)
|
||||
: true;
|
||||
const fallbackMcpKeyOk = fallbackProvider
|
||||
? getMcpApiKeyStatus(fallbackProvider, projectRoot)
|
||||
@@ -523,7 +527,7 @@ async function getApiKeyStatusReport(options = {}) {
|
||||
); // Ollama is not a provider, it's a service, doesn't need an api key usually
|
||||
const statusReport = providersToCheck.map((provider) => {
|
||||
// Use provided projectRoot for MCP status check
|
||||
const cliOk = isApiKeySet(provider, session); // Pass session for CLI check too
|
||||
const cliOk = isApiKeySet(provider, session, projectRoot); // Pass session and projectRoot for CLI check
|
||||
const mcpOk = getMcpApiKeyStatus(provider, projectRoot);
|
||||
return {
|
||||
provider,
|
||||
|
||||
@@ -9,28 +9,30 @@ import {
|
||||
writeJSON,
|
||||
enableSilentMode,
|
||||
disableSilentMode,
|
||||
isSilentMode
|
||||
isSilentMode,
|
||||
readJSON,
|
||||
findTaskById
|
||||
} from '../utils.js';
|
||||
|
||||
import { generateObjectService } from '../ai-services-unified.js';
|
||||
import { getDebugFlag } from '../config-manager.js';
|
||||
import generateTaskFiles from './generate-task-files.js';
|
||||
|
||||
// Define Zod schema for task validation
|
||||
const TaskSchema = z.object({
|
||||
id: z.number(),
|
||||
title: z.string(),
|
||||
description: z.string(),
|
||||
status: z.string().default('pending'),
|
||||
dependencies: z.array(z.number()).default([]),
|
||||
priority: z.string().default('medium'),
|
||||
details: z.string().optional(),
|
||||
testStrategy: z.string().optional()
|
||||
// Define the Zod schema for a SINGLE task object
|
||||
const prdSingleTaskSchema = z.object({
|
||||
id: z.number().int().positive(),
|
||||
title: z.string().min(1),
|
||||
description: z.string().min(1),
|
||||
details: z.string().optional().default(''),
|
||||
testStrategy: z.string().optional().default(''),
|
||||
priority: z.enum(['high', 'medium', 'low']).default('medium'),
|
||||
dependencies: z.array(z.number().int().positive()).optional().default([]),
|
||||
status: z.string().optional().default('pending')
|
||||
});
|
||||
|
||||
// Define Zod schema for the complete tasks data
|
||||
const TasksDataSchema = z.object({
|
||||
tasks: z.array(TaskSchema),
|
||||
// Define the Zod schema for the ENTIRE expected AI response object
|
||||
const prdResponseSchema = z.object({
|
||||
tasks: z.array(prdSingleTaskSchema),
|
||||
metadata: z.object({
|
||||
projectName: z.string(),
|
||||
totalTasks: z.number(),
|
||||
@@ -45,35 +47,114 @@ const TasksDataSchema = z.object({
|
||||
* @param {string} tasksPath - Path to the tasks.json file
|
||||
* @param {number} numTasks - Number of tasks to generate
|
||||
* @param {Object} options - Additional options
|
||||
* @param {Object} options.reportProgress - Function to report progress to MCP server (optional)
|
||||
* @param {Object} options.mcpLog - MCP logger object (optional)
|
||||
* @param {Object} options.session - Session object from MCP server (optional)
|
||||
* @param {boolean} [options.useForce=false] - Whether to overwrite existing tasks.json.
|
||||
* @param {boolean} [options.useAppend=false] - Append to existing tasks file.
|
||||
* @param {Object} [options.reportProgress] - Function to report progress (optional, likely unused).
|
||||
* @param {Object} [options.mcpLog] - MCP logger object (optional).
|
||||
* @param {Object} [options.session] - Session object from MCP server (optional).
|
||||
* @param {string} [options.projectRoot] - Project root path (for MCP/env fallback).
|
||||
* @param {string} [outputFormat='text'] - Output format ('text' or 'json').
|
||||
*/
|
||||
async function parsePRD(prdPath, tasksPath, numTasks, options = {}) {
|
||||
const { reportProgress, mcpLog, session } = options;
|
||||
const {
|
||||
reportProgress,
|
||||
mcpLog,
|
||||
session,
|
||||
projectRoot,
|
||||
useForce = false,
|
||||
useAppend = false
|
||||
} = options;
|
||||
const isMCP = !!mcpLog;
|
||||
const outputFormat = isMCP ? 'json' : 'text';
|
||||
|
||||
// Determine output format based on mcpLog presence (simplification)
|
||||
const outputFormat = mcpLog ? 'json' : 'text';
|
||||
const logFn = mcpLog
|
||||
? mcpLog
|
||||
: {
|
||||
// Wrapper for CLI
|
||||
info: (...args) => log('info', ...args),
|
||||
warn: (...args) => log('warn', ...args),
|
||||
error: (...args) => log('error', ...args),
|
||||
debug: (...args) => log('debug', ...args),
|
||||
success: (...args) => log('success', ...args)
|
||||
};
|
||||
|
||||
// Create custom reporter that checks for MCP log and silent mode
|
||||
// Create custom reporter using logFn
|
||||
const report = (message, level = 'info') => {
|
||||
if (mcpLog) {
|
||||
mcpLog[level](message);
|
||||
// Check logFn directly
|
||||
if (logFn && typeof logFn[level] === 'function') {
|
||||
logFn[level](message);
|
||||
} else if (!isSilentMode() && outputFormat === 'text') {
|
||||
// Only log to console if not in silent mode and outputFormat is 'text'
|
||||
// Fallback to original log only if necessary and in CLI text mode
|
||||
log(level, message);
|
||||
}
|
||||
};
|
||||
|
||||
try {
|
||||
report(`Parsing PRD file: ${prdPath}`, 'info');
|
||||
report(
|
||||
`Parsing PRD file: ${prdPath}, Force: ${useForce}, Append: ${useAppend}`
|
||||
);
|
||||
|
||||
// Read the PRD content
|
||||
let existingTasks = [];
|
||||
let nextId = 1;
|
||||
|
||||
try {
|
||||
// Handle file existence and overwrite/append logic
|
||||
if (fs.existsSync(tasksPath)) {
|
||||
if (useAppend) {
|
||||
report(
|
||||
`Append mode enabled. Reading existing tasks from ${tasksPath}`,
|
||||
'info'
|
||||
);
|
||||
const existingData = readJSON(tasksPath); // Use readJSON utility
|
||||
if (existingData && Array.isArray(existingData.tasks)) {
|
||||
existingTasks = existingData.tasks;
|
||||
if (existingTasks.length > 0) {
|
||||
nextId = Math.max(...existingTasks.map((t) => t.id || 0)) + 1;
|
||||
report(
|
||||
`Found ${existingTasks.length} existing tasks. Next ID will be ${nextId}.`,
|
||||
'info'
|
||||
);
|
||||
}
|
||||
} else {
|
||||
report(
|
||||
`Could not read existing tasks from ${tasksPath} or format is invalid. Proceeding without appending.`,
|
||||
'warn'
|
||||
);
|
||||
existingTasks = []; // Reset if read fails
|
||||
}
|
||||
} else if (!useForce) {
|
||||
// Not appending and not forcing overwrite
|
||||
const overwriteError = new Error(
|
||||
`Output file ${tasksPath} already exists. Use --force to overwrite or --append.`
|
||||
);
|
||||
report(overwriteError.message, 'error');
|
||||
if (outputFormat === 'text') {
|
||||
console.error(chalk.red(overwriteError.message));
|
||||
process.exit(1);
|
||||
} else {
|
||||
throw overwriteError;
|
||||
}
|
||||
} else {
|
||||
// Force overwrite is true
|
||||
report(
|
||||
`Force flag enabled. Overwriting existing file: ${tasksPath}`,
|
||||
'info'
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
report(`Reading PRD content from ${prdPath}`, 'info');
|
||||
const prdContent = fs.readFileSync(prdPath, 'utf8');
|
||||
if (!prdContent) {
|
||||
throw new Error(`Input file ${prdPath} is empty or could not be read.`);
|
||||
}
|
||||
|
||||
// Build system prompt for PRD parsing
|
||||
const systemPrompt = `You are an AI assistant helping to break down a Product Requirements Document (PRD) into a set of sequential development tasks.
|
||||
Your goal is to create ${numTasks} well-structured, actionable development tasks based on the PRD provided.
|
||||
const systemPrompt = `You are an AI assistant specialized in analyzing Product Requirements Documents (PRDs) and generating a structured, logically ordered, dependency-aware and sequenced list of development tasks in JSON format.
|
||||
Analyze the provided PRD content and generate approximately ${numTasks} top-level development tasks. If the complexity or the level of detail of the PRD is high, generate more tasks relative to the complexity of the PRD
|
||||
Each task should represent a logical unit of work needed to implement the requirements and focus on the most direct and effective way to implement the requirements without unnecessary complexity or overengineering. Include pseudo-code, implementation details, and test strategy for each task. Find the most up to date information to implement each task.
|
||||
Assign sequential IDs starting from ${nextId}. Infer title, description, details, and test strategy for each task based *only* on the PRD content.
|
||||
Set status to 'pending', dependencies to an empty array [], and priority to 'medium' initially for all tasks.
|
||||
Respond ONLY with a valid JSON object containing a single key "tasks", where the value is an array of task objects adhering to the provided Zod schema. Do not include any explanation or markdown formatting.
|
||||
|
||||
Each task should follow this JSON structure:
|
||||
{
|
||||
@@ -88,12 +169,12 @@ Each task should follow this JSON structure:
|
||||
}
|
||||
|
||||
Guidelines:
|
||||
1. Create exactly ${numTasks} tasks, numbered from 1 to ${numTasks}
|
||||
2. Each task should be atomic and focused on a single responsibility
|
||||
1. Unless complexity warrants otherwise, create exactly ${numTasks} tasks, numbered sequentially starting from ${nextId}
|
||||
2. Each task should be atomic and focused on a single responsibility following the most up to date best practices and standards
|
||||
3. Order tasks logically - consider dependencies and implementation sequence
|
||||
4. Early tasks should focus on setup, core functionality first, then advanced features
|
||||
5. Include clear validation/testing approach for each task
|
||||
6. Set appropriate dependency IDs (a task can only depend on tasks with lower IDs)
|
||||
6. Set appropriate dependency IDs (a task can only depend on tasks with lower IDs, potentially including existing tasks with IDs less than ${nextId} if applicable)
|
||||
7. Assign priority (high/medium/low) based on criticality and dependency order
|
||||
8. Include detailed implementation guidance in the "details" field
|
||||
9. If the PRD contains specific requirements for libraries, database schemas, frameworks, tech stacks, or any other implementation details, STRICTLY ADHERE to these requirements in your task breakdown and do not discard them under any circumstance
|
||||
@@ -101,41 +182,40 @@ Guidelines:
|
||||
11. Always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches`;
|
||||
|
||||
// Build user prompt with PRD content
|
||||
const userPrompt = `Here's the Product Requirements Document (PRD) to break down into ${numTasks} tasks:
|
||||
const userPrompt = `Here's the Product Requirements Document (PRD) to break down into approximately ${numTasks} tasks, starting IDs from ${nextId}:\n\n${prdContent}\n\n
|
||||
|
||||
${prdContent}
|
||||
|
||||
Return your response in this format:
|
||||
Return your response in this format:
|
||||
{
|
||||
"tasks": [
|
||||
{
|
||||
"id": 1,
|
||||
"title": "Setup Project Repository",
|
||||
"description": "...",
|
||||
...
|
||||
},
|
||||
...
|
||||
],
|
||||
"metadata": {
|
||||
"projectName": "PRD Implementation",
|
||||
"totalTasks": ${numTasks},
|
||||
"sourceFile": "${prdPath}",
|
||||
"generatedAt": "YYYY-MM-DD"
|
||||
}
|
||||
"tasks": [
|
||||
{
|
||||
"id": 1,
|
||||
"title": "Setup Project Repository",
|
||||
"description": "...",
|
||||
...
|
||||
},
|
||||
...
|
||||
],
|
||||
"metadata": {
|
||||
"projectName": "PRD Implementation",
|
||||
"totalTasks": ${numTasks},
|
||||
"sourceFile": "${prdPath}",
|
||||
"generatedAt": "YYYY-MM-DD"
|
||||
}
|
||||
}`;
|
||||
|
||||
// Call the unified AI service
|
||||
report('Calling AI service to generate tasks from PRD...', 'info');
|
||||
|
||||
// Call generateObjectService with proper parameters
|
||||
const tasksData = await generateObjectService({
|
||||
role: 'main', // Use 'main' role to get the model from config
|
||||
session: session, // Pass session for API key resolution
|
||||
schema: TasksDataSchema, // Pass the schema for validation
|
||||
objectName: 'tasks_data', // Name the generated object
|
||||
systemPrompt: systemPrompt, // System instructions
|
||||
prompt: userPrompt, // User prompt with PRD content
|
||||
reportProgress // Progress reporting function
|
||||
// Call generateObjectService with the CORRECT schema
|
||||
const generatedData = await generateObjectService({
|
||||
role: 'main',
|
||||
session: session,
|
||||
projectRoot: projectRoot,
|
||||
schema: prdResponseSchema,
|
||||
objectName: 'tasks_data',
|
||||
systemPrompt: systemPrompt,
|
||||
prompt: userPrompt,
|
||||
reportProgress
|
||||
});
|
||||
|
||||
// Create the directory if it doesn't exist
|
||||
@@ -143,11 +223,58 @@ Return your response in this format:
|
||||
if (!fs.existsSync(tasksDir)) {
|
||||
fs.mkdirSync(tasksDir, { recursive: true });
|
||||
}
|
||||
logFn.success('Successfully parsed PRD via AI service.'); // Assumes generateObjectService validated
|
||||
|
||||
// Validate and Process Tasks
|
||||
if (!generatedData || !Array.isArray(generatedData.tasks)) {
|
||||
// This error *shouldn't* happen if generateObjectService enforced prdResponseSchema
|
||||
// But keep it as a safeguard
|
||||
logFn.error(
|
||||
`Internal Error: generateObjectService returned unexpected data structure: ${JSON.stringify(generatedData)}`
|
||||
);
|
||||
throw new Error(
|
||||
'AI service returned unexpected data structure after validation.'
|
||||
);
|
||||
}
|
||||
|
||||
let currentId = nextId;
|
||||
const taskMap = new Map();
|
||||
const processedNewTasks = generatedData.tasks.map((task) => {
|
||||
const newId = currentId++;
|
||||
taskMap.set(task.id, newId);
|
||||
return {
|
||||
...task,
|
||||
id: newId,
|
||||
status: 'pending',
|
||||
priority: task.priority || 'medium',
|
||||
dependencies: Array.isArray(task.dependencies) ? task.dependencies : [],
|
||||
subtasks: []
|
||||
};
|
||||
});
|
||||
|
||||
// Remap dependencies for the NEWLY processed tasks
|
||||
processedNewTasks.forEach((task) => {
|
||||
task.dependencies = task.dependencies
|
||||
.map((depId) => taskMap.get(depId)) // Map old AI ID to new sequential ID
|
||||
.filter(
|
||||
(newDepId) =>
|
||||
newDepId != null && // Must exist
|
||||
newDepId < task.id && // Must be a lower ID (could be existing or newly generated)
|
||||
(findTaskById(existingTasks, newDepId) || // Check if it exists in old tasks OR
|
||||
processedNewTasks.some((t) => t.id === newDepId)) // check if it exists in new tasks
|
||||
);
|
||||
});
|
||||
|
||||
const allTasks = useAppend
|
||||
? [...existingTasks, ...processedNewTasks]
|
||||
: processedNewTasks;
|
||||
|
||||
const finalTaskData = { tasks: allTasks }; // Use the combined list
|
||||
|
||||
// Write the tasks to the file
|
||||
writeJSON(tasksPath, tasksData);
|
||||
writeJSON(tasksPath, finalTaskData);
|
||||
report(
|
||||
`Successfully generated ${tasksData.tasks.length} tasks from PRD`,
|
||||
`Successfully wrote ${allTasks.length} total tasks to ${tasksPath} (${processedNewTasks.length} new).`,
|
||||
'success'
|
||||
);
|
||||
report(`Tasks saved to: ${tasksPath}`, 'info');
|
||||
@@ -156,10 +283,10 @@ Return your response in this format:
|
||||
if (reportProgress && mcpLog) {
|
||||
// Enable silent mode when being called from MCP server
|
||||
enableSilentMode();
|
||||
await generateTaskFiles(tasksPath, tasksDir);
|
||||
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
disableSilentMode();
|
||||
} else {
|
||||
await generateTaskFiles(tasksPath, tasksDir);
|
||||
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
}
|
||||
|
||||
// Only show success boxes for text output (CLI)
|
||||
@@ -167,7 +294,7 @@ Return your response in this format:
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.green(
|
||||
`Successfully generated ${tasksData.tasks.length} tasks from PRD`
|
||||
`Successfully generated ${processedNewTasks.length} new tasks. Total tasks in ${tasksPath}: ${allTasks.length}`
|
||||
),
|
||||
{ padding: 1, borderColor: 'green', borderStyle: 'round' }
|
||||
)
|
||||
@@ -189,7 +316,7 @@ Return your response in this format:
|
||||
);
|
||||
}
|
||||
|
||||
return tasksData;
|
||||
return { success: true, tasks: processedNewTasks };
|
||||
} catch (error) {
|
||||
report(`Error parsing PRD: ${error.message}`, 'error');
|
||||
|
||||
@@ -197,8 +324,8 @@ Return your response in this format:
|
||||
if (outputFormat === 'text') {
|
||||
console.error(chalk.red(`Error: ${error.message}`));
|
||||
|
||||
if (getDebugFlag(session)) {
|
||||
// Use getter
|
||||
if (getDebugFlag(projectRoot)) {
|
||||
// Use projectRoot for debug flag check
|
||||
console.error(error);
|
||||
}
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ import path from 'path';
|
||||
import chalk from 'chalk';
|
||||
import boxen from 'boxen';
|
||||
import Table from 'cli-table3';
|
||||
import { z } from 'zod';
|
||||
|
||||
import {
|
||||
getStatusWithColor,
|
||||
@@ -16,7 +17,10 @@ import {
|
||||
truncate,
|
||||
isSilentMode
|
||||
} from '../utils.js';
|
||||
import { generateTextService } from '../ai-services-unified.js';
|
||||
import {
|
||||
generateObjectService,
|
||||
generateTextService
|
||||
} from '../ai-services-unified.js';
|
||||
import { getDebugFlag } from '../config-manager.js';
|
||||
import generateTaskFiles from './generate-task-files.js';
|
||||
|
||||
@@ -29,6 +33,7 @@ import generateTaskFiles from './generate-task-files.js';
|
||||
* @param {Object} context - Context object containing session and mcpLog.
|
||||
* @param {Object} [context.session] - Session object from MCP server.
|
||||
* @param {Object} [context.mcpLog] - MCP logger object.
|
||||
* @param {string} [context.projectRoot] - Project root path (needed for AI service key resolution).
|
||||
* @param {string} [outputFormat='text'] - Output format ('text' or 'json'). Automatically 'json' if mcpLog is present.
|
||||
* @returns {Promise<Object|null>} - The updated subtask or null if update failed.
|
||||
*/
|
||||
@@ -40,7 +45,7 @@ async function updateSubtaskById(
|
||||
context = {},
|
||||
outputFormat = context.mcpLog ? 'json' : 'text'
|
||||
) {
|
||||
const { session, mcpLog } = context;
|
||||
const { session, mcpLog, projectRoot } = context;
|
||||
const logFn = mcpLog || consoleLog;
|
||||
const isMCP = !!mcpLog;
|
||||
|
||||
@@ -130,36 +135,16 @@ async function updateSubtaskById(
|
||||
|
||||
const subtask = parentTask.subtasks[subtaskIndex];
|
||||
|
||||
// Check if subtask is already completed
|
||||
if (subtask.status === 'done' || subtask.status === 'completed') {
|
||||
report(
|
||||
'warn',
|
||||
`Subtask ${subtaskId} is already marked as done and cannot be updated`
|
||||
);
|
||||
|
||||
// Only show UI elements for text output (CLI)
|
||||
if (outputFormat === 'text') {
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.yellow(
|
||||
`Subtask ${subtaskId} is already marked as ${subtask.status} and cannot be updated.`
|
||||
) +
|
||||
'\n\n' +
|
||||
chalk.white(
|
||||
'Completed subtasks are locked to maintain consistency. To modify a completed subtask, you must first:'
|
||||
) +
|
||||
'\n' +
|
||||
chalk.white(
|
||||
'1. Change its status to "pending" or "in-progress"'
|
||||
) +
|
||||
'\n' +
|
||||
chalk.white('2. Then run the update-subtask command'),
|
||||
{ padding: 1, borderColor: 'yellow', borderStyle: 'round' }
|
||||
)
|
||||
);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
const subtaskSchema = z.object({
id: z.number().int().positive(),
title: z.string(),
description: z.string().optional(),
status: z.string(),
dependencies: z.array(z.union([z.string(), z.number()])).optional(),
priority: z.string().optional(),
details: z.string().optional(),
testStrategy: z.string().optional()
});
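As a minimal illustration (not part of this diff), the same Zod schema could also be used to re-validate whatever object the AI service returns after the call below; safeParse is the standard Zod API for non-throwing validation:

// Hypothetical defensive check against subtaskSchema (illustration only)
const check = subtaskSchema.safeParse(parsedAIResponse);
if (!check.success) {
  report('error', `AI response failed subtask schema validation: ${check.error.message}`);
}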

// Only show UI elements for text output (CLI)
if (outputFormat === 'text') {
@@ -192,101 +177,161 @@ async function updateSubtaskById(
|
||||
|
||||
// Start the loading indicator - only for text output
|
||||
loadingIndicator = startLoadingIndicator(
|
||||
'Generating additional information with AI...'
|
||||
useResearch
|
||||
? 'Updating subtask with research...'
|
||||
: 'Updating subtask...'
|
||||
);
|
||||
}
|
||||
|
||||
let additionalInformation = '';
|
||||
let parsedAIResponse;
|
||||
try {
|
||||
// Reverted: Keep the original system prompt
|
||||
const systemPrompt = `You are an AI assistant helping to update software development subtasks with additional information.
|
||||
Given a subtask, you will provide additional details, implementation notes, or technical insights based on user request.
|
||||
Focus only on adding content that enhances the subtask - don't repeat existing information.
|
||||
Be technical, specific, and implementation-focused rather than general.
|
||||
Provide concrete examples, code snippets, or implementation details when relevant.`;
|
||||
// --- GET PARENT & SIBLING CONTEXT ---
|
||||
const parentContext = {
|
||||
id: parentTask.id,
|
||||
title: parentTask.title
|
||||
// Avoid sending full parent description/details unless necessary
|
||||
};
|
||||
|
||||
// Reverted: Use the full JSON stringification for the user message
|
||||
const subtaskData = JSON.stringify(subtask, null, 2);
|
||||
const userMessageContent = `Here is the subtask to enhance:\n${subtaskData}\n\nPlease provide additional information addressing this request:\n${prompt}\n\nReturn ONLY the new information to add - do not repeat existing content.`;
|
||||
const prevSubtask =
|
||||
subtaskIndex > 0
|
||||
? {
|
||||
id: `${parentTask.id}.${parentTask.subtasks[subtaskIndex - 1].id}`,
|
||||
title: parentTask.subtasks[subtaskIndex - 1].title,
|
||||
status: parentTask.subtasks[subtaskIndex - 1].status
|
||||
}
|
||||
: null;
|
||||
|
||||
const serviceRole = useResearch ? 'research' : 'main';
|
||||
report('info', `Calling AI text service with role: ${serviceRole}`);
|
||||
const nextSubtask =
|
||||
subtaskIndex < parentTask.subtasks.length - 1
|
||||
? {
|
||||
id: `${parentTask.id}.${parentTask.subtasks[subtaskIndex + 1].id}`,
|
||||
title: parentTask.subtasks[subtaskIndex + 1].title,
|
||||
status: parentTask.subtasks[subtaskIndex + 1].status
|
||||
}
|
||||
: null;
|
||||
|
||||
const streamResult = await generateTextService({
role: serviceRole,
session: session,
const contextString = `
Parent Task: ${JSON.stringify(parentContext)}
${prevSubtask ? `Previous Subtask: ${JSON.stringify(prevSubtask)}` : ''}
${nextSubtask ? `Next Subtask: ${JSON.stringify(nextSubtask)}` : ''}
`;

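For clarity, the resulting context string might look roughly like this at runtime (the task titles and IDs here are hypothetical):

Parent Task: {"id":5,"title":"Implement CLI parser"}
Previous Subtask: {"id":"5.1","title":"Define argument schema","status":"done"}
Next Subtask: {"id":"5.3","title":"Add help output","status":"pending"}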
const systemPrompt = `You are an AI assistant updating a parent task's subtask. This subtask will be part of a larger parent task and will be used to direct AI agents to complete the subtask. Your goal is to GENERATE new, relevant information based on the user's request (which may be high-level, mid-level or low-level) and APPEND it to the existing subtask 'details' field, wrapped in specific XML-like tags with an ISO 8601 timestamp. Intelligently determine the level of detail to include based on the user's request. Some requests are meant simply to update the subtask with some mid-implementation details, while others are meant to update the subtask with a detailed plan or strategy.

Context Provided:
- The current subtask object.
- Basic info about the parent task (ID, title).
- Basic info about the immediately preceding subtask (ID, title, status), if it exists.
- Basic info about the immediately succeeding subtask (ID, title, status), if it exists.
- A user request string.

Guidelines:
1. Analyze the user request considering the provided subtask details AND the context of the parent and sibling tasks.
2. GENERATE new, relevant text content that should be added to the 'details' field. Focus *only* on the substance of the update based on the user request and context. Do NOT add timestamps or any special formatting yourself. Avoid over-engineering the details; provide only the level of detail the request calls for.
3. Update the 'details' field in the subtask object with the GENERATED text content. It's okay if this overwrites previous details in the object you return, as the calling code will handle the final appending.
4. Return the *entire* updated subtask object (with your generated content in the 'details' field) as a valid JSON object conforming to the provided schema. Do NOT return explanations or markdown formatting.`;

const subtaskDataString = JSON.stringify(subtask, null, 2);
// Updated user prompt including context
const userPrompt = `Task Context:\n${contextString}\nCurrent Subtask:\n${subtaskDataString}\n\nUser Request: "${prompt}"\n\nPlease GENERATE new, relevant text content for the 'details' field based on the user request and the provided context. Return the entire updated subtask object as a valid JSON object matching the schema, with the newly generated text placed in the 'details' field.`;
// --- END UPDATED PROMPTS ---

// Call Unified AI Service using generateObjectService
const role = useResearch ? 'research' : 'main';
report('info', `Using AI object service with role: ${role}`);

parsedAIResponse = await generateObjectService({
prompt: userPrompt,
systemPrompt: systemPrompt,
prompt: userMessageContent
schema: subtaskSchema,
objectName: 'updatedSubtask',
role,
session,
projectRoot,
maxRetries: 2
});
report(
|
||||
'success',
|
||||
'Successfully received object response from AI service'
|
||||
);
|
||||
|
||||
if (outputFormat === 'text' && loadingIndicator) {
|
||||
// Stop indicator immediately since generateText is blocking
|
||||
stopLoadingIndicator(loadingIndicator);
|
||||
loadingIndicator = null;
|
||||
}
|
||||
|
||||
// Assign the result directly (generateTextService returns the text string)
|
||||
additionalInformation = streamResult ? streamResult.trim() : '';
|
||||
|
||||
if (!additionalInformation) {
|
||||
throw new Error('AI returned empty response.'); // Changed error message slightly
|
||||
if (!parsedAIResponse || typeof parsedAIResponse !== 'object') {
|
||||
throw new Error('AI did not return a valid object.');
|
||||
}
|
||||
|
||||
report(
|
||||
// Corrected log message to reflect generateText
|
||||
'success',
|
||||
`Successfully generated text using AI role: ${serviceRole}.`
|
||||
`Successfully generated object using AI role: ${role}.`
|
||||
);
|
||||
} catch (aiError) {
|
||||
report('error', `AI service call failed: ${aiError.message}`);
|
||||
if (outputFormat === 'text' && loadingIndicator) {
|
||||
stopLoadingIndicator(loadingIndicator); // Ensure stop on error
|
||||
loadingIndicator = null;
|
||||
}
|
||||
throw aiError;
|
||||
} // Removed the inner finally block as streamingInterval is gone
|
||||
}
|
||||
|
||||
const currentDate = new Date();
// --- TIMESTAMP & FORMATTING LOGIC (Handled Locally) ---
// Extract only the generated content from the AI's response details field.
const generatedContent = parsedAIResponse.details || ''; // Default to empty string

// Format the additional information with timestamp
const formattedInformation = `\n\n<info added on ${currentDate.toISOString()}>\n${additionalInformation}\n</info added on ${currentDate.toISOString()}>`;
if (generatedContent.trim()) {
// Generate timestamp locally
const timestamp = new Date().toISOString(); // <<< Local Timestamp

// Format the content with XML-like tags and timestamp LOCALLY
const formattedBlock = `<info added on ${timestamp}>\n${generatedContent.trim()}\n</info added on ${timestamp}>`; // <<< Local Formatting

// Append the formatted block to the *original* subtask details
subtask.details =
(subtask.details ? subtask.details + '\n' : '') + formattedBlock; // <<< Local Appending
report(
'info',
'Appended timestamped, formatted block with AI-generated content to subtask.details.'
);
} else {
report(
'warn',
'AI response object did not contain generated content in the "details" field. Original details remain unchanged.'
);
}
// --- END TIMESTAMP & FORMATTING LOGIC ---
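A minimal sketch of the same local append logic factored into a helper; the name appendTimestampedBlock is hypothetical and this is an illustration rather than code from the diff:

// Hypothetical helper illustrating the local timestamp-and-append approach
function appendTimestampedBlock(existingDetails, generatedContent) {
  const timestamp = new Date().toISOString();
  const block = `<info added on ${timestamp}>\n${generatedContent.trim()}\n</info added on ${timestamp}>`;
  return (existingDetails ? existingDetails + '\n' : '') + block;
}
// e.g. subtask.details = appendTimestampedBlock(subtask.details, generatedContent);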
|
||||
// Get a reference to the subtask *after* its details have been updated
|
||||
const updatedSubtask = parentTask.subtasks[subtaskIndex]; // subtask === updatedSubtask now
|
||||
|
||||
report('info', 'Updated subtask details locally after AI generation.');
|
||||
// --- END UPDATE SUBTASK ---
|
||||
|
||||
// Only show debug info for text output (CLI)
|
||||
if (outputFormat === 'text' && getDebugFlag(session)) {
|
||||
console.log(
|
||||
'>>> DEBUG: formattedInformation:',
|
||||
formattedInformation.substring(0, 70) + '...'
|
||||
'>>> DEBUG: Subtask details AFTER AI update:',
|
||||
updatedSubtask.details // Use updatedSubtask
|
||||
);
|
||||
}
|
||||
|
||||
// Append to subtask details and description
|
||||
// Only show debug info for text output (CLI)
|
||||
if (outputFormat === 'text' && getDebugFlag(session)) {
|
||||
console.log('>>> DEBUG: Subtask details BEFORE append:', subtask.details);
|
||||
}
|
||||
|
||||
if (subtask.details) {
|
||||
subtask.details += formattedInformation;
|
||||
} else {
|
||||
subtask.details = `${formattedInformation}`;
|
||||
}
|
||||
|
||||
// Only show debug info for text output (CLI)
|
||||
if (outputFormat === 'text' && getDebugFlag(session)) {
|
||||
console.log('>>> DEBUG: Subtask details AFTER append:', subtask.details);
|
||||
}
|
||||
|
||||
if (subtask.description) {
|
||||
// Only append to description if it makes sense (for shorter updates)
|
||||
if (additionalInformation.length < 200) {
|
||||
// Only show debug info for text output (CLI)
|
||||
// Description update logic (keeping as is for now)
|
||||
if (updatedSubtask.description) {
|
||||
// Use updatedSubtask
|
||||
if (prompt.length < 100) {
|
||||
if (outputFormat === 'text' && getDebugFlag(session)) {
|
||||
console.log(
|
||||
'>>> DEBUG: Subtask description BEFORE append:',
|
||||
subtask.description
|
||||
updatedSubtask.description // Use updatedSubtask
|
||||
);
|
||||
}
|
||||
subtask.description += ` [Updated: ${currentDate.toLocaleDateString()}]`;
|
||||
// Only show debug info for text output (CLI)
|
||||
updatedSubtask.description += ` [Updated: ${new Date().toLocaleDateString()}]`; // Use updatedSubtask
|
||||
if (outputFormat === 'text' && getDebugFlag(session)) {
|
||||
console.log(
|
||||
'>>> DEBUG: Subtask description AFTER append:',
|
||||
subtask.description
|
||||
updatedSubtask.description // Use updatedSubtask
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -297,10 +342,7 @@ Provide concrete examples, code snippets, or implementation details when relevan
|
||||
console.log('>>> DEBUG: About to call writeJSON with updated data...');
|
||||
}
|
||||
|
||||
// Update the subtask in the parent task's array
|
||||
parentTask.subtasks[subtaskIndex] = subtask;
|
||||
|
||||
// Write the updated tasks to the file
|
||||
// Write the updated tasks to the file (parentTask already contains the updated subtask)
|
||||
writeJSON(tasksPath, data);
|
||||
|
||||
// Only show debug info for text output (CLI)
|
||||
@@ -326,17 +368,18 @@ Provide concrete examples, code snippets, or implementation details when relevan
|
||||
'\n\n' +
|
||||
chalk.white.bold('Title:') +
|
||||
' ' +
|
||||
subtask.title +
|
||||
updatedSubtask.title +
|
||||
'\n\n' +
|
||||
chalk.white.bold('Information Added:') +
|
||||
// Update the display to show the new details field
|
||||
chalk.white.bold('Updated Details:') +
|
||||
'\n' +
|
||||
chalk.white(truncate(additionalInformation, 300, true)),
|
||||
chalk.white(truncate(updatedSubtask.details || '', 500, true)), // Use updatedSubtask
|
||||
{ padding: 1, borderColor: 'green', borderStyle: 'round' }
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
return subtask;
|
||||
return updatedSubtask; // Return the modified subtask object
|
||||
} catch (error) {
|
||||
// Outer catch block handles final errors after loop/attempts
|
||||
// Stop indicator on error - only for text output (CLI)
|
||||
|
||||
@@ -70,29 +70,80 @@ function parseUpdatedTaskFromText(text, expectedTaskId, logFn, isMCP) {
|
||||
|
||||
let cleanedResponse = text.trim();
|
||||
const originalResponseForDebug = cleanedResponse;
|
||||
let parseMethodUsed = 'raw'; // Keep track of which method worked
|
||||
|
||||
// Extract from Markdown code block first
|
||||
const codeBlockMatch = cleanedResponse.match(
|
||||
/```(?:json)?\s*([\s\S]*?)\s*```/
|
||||
);
|
||||
if (codeBlockMatch) {
|
||||
cleanedResponse = codeBlockMatch[1].trim();
|
||||
report('info', 'Extracted JSON content from Markdown code block.');
|
||||
} else {
|
||||
// If no code block, find first '{' and last '}' for the object
|
||||
const firstBrace = cleanedResponse.indexOf('{');
|
||||
const lastBrace = cleanedResponse.lastIndexOf('}');
|
||||
if (firstBrace !== -1 && lastBrace > firstBrace) {
|
||||
cleanedResponse = cleanedResponse.substring(firstBrace, lastBrace + 1);
|
||||
report('info', 'Extracted content between first { and last }.');
|
||||
} else {
|
||||
report(
|
||||
'warn',
|
||||
'Response does not appear to contain a JSON object structure. Parsing raw response.'
|
||||
);
|
||||
// --- NEW Step 1: Try extracting between {} first ---
|
||||
const firstBraceIndex = cleanedResponse.indexOf('{');
|
||||
const lastBraceIndex = cleanedResponse.lastIndexOf('}');
|
||||
let potentialJsonFromBraces = null;
|
||||
|
||||
if (firstBraceIndex !== -1 && lastBraceIndex > firstBraceIndex) {
|
||||
potentialJsonFromBraces = cleanedResponse.substring(
|
||||
firstBraceIndex,
|
||||
lastBraceIndex + 1
|
||||
);
|
||||
if (potentialJsonFromBraces.length <= 2) {
|
||||
potentialJsonFromBraces = null; // Ignore empty braces {}
|
||||
}
|
||||
}
|
||||
|
||||
// If {} extraction yielded something, try parsing it immediately
|
||||
if (potentialJsonFromBraces) {
|
||||
try {
|
||||
const testParse = JSON.parse(potentialJsonFromBraces);
|
||||
// It worked! Use this as the primary cleaned response.
|
||||
cleanedResponse = potentialJsonFromBraces;
|
||||
parseMethodUsed = 'braces';
|
||||
report(
|
||||
'info',
|
||||
'Successfully parsed JSON content extracted between first { and last }.'
|
||||
);
|
||||
} catch (e) {
|
||||
report(
|
||||
'info',
|
||||
'Content between {} looked promising but failed initial parse. Proceeding to other methods.'
|
||||
);
|
||||
// Reset cleanedResponse to original if brace parsing failed
|
||||
cleanedResponse = originalResponseForDebug;
|
||||
}
|
||||
}
|
||||
|
||||
// --- Step 2: If brace parsing didn't work or wasn't applicable, try code block extraction ---
|
||||
if (parseMethodUsed === 'raw') {
|
||||
const codeBlockMatch = cleanedResponse.match(
|
||||
/```(?:json|javascript)?\s*([\s\S]*?)\s*```/i
|
||||
);
|
||||
if (codeBlockMatch) {
|
||||
cleanedResponse = codeBlockMatch[1].trim();
|
||||
parseMethodUsed = 'codeblock';
|
||||
report('info', 'Extracted JSON content from Markdown code block.');
|
||||
} else {
|
||||
// --- Step 3: If code block failed, try stripping prefixes ---
|
||||
const commonPrefixes = [
|
||||
'json\n',
|
||||
'javascript\n'
|
||||
// ... other prefixes ...
|
||||
];
|
||||
let prefixFound = false;
|
||||
for (const prefix of commonPrefixes) {
|
||||
if (cleanedResponse.toLowerCase().startsWith(prefix)) {
|
||||
cleanedResponse = cleanedResponse.substring(prefix.length).trim();
|
||||
parseMethodUsed = 'prefix';
|
||||
report('info', `Stripped prefix: "${prefix.trim()}"`);
|
||||
prefixFound = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!prefixFound) {
|
||||
report(
|
||||
'warn',
|
||||
'Response does not appear to contain {}, code block, or known prefix. Attempting raw parse.'
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// --- Step 4: Attempt final parse ---
|
||||
let parsedTask;
|
||||
try {
|
||||
parsedTask = JSON.parse(cleanedResponse);
|
||||
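The cascade above (content between the first { and last } first, then a Markdown code block, then known prefixes, then a raw parse) can be summarized in a compact sketch; the helper name extractJsonObject is hypothetical and this is not code from the diff:

// Hypothetical summary of the extraction cascade (illustration only)
function extractJsonObject(text) {
  const raw = text.trim();
  // 1. Try the span between the first '{' and the last '}'
  const first = raw.indexOf('{');
  const last = raw.lastIndexOf('}');
  if (first !== -1 && last > first) {
    try {
      return JSON.parse(raw.substring(first, last + 1));
    } catch (e) {
      // fall through to the next strategy
    }
  }
  // 2. Try a Markdown code block
  const block = raw.match(/```(?:json|javascript)?\s*([\s\S]*?)\s*```/i);
  if (block) {
    return JSON.parse(block[1].trim());
  }
  // 3. Strip a known prefix such as "json\n", then 4. attempt a raw parse of what remains
  const stripped = raw.toLowerCase().startsWith('json\n') ? raw.slice(5).trim() : raw;
  return JSON.parse(stripped);
}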
@@ -168,7 +219,7 @@ async function updateTaskById(
|
||||
context = {},
|
||||
outputFormat = 'text'
|
||||
) {
|
||||
const { session, mcpLog } = context;
|
||||
const { session, mcpLog, projectRoot } = context;
|
||||
const logFn = mcpLog || consoleLog;
|
||||
const isMCP = !!mcpLog;
|
||||
|
||||
@@ -329,7 +380,7 @@ The changes described in the prompt should be thoughtfully applied to make the t
|
||||
let loadingIndicator = null;
|
||||
if (outputFormat === 'text') {
|
||||
loadingIndicator = startLoadingIndicator(
|
||||
useResearch ? 'Updating task with research...' : 'Updating task...'
|
||||
useResearch ? 'Updating task with research...\n' : 'Updating task...\n'
|
||||
);
|
||||
}
|
||||
|
||||
@@ -343,7 +394,8 @@ The changes described in the prompt should be thoughtfully applied to make the t
|
||||
prompt: userPrompt,
|
||||
systemPrompt: systemPrompt,
|
||||
role,
|
||||
session
|
||||
session,
|
||||
projectRoot
|
||||
});
|
||||
report('success', 'Successfully received text response from AI service');
|
||||
// --- End AI Service Call ---
|
||||
|
||||
@@ -21,6 +21,7 @@ import {
|
||||
import { getDebugFlag } from '../config-manager.js';
|
||||
import generateTaskFiles from './generate-task-files.js';
|
||||
import { generateTextService } from '../ai-services-unified.js';
|
||||
import { getModelConfiguration } from './models.js';
|
||||
|
||||
// Zod schema for validating the structure of tasks AFTER parsing
|
||||
const updatedTaskSchema = z
|
||||
@@ -42,13 +43,12 @@ const updatedTaskArraySchema = z.array(updatedTaskSchema);
|
||||
* Parses an array of task objects from AI's text response.
|
||||
* @param {string} text - Response text from AI.
|
||||
* @param {number} expectedCount - Expected number of tasks.
|
||||
* @param {Function | Object} logFn - The logging function (consoleLog) or MCP log object.
|
||||
* @param {Function | Object} logFn - The logging function or MCP log object.
|
||||
* @param {boolean} isMCP - Flag indicating if logFn is MCP logger.
|
||||
* @returns {Array} Parsed and validated tasks array.
|
||||
* @throws {Error} If parsing or validation fails.
|
||||
*/
|
||||
function parseUpdatedTasksFromText(text, expectedCount, logFn, isMCP) {
|
||||
// Helper for consistent logging inside parser
|
||||
const report = (level, ...args) => {
|
||||
if (isMCP) {
|
||||
if (typeof logFn[level] === 'function') logFn[level](...args);
|
||||
@@ -68,38 +68,98 @@ function parseUpdatedTasksFromText(text, expectedCount, logFn, isMCP) {
|
||||
|
||||
let cleanedResponse = text.trim();
|
||||
const originalResponseForDebug = cleanedResponse;
|
||||
let parseMethodUsed = 'raw'; // Track which method worked
|
||||
|
||||
// Extract from Markdown code block first
|
||||
const codeBlockMatch = cleanedResponse.match(
|
||||
/```(?:json)?\s*([\s\S]*?)\s*```/
|
||||
);
|
||||
if (codeBlockMatch) {
|
||||
cleanedResponse = codeBlockMatch[1].trim();
|
||||
report('info', 'Extracted JSON content from Markdown code block.');
|
||||
} else {
|
||||
// If no code block, find first '[' and last ']' for the array
|
||||
const firstBracket = cleanedResponse.indexOf('[');
|
||||
const lastBracket = cleanedResponse.lastIndexOf(']');
|
||||
if (firstBracket !== -1 && lastBracket > firstBracket) {
|
||||
cleanedResponse = cleanedResponse.substring(
|
||||
firstBracket,
|
||||
lastBracket + 1
|
||||
);
|
||||
report('info', 'Extracted content between first [ and last ].');
|
||||
} else {
|
||||
report(
|
||||
'warn',
|
||||
'Response does not appear to contain a JSON array structure. Parsing raw response.'
|
||||
);
|
||||
// --- NEW Step 1: Try extracting between [] first ---
|
||||
const firstBracketIndex = cleanedResponse.indexOf('[');
|
||||
const lastBracketIndex = cleanedResponse.lastIndexOf(']');
|
||||
let potentialJsonFromArray = null;
|
||||
|
||||
if (firstBracketIndex !== -1 && lastBracketIndex > firstBracketIndex) {
|
||||
potentialJsonFromArray = cleanedResponse.substring(
|
||||
firstBracketIndex,
|
||||
lastBracketIndex + 1
|
||||
);
|
||||
// Basic check to ensure it's not just "[]" or malformed
|
||||
if (potentialJsonFromArray.length <= 2) {
|
||||
potentialJsonFromArray = null; // Ignore empty array
|
||||
}
|
||||
}
|
||||
|
||||
// Attempt to parse the array
|
||||
// If [] extraction yielded something, try parsing it immediately
|
||||
if (potentialJsonFromArray) {
|
||||
try {
|
||||
const testParse = JSON.parse(potentialJsonFromArray);
|
||||
// It worked! Use this as the primary cleaned response.
|
||||
cleanedResponse = potentialJsonFromArray;
|
||||
parseMethodUsed = 'brackets';
|
||||
report(
|
||||
'info',
|
||||
'Successfully parsed JSON content extracted between first [ and last ].'
|
||||
);
|
||||
} catch (e) {
|
||||
report(
|
||||
'info',
|
||||
'Content between [] looked promising but failed initial parse. Proceeding to other methods.'
|
||||
);
|
||||
// Reset cleanedResponse to original if bracket parsing failed
|
||||
cleanedResponse = originalResponseForDebug;
|
||||
}
|
||||
}
|
||||
|
||||
// --- Step 2: If bracket parsing didn't work or wasn't applicable, try code block extraction ---
|
||||
if (parseMethodUsed === 'raw') {
|
||||
// Only look for ```json blocks now
|
||||
const codeBlockMatch = cleanedResponse.match(
|
||||
/```json\s*([\s\S]*?)\s*```/i // Only match ```json
|
||||
);
|
||||
if (codeBlockMatch) {
|
||||
cleanedResponse = codeBlockMatch[1].trim();
|
||||
parseMethodUsed = 'codeblock';
|
||||
report('info', 'Extracted JSON content from JSON Markdown code block.');
|
||||
} else {
|
||||
report('info', 'No JSON code block found.');
|
||||
// --- Step 3: If code block failed, try stripping prefixes ---
|
||||
const commonPrefixes = [
|
||||
'json\n',
|
||||
'javascript\n', // Keep checking common prefixes just in case
|
||||
'python\n',
|
||||
'here are the updated tasks:',
|
||||
'here is the updated json:',
|
||||
'updated tasks:',
|
||||
'updated json:',
|
||||
'response:',
|
||||
'output:'
|
||||
];
|
||||
let prefixFound = false;
|
||||
for (const prefix of commonPrefixes) {
|
||||
if (cleanedResponse.toLowerCase().startsWith(prefix)) {
|
||||
cleanedResponse = cleanedResponse.substring(prefix.length).trim();
|
||||
parseMethodUsed = 'prefix';
|
||||
report('info', `Stripped prefix: "${prefix.trim()}"`);
|
||||
prefixFound = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!prefixFound) {
|
||||
report(
|
||||
'warn',
|
||||
'Response does not appear to contain [], JSON code block, or known prefix. Attempting raw parse.'
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// --- Step 4: Attempt final parse ---
|
||||
let parsedTasks;
|
||||
try {
|
||||
parsedTasks = JSON.parse(cleanedResponse);
|
||||
} catch (parseError) {
|
||||
report('error', `Failed to parse JSON array: ${parseError.message}`);
|
||||
report(
|
||||
'error',
|
||||
`Extraction method used: ${parseMethodUsed}` // Log which method failed
|
||||
);
|
||||
report(
|
||||
'error',
|
||||
`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`
|
||||
@@ -113,7 +173,7 @@ function parseUpdatedTasksFromText(text, expectedCount, logFn, isMCP) {
|
||||
);
|
||||
}
|
||||
|
||||
// Validate Array structure
|
||||
// --- Step 5 & 6: Validate Array structure and Zod schema ---
|
||||
if (!Array.isArray(parsedTasks)) {
|
||||
report(
|
||||
'error',
|
||||
@@ -134,7 +194,6 @@ function parseUpdatedTasksFromText(text, expectedCount, logFn, isMCP) {
|
||||
);
|
||||
}
|
||||
|
||||
// Validate each task object using Zod
|
||||
const validationResult = updatedTaskArraySchema.safeParse(parsedTasks);
|
||||
if (!validationResult.success) {
|
||||
report('error', 'Parsed task array failed Zod validation.');
|
||||
@@ -147,7 +206,6 @@ function parseUpdatedTasksFromText(text, expectedCount, logFn, isMCP) {
|
||||
}
|
||||
|
||||
report('info', 'Successfully validated task structure.');
|
||||
// Return the validated data, potentially filtering/adjusting length if needed
|
||||
return validationResult.data.slice(
|
||||
0,
|
||||
expectedCount || validationResult.data.length
|
||||
@@ -173,7 +231,7 @@ async function updateTasks(
|
||||
context = {},
|
||||
outputFormat = 'text' // Default to text for CLI
|
||||
) {
|
||||
const { session, mcpLog } = context;
|
||||
const { session, mcpLog, projectRoot } = context;
|
||||
// Use mcpLog if available, otherwise use the imported consoleLog function
|
||||
const logFn = mcpLog || consoleLog;
|
||||
// Flag to easily check which logger type we have
|
||||
@@ -217,7 +275,7 @@ async function updateTasks(
|
||||
chalk.cyan.bold('Title'),
|
||||
chalk.cyan.bold('Status')
|
||||
],
|
||||
colWidths: [5, 60, 10]
|
||||
colWidths: [5, 70, 20]
|
||||
});
|
||||
|
||||
tasksToUpdate.forEach((task) => {
|
||||
@@ -294,9 +352,7 @@ The changes described in the prompt should be applied to ALL tasks in the list.`
|
||||
|
||||
let loadingIndicator = null;
|
||||
if (outputFormat === 'text') {
|
||||
loadingIndicator = startLoadingIndicator(
|
||||
'Calling AI service to update tasks...'
|
||||
);
|
||||
loadingIndicator = startLoadingIndicator('Updating tasks...\n');
|
||||
}
|
||||
|
||||
let responseText = '';
|
||||
@@ -312,7 +368,8 @@ The changes described in the prompt should be applied to ALL tasks in the list.`
|
||||
prompt: userPrompt,
|
||||
systemPrompt: systemPrompt,
|
||||
role,
|
||||
session
|
||||
session,
|
||||
projectRoot
|
||||
});
|
||||
if (isMCP) logFn.info('Successfully received text response');
|
||||
else
|
||||
|
||||
@@ -9,7 +9,13 @@ import boxen from 'boxen';
|
||||
import ora from 'ora';
|
||||
import Table from 'cli-table3';
|
||||
import gradient from 'gradient-string';
|
||||
import { log, findTaskById, readJSON, truncate } from './utils.js';
|
||||
import {
|
||||
log,
|
||||
findTaskById,
|
||||
readJSON,
|
||||
truncate,
|
||||
isSilentMode
|
||||
} from './utils.js';
|
||||
import path from 'path';
|
||||
import fs from 'fs';
|
||||
import {
|
||||
@@ -27,6 +33,8 @@ const warmGradient = gradient(['#fb8b24', '#e36414', '#9a031e']);
|
||||
* Display a fancy banner for the CLI
|
||||
*/
|
||||
function displayBanner() {
|
||||
if (isSilentMode()) return;
|
||||
|
||||
console.clear();
|
||||
const bannerText = figlet.textSync('Task Master', {
|
||||
font: 'Standard',
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import chalk from 'chalk';
|
||||
import dotenv from 'dotenv';
|
||||
// Import specific config getters needed here
|
||||
import { getLogLevel, getDebugFlag } from './config-manager.js';
|
||||
|
||||
@@ -14,16 +15,47 @@ let silentMode = false;
|
||||
|
||||
// --- Environment Variable Resolution Utility ---
/**
* Resolves an environment variable by checking process.env first, then session.env.
* @param {string} varName - The name of the environment variable.
* @param {string|null} session - The MCP session object (optional).
* Resolves an environment variable's value.
* Precedence:
* 1. session.env (if session provided)
* 2. .env file at projectRoot (if projectRoot provided)
* 3. process.env
* @param {string} key - The environment variable key.
* @param {object|null} [session=null] - The MCP session object.
* @param {string|null} [projectRoot=null] - The project root directory (for .env fallback).
* @returns {string|undefined} The value of the environment variable or undefined if not found.
*/
function resolveEnvVariable(varName, session) {
|
||||
// Ensure session and session.env exist before attempting access
|
||||
const sessionValue =
|
||||
session && session.env ? session.env[varName] : undefined;
|
||||
return process.env[varName] ?? sessionValue;
|
||||
function resolveEnvVariable(key, session = null, projectRoot = null) {
|
||||
// 1. Check session.env
|
||||
if (session?.env?.[key]) {
|
||||
return session.env[key];
|
||||
}
|
||||
|
||||
// 2. Read .env file at projectRoot
|
||||
if (projectRoot) {
|
||||
const envPath = path.join(projectRoot, '.env');
|
||||
if (fs.existsSync(envPath)) {
|
||||
try {
|
||||
const envFileContent = fs.readFileSync(envPath, 'utf-8');
|
||||
const parsedEnv = dotenv.parse(envFileContent); // Use dotenv to parse
|
||||
if (parsedEnv && parsedEnv[key]) {
|
||||
// console.log(`DEBUG: Found key ${key} in ${envPath}`); // Optional debug log
|
||||
return parsedEnv[key];
|
||||
}
|
||||
} catch (error) {
|
||||
// Log error but don't crash, just proceed as if key wasn't found in file
|
||||
log('warn', `Could not read or parse ${envPath}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Fallback: Check process.env
|
||||
if (process.env[key]) {
|
||||
return process.env[key];
|
||||
}
|
||||
|
||||
// Not found anywhere
|
||||
return undefined;
|
||||
}
|
||||
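A brief usage sketch, assuming a hypothetical project path; the resolution order follows the implementation above (session.env, then the project's .env file, then process.env):

// Illustration only: resolve an API key for an MCP session with .env and process.env fallbacks
const apiKey = resolveEnvVariable('ANTHROPIC_API_KEY', session, '/path/to/project');
if (!apiKey) {
  log('warn', 'ANTHROPIC_API_KEY not found in session.env, .env, or process.env');
}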
|
||||
// --- Project Root Finding Utility ---
|
||||
@@ -508,8 +540,6 @@ function detectCamelCaseFlags(args) {
|
||||
// Export all utility functions and configuration
|
||||
export {
|
||||
CONFIG,
|
||||
// CONFIG, <-- Already Removed
|
||||
// getConfig <-- Removing now
|
||||
LOG_LEVELS,
|
||||
log,
|
||||
readJSON,
|
||||
@@ -545,5 +575,4 @@ export {
|
||||
resolveEnvVariable,
|
||||
getTaskManager,
|
||||
findProjectRoot
|
||||
// getConfig <-- Removed
|
||||
};
|
||||
|
||||
@@ -1,3 +0,0 @@
Task Master PRD

Create a CLI tool for task management
@@ -1,259 +1,299 @@
{
"meta": {
"generatedAt": "2025-04-25T02:29:42.258Z",
"tasksAnalyzed": 31,
"generatedAt": "2025-05-03T04:45:36.864Z",
"tasksAnalyzed": 36,
"thresholdScore": 5,
"projectName": "Task Master",
"projectName": "Taskmaster",
"usedResearch": false
},
"complexityAnalysis": [
{
|
||||
"taskId": 24,
|
||||
"taskTitle": "Implement AI-Powered Test Generation Command",
|
||||
"complexityScore": 9,
|
||||
"recommendedSubtasks": 10,
|
||||
"expansionPrompt": "Break down the implementation of an AI-powered test generation command into granular steps, covering CLI integration, task retrieval, AI prompt construction, API integration, test file formatting, error handling, documentation, and comprehensive testing (unit, integration, error cases, and manual verification).",
|
||||
"reasoning": "This task involves advanced CLI development, deep integration with external AI APIs, dynamic prompt engineering, file system operations, error handling, and extensive testing. It requires orchestrating multiple subsystems and ensuring robust, user-friendly output. The cognitive and technical demands are high, justifying a high complexity score and a need for further decomposition into at least 10 subtasks to manage risk and ensure quality.[1][3][4][5]"
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Expand the 'Implement AI-Powered Test Generation Command' task by detailing the specific steps required for AI prompt engineering, including data extraction, prompt formatting, and error handling.",
|
||||
"reasoning": "Requires AI integration, complex logic, and thorough testing. Prompt engineering and API interaction add significant complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 26,
|
||||
"taskTitle": "Implement Context Foundation for AI Operations",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 8,
|
||||
"expansionPrompt": "Expand the context foundation implementation into detailed subtasks for CLI flag integration, file reading utilities, error handling, context formatting, command handler updates, documentation, and comprehensive testing for both functionality and error scenarios.",
|
||||
"reasoning": "This task introduces foundational context management across multiple commands, requiring careful CLI design, file I/O, error handling, and integration with AI prompt construction. While less complex than full AI-powered features, it still spans several modules and requires robust validation, suggesting a moderate-to-high complexity and a need for further breakdown.[1][3][4]"
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Expand the 'Implement Context Foundation for AI Operations' task by detailing the specific steps for integrating file reading, cursor rules, and basic context extraction into the Claude API prompts.",
|
||||
"reasoning": "Involves modifying multiple commands and integrating different context sources. Error handling and backwards compatibility are crucial."
|
||||
},
|
||||
{
|
||||
"taskId": 27,
|
||||
"taskTitle": "Implement Context Enhancements for AI Operations",
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 10,
|
||||
"expansionPrompt": "Decompose the context enhancement task into subtasks for code context extraction, task history integration, PRD summarization, context formatting, token optimization, error handling, and comprehensive testing for each new context type.",
|
||||
"reasoning": "This phase builds on the foundation to add sophisticated context extraction (code, history, PRD), requiring advanced parsing, summarization, and prompt engineering. The need to optimize for token limits and maintain performance across large codebases increases both technical and cognitive complexity, warranting a high score and further subtask expansion.[1][3][4][5]"
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Expand the 'Implement Context Enhancements for AI Operations' task by detailing the specific steps for code context extraction, task history integration, and PRD context integration, including parsing, summarization, and formatting.",
|
||||
"reasoning": "Builds upon the previous task with more sophisticated context extraction and integration. Requires intelligent parsing and summarization."
|
||||
},
|
||||
{
|
||||
"taskId": 28,
|
||||
"taskTitle": "Implement Advanced ContextManager System",
|
||||
"complexityScore": 10,
|
||||
"recommendedSubtasks": 12,
|
||||
"expansionPrompt": "Expand the ContextManager implementation into subtasks for class design, context source integration, optimization algorithms, caching, token management, command interface updates, AI service integration, performance monitoring, logging, and comprehensive testing (unit, integration, performance, and user experience).",
|
||||
"reasoning": "This is a highly complex architectural task involving advanced class design, optimization algorithms, dynamic context prioritization, caching, and integration with multiple AI services. It requires deep system knowledge, careful performance considerations, and robust error handling, making it one of the most complex tasks in the set and justifying a large number of subtasks.[1][3][4][5]"
|
||||
"complexityScore": 9,
|
||||
"recommendedSubtasks": 7,
|
||||
"expansionPrompt": "Expand the 'Implement Advanced ContextManager System' task by detailing the specific steps for creating the ContextManager class, implementing the optimization pipeline, and adding command interface enhancements, including caching and performance monitoring.",
|
||||
"reasoning": "A comprehensive system requiring careful design, optimization, and testing. Involves complex algorithms and performance considerations."
|
||||
},
|
||||
{
|
||||
"taskId": 32,
|
||||
"taskTitle": "Implement \"learn\" Command for Automatic Cursor Rule Generation",
|
||||
"complexityScore": 9,
|
||||
"recommendedSubtasks": 15,
|
||||
"expansionPrompt": "Break down the 'learn' command implementation into subtasks for file structure setup, path utilities, chat history analysis, rule management, AI integration, error handling, performance optimization, CLI integration, logging, and comprehensive testing.",
|
||||
"reasoning": "This task requires orchestrating file system operations, parsing complex chat and code histories, managing rule templates, integrating with AI for pattern extraction, and ensuring robust error handling and performance. The breadth and depth of required functionality, along with the need for both automatic and manual triggers, make this a highly complex task needing extensive decomposition.[1][3][4][5]"
|
||||
},
|
||||
{
|
||||
"taskId": 35,
|
||||
"taskTitle": "Integrate Grok3 API for Research Capabilities",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 8,
|
||||
"expansionPrompt": "Expand the Grok3 API integration into subtasks for API client development, service layer updates, payload/response adaptation, error handling, configuration management, UI updates, backward compatibility, and documentation/testing.",
|
||||
"reasoning": "This migration task involves replacing a core external API, adapting to new request/response formats, updating configuration and UI, and ensuring backward compatibility. While not as cognitively complex as some AI tasks, the risk and breadth of impact across the system justify a moderate-to-high complexity and further breakdown.[1][3][4]"
|
||||
},
|
||||
{
|
||||
"taskId": 36,
|
||||
"taskTitle": "Add Ollama Support for AI Services as Claude Alternative",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 8,
|
||||
"expansionPrompt": "Decompose the Ollama integration into subtasks for service class implementation, configuration, model selection, prompt formatting, error handling, fallback logic, documentation, and comprehensive testing.",
|
||||
"reasoning": "Adding a local AI provider requires interface compatibility, configuration management, error handling, and fallback logic, as well as user documentation. The technical complexity is moderate-to-high, especially in ensuring seamless switching and robust error handling, warranting further subtasking.[1][3][4]"
|
||||
},
|
||||
{
|
||||
"taskId": 37,
|
||||
"taskTitle": "Add Gemini Support for Main AI Services as Claude Alternative",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 8,
|
||||
"expansionPrompt": "Expand Gemini integration into subtasks for service class creation, authentication, prompt/response mapping, configuration, error handling, streaming support, documentation, and comprehensive testing.",
|
||||
"reasoning": "Integrating a new cloud AI provider involves authentication, API adaptation, configuration, and ensuring feature parity. The complexity is similar to other provider integrations, requiring careful planning and multiple subtasks for robust implementation and testing.[1][3][4]"
|
||||
"recommendedSubtasks": 10,
|
||||
"expansionPrompt": "Expand the 'Implement \"learn\" Command for Automatic Cursor Rule Generation' task by detailing the specific steps for Cursor data analysis, rule management, and AI integration, including error handling and performance optimization.",
|
||||
"reasoning": "Requires deep integration with Cursor's data, complex pattern analysis, and AI interaction. Significant error handling and performance optimization are needed."
|
||||
},
|
||||
{
|
||||
"taskId": 40,
|
||||
"taskTitle": "Implement 'plan' Command for Task Implementation Planning",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Break down the 'plan' command implementation into subtasks for CLI integration, task/subtask retrieval, AI prompt construction, plan formatting, error handling, and testing.",
|
||||
"reasoning": "This task involves AI prompt engineering, CLI integration, and content formatting, but is more focused and less technically demanding than full AI service or context management features. It still requires careful error handling and testing, suggesting a moderate complexity and a handful of subtasks.[1][3][4]"
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Expand the 'Implement 'plan' Command for Task Implementation Planning' task by detailing the steps for retrieving task content, generating implementation plans with AI, and formatting the plan within XML tags.",
|
||||
"reasoning": "Involves AI integration and requires careful formatting and error handling. Switching between Claude and Perplexity adds complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 41,
|
||||
"taskTitle": "Implement Visual Task Dependency Graph in Terminal",
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 10,
|
||||
"expansionPrompt": "Expand the visual dependency graph implementation into subtasks for CLI command setup, graph layout algorithms, ASCII/Unicode rendering, color coding, circular dependency detection, filtering, accessibility, performance optimization, documentation, and testing.",
|
||||
"reasoning": "Rendering complex dependency graphs in the terminal with color coding, layout optimization, and accessibility features is technically challenging and requires careful algorithm design and robust error handling. The need for performance optimization and user-friendly output increases the complexity, justifying a high score and further subtasking.[1][3][4][5]"
|
||||
"recommendedSubtasks": 8,
|
||||
"expansionPrompt": "Expand the 'Implement Visual Task Dependency Graph in Terminal' task by detailing the steps for designing the graph rendering system, implementing layout algorithms, and handling circular dependencies and filtering options.",
|
||||
"reasoning": "Requires complex graph algorithms and terminal rendering. Accessibility and performance are important considerations."
|
||||
},
|
||||
{
|
||||
"taskId": 42,
|
||||
"taskTitle": "Implement MCP-to-MCP Communication Protocol",
|
||||
"complexityScore": 10,
|
||||
"recommendedSubtasks": 12,
|
||||
"expansionPrompt": "Break down the MCP-to-MCP protocol implementation into subtasks for protocol definition, adapter pattern, client module, reference integration, mode support, core module updates, configuration, documentation, error handling, security, and comprehensive testing.",
|
||||
"reasoning": "Designing and implementing a standardized communication protocol with dynamic mode switching, adapter patterns, and robust error handling is architecturally complex. It requires deep system understanding, security considerations, and extensive testing, making it one of the most complex tasks and requiring significant decomposition.[1][3][4][5]"
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 7,
|
||||
"expansionPrompt": "Expand the 'Implement MCP-to-MCP Communication Protocol' task by detailing the steps for defining the protocol, implementing the adapter pattern, and building the client module, including error handling and security considerations.",
|
||||
"reasoning": "Requires designing a new protocol and implementing communication with external systems. Security and error handling are critical."
|
||||
},
|
||||
{
|
||||
"taskId": 43,
|
||||
"taskTitle": "Add Research Flag to Add-Task Command",
|
||||
"complexityScore": 5,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Expand the research flag implementation into subtasks for CLI parser updates, subtask generation logic, parent linking, help documentation, and testing.",
|
||||
"reasoning": "This is a focused feature addition involving CLI parsing, subtask generation, and documentation. While it requires some integration with AI or templating logic, the scope is well-defined and less complex than architectural or multi-module tasks, suggesting a moderate complexity and a handful of subtasks.[1][3][4]"
|
||||
"recommendedSubtasks": 3,
|
||||
"expansionPrompt": "Expand the 'Add Research Flag to Add-Task Command' task by detailing the steps for updating the command parser, generating research subtasks, and linking them to the parent task.",
|
||||
"reasoning": "Relatively straightforward, but requires careful handling of subtask generation and linking."
|
||||
},
|
||||
{
|
||||
"taskId": 44,
|
||||
"taskTitle": "Implement Task Automation with Webhooks and Event Triggers",
|
||||
"complexityScore": 9,
|
||||
"recommendedSubtasks": 10,
|
||||
"expansionPrompt": "Decompose the webhook and event trigger system into subtasks for event system design, webhook registration, trigger definition, incoming/outgoing webhook handling, authentication, rate limiting, CLI management, payload templating, logging, and comprehensive testing.",
|
||||
"reasoning": "Building a robust automation system with webhooks and event triggers involves designing an event system, secure webhook handling, trigger logic, CLI management, and error handling. The breadth and integration requirements make this a highly complex task needing extensive breakdown.[1][3][4][5]"
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 7,
|
||||
"expansionPrompt": "Expand the 'Implement Task Automation with Webhooks and Event Triggers' task by detailing the steps for implementing the webhook registration system, event system, and trigger definition interface, including security and error handling.",
|
||||
"reasoning": "Requires designing a robust event system and integrating with external services. Security and error handling are critical."
|
||||
},
|
||||
{
|
||||
"taskId": 45,
|
||||
"taskTitle": "Implement GitHub Issue Import Feature",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 8,
|
||||
"expansionPrompt": "Expand the GitHub issue import feature into subtasks for CLI flag parsing, URL extraction, API integration, data mapping, authentication, error handling, override logic, documentation, and testing.",
|
||||
"reasoning": "This task involves external API integration, data mapping, authentication, error handling, and user override logic. While not as complex as architectural changes, it still requires careful planning and multiple subtasks for robust implementation and testing.[1][3][4]"
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Expand the 'Implement GitHub Issue Import Feature' task by detailing the steps for parsing the URL, fetching issue details from the GitHub API, and generating a well-formatted task.",
|
||||
"reasoning": "Requires interacting with the GitHub API and handling various error conditions. Authentication adds complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 46,
|
||||
"taskTitle": "Implement ICE Analysis Command for Task Prioritization",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 8,
|
||||
"expansionPrompt": "Break down the ICE analysis command into subtasks for scoring algorithm development, LLM prompt engineering, report generation, CLI rendering, integration with complexity reports, sorting/filtering, error handling, and testing.",
|
||||
"reasoning": "Implementing a prioritization command with LLM-based scoring, report generation, and CLI rendering involves moderate technical and cognitive complexity, especially in ensuring accurate and actionable outputs. It requires several subtasks for robust implementation and validation.[1][3][4]"
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Expand the 'Implement ICE Analysis Command for Task Prioritization' task by detailing the steps for calculating ICE scores, generating the report file, and implementing the CLI rendering.",
|
||||
"reasoning": "Requires AI integration for scoring and careful formatting of the report. Integration with existing complexity reports adds complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 47,
|
||||
"taskTitle": "Enhance Task Suggestion Actions Card Workflow",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 8,
|
||||
"expansionPrompt": "Expand the workflow enhancement into subtasks for UI redesign, phase management logic, interactive elements, progress tracking, context addition, task management integration, accessibility, and comprehensive testing.",
|
||||
"reasoning": "Redesigning a multi-phase workflow with interactive UI elements, progress tracking, and context management involves both UI/UX and logic complexity. The need for seamless transitions and robust state management increases the complexity, warranting further breakdown.[1][3][4]"
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Expand the 'Enhance Task Suggestion Actions Card Workflow' task by detailing the steps for implementing the task expansion, context addition, and task management phases, including UI/UX considerations.",
|
||||
"reasoning": "Requires significant UI/UX work and careful state management. Integration with existing functionality is crucial."
|
||||
},
|
||||
{
|
||||
"taskId": 48,
|
||||
"taskTitle": "Refactor Prompts into Centralized Structure",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Break down the prompt refactoring into subtasks for directory setup, prompt extraction, import updates, naming conventions, documentation, and regression testing.",
|
||||
"reasoning": "This is a codebase refactoring task focused on maintainability and organization. While it touches many files, the technical complexity is moderate, but careful planning and testing are needed to avoid regressions, suggesting a moderate complexity and several subtasks.[1][3][4]"
|
||||
"complexityScore": 5,
|
||||
"recommendedSubtasks": 3,
|
||||
"expansionPrompt": "Expand the 'Refactor Prompts into Centralized Structure' task by detailing the steps for creating the 'prompts' directory, extracting prompts into individual files, and updating functions to import them.",
|
||||
"reasoning": "Primarily a refactoring task, but requires careful attention to detail to avoid breaking existing functionality."
|
||||
},
|
||||
{
|
||||
"taskId": 49,
|
||||
"taskTitle": "Implement Code Quality Analysis Command",
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 10,
|
||||
"expansionPrompt": "Expand the code quality analysis command into subtasks for pattern recognition, best practice verification, AI integration, recommendation generation, task integration, CLI development, configuration, error handling, documentation, and comprehensive testing.",
|
||||
"reasoning": "This task involves static code analysis, AI integration for best practice checks, recommendation generation, and task creation workflows. The technical and cognitive demands are high, requiring robust validation and integration, justifying a high complexity and multiple subtasks.[1][3][4][5]"
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Expand the 'Implement Code Quality Analysis Command' task by detailing the steps for pattern recognition, best practice verification, and improvement recommendations, including AI integration and task creation.",
|
||||
"reasoning": "Requires complex code analysis and AI integration. Generating actionable recommendations adds complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 50,
|
||||
"taskTitle": "Implement Test Coverage Tracking System by Task",
|
||||
"complexityScore": 9,
|
||||
"recommendedSubtasks": 12,
|
||||
"expansionPrompt": "Break down the test coverage tracking system into subtasks for data structure design, coverage parsing, mapping algorithms, CLI commands, LLM-powered test generation, MCP integration, visualization, workflow integration, error handling, documentation, and comprehensive testing.",
|
||||
"reasoning": "Mapping test coverage to tasks, integrating with coverage tools, generating targeted tests, and visualizing coverage requires advanced data modeling, parsing, AI integration, and workflow design. The breadth and depth of this system make it highly complex and in need of extensive decomposition.[1][3][4][5]"
|
||||
"recommendedSubtasks": 7,
|
||||
"expansionPrompt": "Expand the 'Implement Test Coverage Tracking System by Task' task by detailing the steps for creating the tests.json file structure, developing the coverage report parser, and implementing the CLI commands and AI-powered test generation system.",
|
||||
"reasoning": "A comprehensive system requiring deep integration with testing tools and AI. Maintaining bidirectional relationships adds complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 51,
|
||||
"taskTitle": "Implement Perplexity Research Command",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 8,
|
||||
"expansionPrompt": "Expand the Perplexity research command into subtasks for API client development, context extraction, CLI interface, result formatting, caching, error handling, documentation, and comprehensive testing.",
|
||||
"reasoning": "This task involves external API integration, context extraction, CLI development, result formatting, caching, and error handling. The technical complexity is moderate-to-high, especially in ensuring robust and user-friendly output, suggesting multiple subtasks.[1][3][4]"
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Expand the 'Implement Perplexity Research Command' task by detailing the steps for creating the Perplexity API client, implementing task context extraction, and building the CLI interface.",
|
||||
"reasoning": "Requires API integration and careful formatting of the research results. Caching adds complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 52,
|
||||
"taskTitle": "Implement Task Suggestion Command for CLI",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Break down the task suggestion command into subtasks for task snapshot collection, context extraction, AI suggestion generation, interactive CLI interface, error handling, and testing.",
|
||||
"reasoning": "This is a focused feature involving AI suggestion generation and interactive CLI elements. While it requires careful context management and error handling, the scope is well-defined and less complex than architectural or multi-module tasks, suggesting a moderate complexity and several subtasks.[1][3][4]"
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Expand the 'Implement Task Suggestion Command for CLI' task by detailing the steps for collecting existing task data, generating task suggestions with AI, and implementing the interactive CLI interface.",
|
||||
"reasoning": "Requires AI integration and careful design of the interactive interface. Handling various flag combinations adds complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 53,
|
||||
"taskTitle": "Implement Subtask Suggestion Feature for Parent Tasks",
|
||||
"complexityScore": 6,
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Expand the subtask suggestion feature into subtasks for parent task validation, context gathering, AI suggestion logic, interactive CLI interface, subtask linking, and testing.",
|
||||
"reasoning": "Similar to the task suggestion command, this feature is focused but requires robust context management, AI integration, and interactive CLI handling. The complexity is moderate, warranting several subtasks for a robust implementation.[1][3][4]"
|
||||
},
|
||||
{
|
||||
"taskId": 54,
|
||||
"taskTitle": "Add Research Flag to Add-Task Command",
|
||||
"complexityScore": 5,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Break down the research flag enhancement into subtasks for CLI parser updates, research invocation, user interaction, task creation flow integration, and testing.",
|
||||
"reasoning": "This is a focused enhancement involving CLI parsing, research invocation, and user interaction. The technical complexity is moderate, with a clear scope and integration points, suggesting a handful of subtasks.[1][3][4]"
|
||||
"expansionPrompt": "Expand the 'Implement Subtask Suggestion Feature for Parent Tasks' task by detailing the steps for validating parent tasks, gathering context, generating subtask suggestions with AI, and implementing the interactive CLI interface.",
|
||||
"reasoning": "Requires AI integration and careful design of the interactive interface. Linking subtasks to parent tasks adds complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 55,
|
||||
"taskTitle": "Implement Positional Arguments Support for CLI Commands",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Expand positional argument support into subtasks for parser updates, argument mapping, help documentation, error handling, backward compatibility, and comprehensive testing.",
|
||||
"reasoning": "Upgrading CLI parsing to support positional arguments requires careful mapping, error handling, documentation, and regression testing to maintain backward compatibility. The complexity is moderate, suggesting several subtasks.[1][3][4]"
|
||||
},
|
||||
{
|
||||
"taskId": 56,
|
||||
"taskTitle": "Refactor Task-Master Files into Node Module Structure",
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 10,
|
||||
"expansionPrompt": "Break down the refactoring into subtasks for directory setup, file migration, import path updates, build script adjustments, compatibility checks, documentation, regression testing, and rollback planning.",
|
||||
"reasoning": "This is a high-risk, broad refactoring affecting many files and build processes. It requires careful planning, incremental changes, and extensive testing to avoid regressions, justifying a high complexity and multiple subtasks.[1][3][4][5]"
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Expand the 'Implement Positional Arguments Support for CLI Commands' task by detailing the steps for updating the argument parsing logic, defining the positional argument order, and handling edge cases.",
|
||||
"reasoning": "Requires careful modification of the command parsing logic and ensuring backward compatibility. Handling edge cases adds complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 57,
|
||||
"taskTitle": "Enhance Task-Master CLI User Experience and Interface",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 8,
|
||||
"expansionPrompt": "Expand the CLI UX enhancement into subtasks for log management, visual design, interactive elements, output formatting, help/documentation, accessibility, performance optimization, and comprehensive testing.",
|
||||
"reasoning": "Improving CLI UX involves log management, visual enhancements, interactive elements, and accessibility, requiring both technical and design skills. The breadth of improvements and need for robust testing increase the complexity, suggesting multiple subtasks.[1][3][4]"
|
||||
},
|
||||
{
|
||||
"taskId": 58,
|
||||
"taskTitle": "Implement Elegant Package Update Mechanism for Task-Master",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 8,
|
||||
"expansionPrompt": "Break down the update mechanism into subtasks for version detection, update command implementation, file management, configuration migration, notification system, rollback logic, documentation, and comprehensive testing.",
|
||||
"reasoning": "Implementing a robust update mechanism involves version management, file operations, configuration migration, rollback planning, and user communication. The technical and operational complexity is moderate-to-high, requiring multiple subtasks.[1][3][4]"
|
||||
},
|
||||
{
|
||||
"taskId": 59,
|
||||
"taskTitle": "Remove Manual Package.json Modifications and Implement Automatic Dependency Management",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Expand the dependency management refactor into subtasks for code audit, removal of manual modifications, npm dependency updates, initialization command updates, documentation, and regression testing.",
|
||||
"reasoning": "This is a focused refactoring to align with npm best practices. While it touches installation and configuration logic, the technical complexity is moderate, with a clear scope and manageable risk, suggesting several subtasks.[1][3][4]"
|
||||
"expansionPrompt": "Expand the 'Enhance Task-Master CLI User Experience and Interface' task by detailing the steps for log management, visual enhancements, interactive elements, and output formatting.",
|
||||
"reasoning": "Requires significant UI/UX work and careful consideration of different terminal environments. Reducing verbose logging adds complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 60,
|
||||
"taskTitle": "Implement Mentor System with Round-Table Discussion Feature",
|
||||
"complexityScore": 9,
|
||||
"recommendedSubtasks": 12,
|
||||
"expansionPrompt": "Break down the mentor system implementation into subtasks for mentor management, round-table simulation, CLI integration, AI personality simulation, task integration, output formatting, error handling, documentation, and comprehensive testing.",
|
||||
"reasoning": "This task involves designing a new system for mentor management, simulating multi-personality AI discussions, integrating with tasks, and ensuring robust CLI and output handling. The breadth and novelty of the feature, along with the need for robust simulation and integration, make it highly complex and in need of extensive decomposition.[1][3][4][5]"
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 7,
|
||||
"expansionPrompt": "Expand the 'Implement Mentor System with Round-Table Discussion Feature' task by detailing the steps for mentor management, round-table discussion implementation, and integration with the task system, including LLM integration.",
|
||||
"reasoning": "Requires complex AI simulation and careful formatting of the discussion output. Integrating with the task system adds complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 61,
|
||||
"taskTitle": "Implement Flexible AI Model Management",
|
||||
"complexityScore": 10,
|
||||
"recommendedSubtasks": 15,
|
||||
"expansionPrompt": "Expand the AI model management implementation into subtasks for configuration management, CLI command parsing, provider module development, unified service abstraction, environment variable handling, documentation, integration testing, migration planning, and cleanup of legacy code.",
|
||||
"reasoning": "This is a major architectural overhaul involving configuration management, CLI design, multi-provider integration, abstraction layers, environment variable handling, documentation, and migration. The technical and organizational complexity is extremely high, requiring extensive decomposition and careful coordination.[1][3][4][5]"
|
||||
"complexityScore": 9,
|
||||
"recommendedSubtasks": 8,
|
||||
"expansionPrompt": "Expand the 'Implement Flexible AI Model Management' task by detailing the steps for creating the configuration management module, implementing the CLI command parser, and integrating the Vercel AI SDK.",
|
||||
"reasoning": "Requires deep integration with multiple AI models and careful management of API keys and configuration options. Vercel AI SDK integration adds complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 62,
|
||||
"taskTitle": "Add --simple Flag to Update Commands for Direct Text Input",
|
||||
"complexityScore": 5,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Expand the 'Add --simple Flag to Update Commands for Direct Text Input' task by detailing the steps for updating the command parsers, implementing the conditional logic, and formatting the user input with a timestamp.",
|
||||
"reasoning": "Relatively straightforward, but requires careful attention to formatting and ensuring consistency with AI-processed updates."
|
||||
},
|
||||
{
|
||||
"taskId": 63,
|
||||
"taskTitle": "Add pnpm Support for the Taskmaster Package",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Expand the 'Add pnpm Support for the Taskmaster Package' task by detailing the steps for updating the documentation, ensuring package scripts compatibility, and testing the installation and operation with pnpm.",
|
||||
"reasoning": "Requires careful attention to detail to ensure compatibility with pnpm's execution model. Testing and documentation are crucial."
|
||||
},
|
||||
{
|
||||
"taskId": 64,
|
||||
"taskTitle": "Add Yarn Support for Taskmaster Installation",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Expand the 'Add Yarn Support for Taskmaster Installation' task by detailing the steps for updating package.json, adding Yarn-specific configuration files, and testing the installation and operation with Yarn.",
|
||||
"reasoning": "Requires careful attention to detail to ensure compatibility with Yarn's execution model. Testing and documentation are crucial."
|
||||
},
|
||||
{
|
||||
"taskId": 65,
|
||||
"taskTitle": "Add Bun Support for Taskmaster Installation",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Expand the 'Add Bun Support for Taskmaster Installation' task by detailing the steps for updating the installation scripts, testing the installation and operation with Bun, and updating the documentation.",
|
||||
"reasoning": "Requires careful attention to detail to ensure compatibility with Bun's execution model. Testing and documentation are crucial."
|
||||
},
|
||||
{
|
||||
"taskId": 66,
|
||||
"taskTitle": "Support Status Filtering in Show Command for Subtasks",
|
||||
"complexityScore": 5,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Expand the 'Support Status Filtering in Show Command for Subtasks' task by detailing the steps for updating the command parser, modifying the show command handler, and updating the help documentation.",
|
||||
"reasoning": "Relatively straightforward, but requires careful handling of status validation and filtering."
|
||||
},
|
||||
{
|
||||
"taskId": 67,
|
||||
"taskTitle": "Add CLI JSON output and Cursor keybindings integration",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Expand the 'Add CLI JSON output and Cursor keybindings integration' task by detailing the steps for implementing the JSON output logic, creating the install-keybindings command structure, and handling keybinding file manipulation.",
|
||||
"reasoning": "Requires careful formatting of the JSON output and handling of file system operations. OS detection adds complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 68,
|
||||
"taskTitle": "Ability to create tasks without parsing PRD",
|
||||
"complexityScore": 3,
|
||||
"recommendedSubtasks": 2,
|
||||
"expansionPrompt": "Expand the 'Ability to create tasks without parsing PRD' task by detailing the steps for creating tasks without a PRD.",
|
||||
"reasoning": "Simple task to allow task creation without a PRD."
|
||||
},
|
||||
{
|
||||
"taskId": 69,
|
||||
"taskTitle": "Enhance Analyze Complexity for Specific Task IDs",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Expand the 'Enhance Analyze Complexity for Specific Task IDs' task by detailing the steps for modifying the core logic, updating the CLI, and updating the MCP tool.",
|
||||
"reasoning": "Requires modifying existing functionality and ensuring compatibility with both CLI and MCP."
|
||||
},
|
||||
{
|
||||
"taskId": 70,
|
||||
"taskTitle": "Implement 'diagram' command for Mermaid diagram generation",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Expand the 'Implement 'diagram' command for Mermaid diagram generation' task by detailing the steps for creating the command, generating the Mermaid diagram, and handling different output options.",
|
||||
"reasoning": "Requires generating Mermaid diagrams and handling different output options."
|
||||
},
|
||||
{
|
||||
"taskId": 72,
|
||||
"taskTitle": "Implement PDF Generation for Project Progress and Dependency Overview",
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Expand the 'Implement PDF Generation for Project Progress and Dependency Overview' task by detailing the steps for summarizing project progress, visualizing the dependency chain, and generating the PDF document.",
|
||||
"reasoning": "Requires integrating with the diagram command and using a PDF generation library. Handling large dependency chains adds complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 73,
|
||||
"taskTitle": "Implement Custom Model ID Support for Ollama/OpenRouter",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Break down the --simple flag implementation into subtasks for CLI parser updates, update logic modification, timestamp formatting, display logic, documentation, and testing.",
|
||||
"reasoning": "This is a focused feature addition involving CLI parsing, conditional logic, timestamp formatting, and display updates. The technical complexity is moderate, with a clear scope and manageable risk, suggesting a handful of subtasks.[1][3][4]"
|
||||
"expansionPrompt": "Expand the 'Implement Custom Model ID Support for Ollama/OpenRouter' task by detailing the steps for modifying the CLI, implementing the interactive setup, and handling validation and warnings.",
|
||||
"reasoning": "Requires integrating with external APIs and handling different model types. Validation and warnings are crucial."
|
||||
},
|
||||
{
|
||||
"taskId": 75,
|
||||
"taskTitle": "Integrate Google Search Grounding for Research Role",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Expand the 'Integrate Google Search Grounding for Research Role' task by detailing the steps for modifying the AI service layer, implementing the conditional logic, and updating the supported models.",
|
||||
"reasoning": "Requires conditional logic and integration with the Google Search Grounding API."
|
||||
},
|
||||
{
|
||||
"taskId": 76,
|
||||
"taskTitle": "Develop E2E Test Framework for Taskmaster MCP Server (FastMCP over stdio)",
|
||||
"complexityScore": 9,
|
||||
"recommendedSubtasks": 7,
|
||||
"expansionPrompt": "Expand the 'Develop E2E Test Framework for Taskmaster MCP Server (FastMCP over stdio)' task by detailing the steps for launching the FastMCP server, implementing the message protocol handler, and developing the request/response correlation mechanism.",
|
||||
"reasoning": "Requires complex system integration and robust error handling. Designing a comprehensive test framework adds complexity."
|
||||
}
|
||||
]
|
||||
}
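For orientation, every entry in the report above follows the same shape: taskId, taskTitle, complexityScore, recommendedSubtasks, expansionPrompt, and reasoning. A minimal sketch of consuming such a report is shown below; the file path and the top-level complexityAnalysis key are illustrative assumptions, not something this diff introduces.

// Sketch only: summarize a task-complexity report and list the highest-scoring tasks.
// Assumed: report lives at scripts/task-complexity-report.json with a
// "complexityAnalysis" array of entries shaped like the ones shown above.
import { readFileSync } from 'fs';

const reportPath = 'scripts/task-complexity-report.json'; // assumed location
const report = JSON.parse(readFileSync(reportPath, 'utf8'));
const entries = report.complexityAnalysis ?? [];

entries
  .filter((entry) => entry.complexityScore >= 8) // arbitrary threshold for "high complexity"
  .sort((a, b) => b.complexityScore - a.complexityScore)
  .forEach((entry) => {
    console.log(
      `#${entry.taskId} ${entry.taskTitle}: complexity ${entry.complexityScore}, ` +
        `${entry.recommendedSubtasks} recommended subtasks`
    );
  });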