diff --git a/scripts/init.js b/scripts/init.js index 9f636314..2eba5536 100755 --- a/scripts/init.js +++ b/scripts/init.js @@ -13,847 +13,976 @@ * For the full license text, see the LICENSE file in the root directory. */ -import fs from 'fs'; -import path from 'path'; -import readline from 'readline'; -import { fileURLToPath } from 'url'; -import { dirname } from 'path'; -import chalk from 'chalk'; -import figlet from 'figlet'; -import boxen from 'boxen'; -import gradient from 'gradient-string'; -import { isSilentMode } from './modules/utils.js'; -import { convertAllCursorRulesToRooRules } from './modules/rule-transformer.js'; -import { execSync } from 'child_process'; +import fs from "fs"; +import path from "path"; +import readline from "readline"; +import { fileURLToPath } from "url"; +import { dirname } from "path"; +import chalk from "chalk"; +import figlet from "figlet"; +import boxen from "boxen"; +import gradient from "gradient-string"; +import { isSilentMode } from "./modules/utils.js"; +import { convertAllCursorRulesToRooRules } from "./modules/rule-transformer.js"; +import { execSync } from "child_process"; +import { registerUserWithGateway } from "./modules/telemetry-submission.js"; const __filename = fileURLToPath(import.meta.url); const __dirname = dirname(__filename); // Define log levels const LOG_LEVELS = { - debug: 0, - info: 1, - warn: 2, - error: 3, - success: 4 + debug: 0, + info: 1, + warn: 2, + error: 3, + success: 4, }; // Determine log level from environment variable or default to 'info' const LOG_LEVEL = process.env.TASKMASTER_LOG_LEVEL - ? LOG_LEVELS[process.env.TASKMASTER_LOG_LEVEL.toLowerCase()] - : LOG_LEVELS.info; // Default to info + ? LOG_LEVELS[process.env.TASKMASTER_LOG_LEVEL.toLowerCase()] + : LOG_LEVELS.info; // Default to info // Create a color gradient for the banner -const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']); -const warmGradient = gradient(['#fb8b24', '#e36414', '#9a031e']); +const coolGradient = gradient(["#00b4d8", "#0077b6", "#03045e"]); +const warmGradient = gradient(["#fb8b24", "#e36414", "#9a031e"]); // Display a fancy banner function displayBanner() { - if (isSilentMode()) return; + if (isSilentMode()) return; - console.clear(); - const bannerText = figlet.textSync('Task Master AI', { - font: 'Standard', - horizontalLayout: 'default', - verticalLayout: 'default' - }); + console.clear(); + const bannerText = figlet.textSync("Task Master AI", { + font: "Standard", + horizontalLayout: "default", + verticalLayout: "default", + }); - console.log(coolGradient(bannerText)); + console.log(coolGradient(bannerText)); - // Add creator credit line below the banner - console.log( - chalk.dim('by ') + chalk.cyan.underline('https://x.com/eyaltoledano') - ); + // Add creator credit line below the banner + console.log( + chalk.dim("by ") + chalk.cyan.underline("https://x.com/eyaltoledano") + ); - console.log( - boxen(chalk.white(`${chalk.bold('Initializing')} your new project`), { - padding: 1, - margin: { top: 0, bottom: 1 }, - borderStyle: 'round', - borderColor: 'cyan' - }) - ); + console.log( + boxen(chalk.white(`${chalk.bold("Initializing")} your new project`), { + padding: 1, + margin: { top: 0, bottom: 1 }, + borderStyle: "round", + borderColor: "cyan", + }) + ); } // Logging function with icons and colors function log(level, ...args) { - const icons = { - debug: chalk.gray('🔍'), - info: chalk.blue('â„šī¸'), - warn: chalk.yellow('âš ī¸'), - error: chalk.red('❌'), - success: chalk.green('✅') - }; + const icons = { + debug: 
chalk.gray("🔍"), + info: chalk.blue("â„šī¸"), + warn: chalk.yellow("âš ī¸"), + error: chalk.red("❌"), + success: chalk.green("✅"), + }; - if (LOG_LEVELS[level] >= LOG_LEVEL) { - const icon = icons[level] || ''; + if (LOG_LEVELS[level] >= LOG_LEVEL) { + const icon = icons[level] || ""; - // Only output to console if not in silent mode - if (!isSilentMode()) { - if (level === 'error') { - console.error(icon, chalk.red(...args)); - } else if (level === 'warn') { - console.warn(icon, chalk.yellow(...args)); - } else if (level === 'success') { - console.log(icon, chalk.green(...args)); - } else if (level === 'info') { - console.log(icon, chalk.blue(...args)); - } else { - console.log(icon, ...args); - } - } - } + // Only output to console if not in silent mode + if (!isSilentMode()) { + if (level === "error") { + console.error(icon, chalk.red(...args)); + } else if (level === "warn") { + console.warn(icon, chalk.yellow(...args)); + } else if (level === "success") { + console.log(icon, chalk.green(...args)); + } else if (level === "info") { + console.log(icon, chalk.blue(...args)); + } else { + console.log(icon, ...args); + } + } + } - // Write to debug log if DEBUG=true - if (process.env.DEBUG === 'true') { - const logMessage = `[${level.toUpperCase()}] ${args.join(' ')}\n`; - fs.appendFileSync('init-debug.log', logMessage); - } + // Write to debug log if DEBUG=true + if (process.env.DEBUG === "true") { + const logMessage = `[${level.toUpperCase()}] ${args.join(" ")}\n`; + fs.appendFileSync("init-debug.log", logMessage); + } } // Function to create directory if it doesn't exist function ensureDirectoryExists(dirPath) { - if (!fs.existsSync(dirPath)) { - fs.mkdirSync(dirPath, { recursive: true }); - log('info', `Created directory: ${dirPath}`); - } + if (!fs.existsSync(dirPath)) { + fs.mkdirSync(dirPath, { recursive: true }); + log("info", `Created directory: ${dirPath}`); + } } // Function to add shell aliases to the user's shell configuration function addShellAliases() { - const homeDir = process.env.HOME || process.env.USERPROFILE; - let shellConfigFile; + const homeDir = process.env.HOME || process.env.USERPROFILE; + let shellConfigFile; - // Determine which shell config file to use - if (process.env.SHELL?.includes('zsh')) { - shellConfigFile = path.join(homeDir, '.zshrc'); - } else if (process.env.SHELL?.includes('bash')) { - shellConfigFile = path.join(homeDir, '.bashrc'); - } else { - log('warn', 'Could not determine shell type. Aliases not added.'); - return false; - } + // Determine which shell config file to use + if (process.env.SHELL?.includes("zsh")) { + shellConfigFile = path.join(homeDir, ".zshrc"); + } else if (process.env.SHELL?.includes("bash")) { + shellConfigFile = path.join(homeDir, ".bashrc"); + } else { + log("warn", "Could not determine shell type. Aliases not added."); + return false; + } - try { - // Check if file exists - if (!fs.existsSync(shellConfigFile)) { - log( - 'warn', - `Shell config file ${shellConfigFile} not found. Aliases not added.` - ); - return false; - } + try { + // Check if file exists + if (!fs.existsSync(shellConfigFile)) { + log( + "warn", + `Shell config file ${shellConfigFile} not found. 
Aliases not added.` + ); + return false; + } - // Check if aliases already exist - const configContent = fs.readFileSync(shellConfigFile, 'utf8'); - if (configContent.includes("alias tm='task-master'")) { - log('info', 'Task Master aliases already exist in shell config.'); - return true; - } + // Check if aliases already exist + const configContent = fs.readFileSync(shellConfigFile, "utf8"); + if (configContent.includes("alias tm='task-master'")) { + log("info", "Task Master aliases already exist in shell config."); + return true; + } - // Add aliases to the shell config file - const aliasBlock = ` + // Add aliases to the shell config file + const aliasBlock = ` # Task Master aliases added on ${new Date().toLocaleDateString()} alias tm='task-master' alias taskmaster='task-master' `; - fs.appendFileSync(shellConfigFile, aliasBlock); - log('success', `Added Task Master aliases to ${shellConfigFile}`); - log( - 'info', - 'To use the aliases in your current terminal, run: source ' + - shellConfigFile - ); + fs.appendFileSync(shellConfigFile, aliasBlock); + log("success", `Added Task Master aliases to ${shellConfigFile}`); + log( + "info", + "To use the aliases in your current terminal, run: source " + + shellConfigFile + ); - return true; - } catch (error) { - log('error', `Failed to add aliases: ${error.message}`); - return false; - } + return true; + } catch (error) { + log("error", `Failed to add aliases: ${error.message}`); + return false; + } } // Function to copy a file from the package to the target directory function copyTemplateFile(templateName, targetPath, replacements = {}) { - // Get the file content from the appropriate source directory - let sourcePath; + // Get the file content from the appropriate source directory + let sourcePath; - // Map template names to their actual source paths - switch (templateName) { - // case 'scripts_README.md': - // sourcePath = path.join(__dirname, '..', 'assets', 'scripts_README.md'); - // break; - case 'dev_workflow.mdc': - sourcePath = path.join( - __dirname, - '..', - '.cursor', - 'rules', - 'dev_workflow.mdc' - ); - break; - case 'taskmaster.mdc': - sourcePath = path.join( - __dirname, - '..', - '.cursor', - 'rules', - 'taskmaster.mdc' - ); - break; - case 'cursor_rules.mdc': - sourcePath = path.join( - __dirname, - '..', - '.cursor', - 'rules', - 'cursor_rules.mdc' - ); - break; - case 'self_improve.mdc': - sourcePath = path.join( - __dirname, - '..', - '.cursor', - 'rules', - 'self_improve.mdc' - ); - break; - // case 'README-task-master.md': - // sourcePath = path.join(__dirname, '..', 'README-task-master.md'); - break; - case 'windsurfrules': - sourcePath = path.join(__dirname, '..', 'assets', '.windsurfrules'); - break; - case '.roomodes': - sourcePath = path.join(__dirname, '..', 'assets', 'roocode', '.roomodes'); - break; - case 'architect-rules': - case 'ask-rules': - case 'boomerang-rules': - case 'code-rules': - case 'debug-rules': - case 'test-rules': - // Extract the mode name from the template name (e.g., 'architect' from 'architect-rules') - const mode = templateName.split('-')[0]; - sourcePath = path.join( - __dirname, - '..', - 'assets', - 'roocode', - '.roo', - `rules-${mode}`, - templateName - ); - break; - default: - // For other files like env.example, gitignore, etc. 
that don't have direct equivalents - sourcePath = path.join(__dirname, '..', 'assets', templateName); - } + // Map template names to their actual source paths + switch (templateName) { + // case 'scripts_README.md': + // sourcePath = path.join(__dirname, '..', 'assets', 'scripts_README.md'); + // break; + case "dev_workflow.mdc": + sourcePath = path.join( + __dirname, + "..", + ".cursor", + "rules", + "dev_workflow.mdc" + ); + break; + case "taskmaster.mdc": + sourcePath = path.join( + __dirname, + "..", + ".cursor", + "rules", + "taskmaster.mdc" + ); + break; + case "cursor_rules.mdc": + sourcePath = path.join( + __dirname, + "..", + ".cursor", + "rules", + "cursor_rules.mdc" + ); + break; + case "self_improve.mdc": + sourcePath = path.join( + __dirname, + "..", + ".cursor", + "rules", + "self_improve.mdc" + ); + break; + // case 'README-task-master.md': + // sourcePath = path.join(__dirname, '..', 'README-task-master.md'); + break; + case "windsurfrules": + sourcePath = path.join(__dirname, "..", "assets", ".windsurfrules"); + break; + case ".roomodes": + sourcePath = path.join(__dirname, "..", "assets", "roocode", ".roomodes"); + break; + case "architect-rules": + case "ask-rules": + case "boomerang-rules": + case "code-rules": + case "debug-rules": + case "test-rules": + // Extract the mode name from the template name (e.g., 'architect' from 'architect-rules') + const mode = templateName.split("-")[0]; + sourcePath = path.join( + __dirname, + "..", + "assets", + "roocode", + ".roo", + `rules-${mode}`, + templateName + ); + break; + default: + // For other files like env.example, gitignore, etc. that don't have direct equivalents + sourcePath = path.join(__dirname, "..", "assets", templateName); + } - // Check if the source file exists - if (!fs.existsSync(sourcePath)) { - // Fall back to templates directory for files that might not have been moved yet - sourcePath = path.join(__dirname, '..', 'assets', templateName); - if (!fs.existsSync(sourcePath)) { - log('error', `Source file not found: ${sourcePath}`); - return; - } - } + // Check if the source file exists + if (!fs.existsSync(sourcePath)) { + // Fall back to templates directory for files that might not have been moved yet + sourcePath = path.join(__dirname, "..", "assets", templateName); + if (!fs.existsSync(sourcePath)) { + log("error", `Source file not found: ${sourcePath}`); + return; + } + } - let content = fs.readFileSync(sourcePath, 'utf8'); + let content = fs.readFileSync(sourcePath, "utf8"); - // Replace placeholders with actual values - Object.entries(replacements).forEach(([key, value]) => { - const regex = new RegExp(`\\{\\{${key}\\}\\}`, 'g'); - content = content.replace(regex, value); - }); + // Replace placeholders with actual values + Object.entries(replacements).forEach(([key, value]) => { + const regex = new RegExp(`\\{\\{${key}\\}\\}`, "g"); + content = content.replace(regex, value); + }); - // Handle special files that should be merged instead of overwritten - if (fs.existsSync(targetPath)) { - const filename = path.basename(targetPath); + // Handle special files that should be merged instead of overwritten + if (fs.existsSync(targetPath)) { + const filename = path.basename(targetPath); - // Handle .gitignore - append lines that don't exist - if (filename === '.gitignore') { - log('info', `${targetPath} already exists, merging content...`); - const existingContent = fs.readFileSync(targetPath, 'utf8'); - const existingLines = new Set( - existingContent.split('\n').map((line) => line.trim()) - ); - const 
newLines = content - .split('\n') - .filter((line) => !existingLines.has(line.trim())); + // Handle .gitignore - append lines that don't exist + if (filename === ".gitignore") { + log("info", `${targetPath} already exists, merging content...`); + const existingContent = fs.readFileSync(targetPath, "utf8"); + const existingLines = new Set( + existingContent.split("\n").map((line) => line.trim()) + ); + const newLines = content + .split("\n") + .filter((line) => !existingLines.has(line.trim())); - if (newLines.length > 0) { - // Add a comment to separate the original content from our additions - const updatedContent = - existingContent.trim() + - '\n\n# Added by Claude Task Master\n' + - newLines.join('\n'); - fs.writeFileSync(targetPath, updatedContent); - log('success', `Updated ${targetPath} with additional entries`); - } else { - log('info', `No new content to add to ${targetPath}`); - } - return; - } + if (newLines.length > 0) { + // Add a comment to separate the original content from our additions + const updatedContent = + existingContent.trim() + + "\n\n# Added by Claude Task Master\n" + + newLines.join("\n"); + fs.writeFileSync(targetPath, updatedContent); + log("success", `Updated ${targetPath} with additional entries`); + } else { + log("info", `No new content to add to ${targetPath}`); + } + return; + } - // Handle .windsurfrules - append the entire content - if (filename === '.windsurfrules') { - log( - 'info', - `${targetPath} already exists, appending content instead of overwriting...` - ); - const existingContent = fs.readFileSync(targetPath, 'utf8'); + // Handle .windsurfrules - append the entire content + if (filename === ".windsurfrules") { + log( + "info", + `${targetPath} already exists, appending content instead of overwriting...` + ); + const existingContent = fs.readFileSync(targetPath, "utf8"); - // Add a separator comment before appending our content - const updatedContent = - existingContent.trim() + - '\n\n# Added by Task Master - Development Workflow Rules\n\n' + - content; - fs.writeFileSync(targetPath, updatedContent); - log('success', `Updated ${targetPath} with additional rules`); - return; - } + // Add a separator comment before appending our content + const updatedContent = + existingContent.trim() + + "\n\n# Added by Task Master - Development Workflow Rules\n\n" + + content; + fs.writeFileSync(targetPath, updatedContent); + log("success", `Updated ${targetPath} with additional rules`); + return; + } - // Handle README.md - offer to preserve or create a different file - if (filename === 'README-task-master.md') { - log('info', `${targetPath} already exists`); - // Create a separate README file specifically for this project - const taskMasterReadmePath = path.join( - path.dirname(targetPath), - 'README-task-master.md' - ); - fs.writeFileSync(taskMasterReadmePath, content); - log( - 'success', - `Created ${taskMasterReadmePath} (preserved original README-task-master.md)` - ); - return; - } + // Handle README.md - offer to preserve or create a different file + if (filename === "README-task-master.md") { + log("info", `${targetPath} already exists`); + // Create a separate README file specifically for this project + const taskMasterReadmePath = path.join( + path.dirname(targetPath), + "README-task-master.md" + ); + fs.writeFileSync(taskMasterReadmePath, content); + log( + "success", + `Created ${taskMasterReadmePath} (preserved original README-task-master.md)` + ); + return; + } - // For other files, warn and prompt before overwriting - log('warn', 
`${targetPath} already exists, skipping.`); - return; - } + // For other files, warn and prompt before overwriting + log("warn", `${targetPath} already exists, skipping.`); + return; + } - // If the file doesn't exist, create it normally - fs.writeFileSync(targetPath, content); - log('info', `Created file: ${targetPath}`); + // If the file doesn't exist, create it normally + fs.writeFileSync(targetPath, content); + log("info", `Created file: ${targetPath}`); } // Main function to initialize a new project (No longer needs isInteractive logic) async function initializeProject(options = {}) { - // Receives options as argument - // Only display banner if not in silent mode - if (!isSilentMode()) { - displayBanner(); - } + // Receives options as argument + // Only display banner if not in silent mode + if (!isSilentMode()) { + displayBanner(); + } - // Debug logging only if not in silent mode - // if (!isSilentMode()) { - // console.log('===== DEBUG: INITIALIZE PROJECT OPTIONS RECEIVED ====='); - // console.log('Full options object:', JSON.stringify(options)); - // console.log('options.yes:', options.yes); - // console.log('=================================================='); - // } + // Debug logging only if not in silent mode + // if (!isSilentMode()) { + // console.log('===== DEBUG: INITIALIZE PROJECT OPTIONS RECEIVED ====='); + // console.log('Full options object:', JSON.stringify(options)); + // console.log('options.yes:', options.yes); + // console.log('=================================================='); + // } - const skipPrompts = options.yes || (options.name && options.description); + const skipPrompts = options.yes || (options.name && options.description); - // if (!isSilentMode()) { - // console.log('Skip prompts determined:', skipPrompts); - // } + // if (!isSilentMode()) { + // console.log('Skip prompts determined:', skipPrompts); + // } - if (skipPrompts) { - if (!isSilentMode()) { - console.log('SKIPPING PROMPTS - Using defaults or provided values'); - } + if (skipPrompts) { + if (!isSilentMode()) { + console.log("SKIPPING PROMPTS - Using defaults or provided values"); + } - // Use provided options or defaults - const projectName = options.name || 'task-master-project'; - const projectDescription = - options.description || 'A project managed with Task Master AI'; - const projectVersion = options.version || '0.1.0'; - const authorName = options.author || 'Vibe coder'; - const dryRun = options.dryRun || false; - const addAliases = options.aliases || false; + // Use provided options or defaults + const projectName = options.name || "task-master-project"; + const projectDescription = + options.description || "A project managed with Task Master AI"; + const projectVersion = options.version || "0.1.0"; + const authorName = options.author || "Vibe coder"; + const dryRun = options.dryRun || false; + const addAliases = options.aliases || false; - if (dryRun) { - log('info', 'DRY RUN MODE: No files will be modified'); - log('info', 'Would initialize Task Master project'); - log('info', 'Would create/update necessary project files'); - if (addAliases) { - log('info', 'Would add shell aliases for task-master'); - } - return { - dryRun: true - }; - } + if (dryRun) { + log("info", "DRY RUN MODE: No files will be modified"); + log("info", "Would initialize Task Master project"); + log("info", "Would create/update necessary project files"); + if (addAliases) { + log("info", "Would add shell aliases for task-master"); + } + return { + dryRun: true, + }; + } - createProjectStructure(addAliases, 
dryRun); - } else { - // Interactive logic - log('info', 'Required options not provided, proceeding with prompts.'); - const rl = readline.createInterface({ - input: process.stdin, - output: process.stdout - }); + createProjectStructure(addAliases, dryRun, null); + } else { + // Interactive logic + log("info", "Required options not provided, proceeding with prompts."); + const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout, + }); - try { - // Only prompt for shell aliases - const addAliasesInput = await promptQuestion( - rl, - chalk.cyan( - 'Add shell aliases for task-master? This lets you type "tm" instead of "task-master" (Y/n): ' - ) - ); - const addAliasesPrompted = addAliasesInput.trim().toLowerCase() !== 'n'; + try { + // Prompt for shell aliases + const addAliasesInput = await promptQuestion( + rl, + chalk.cyan( + 'Add shell aliases for task-master? This lets you type "tm" instead of "task-master" (Y/n): ' + ) + ); + const addAliasesPrompted = addAliasesInput.trim().toLowerCase() !== "n"; - // Confirm settings... - console.log('\nTask Master Project settings:'); - console.log( - chalk.blue( - 'Add shell aliases (so you can use "tm" instead of "task-master"):' - ), - chalk.white(addAliasesPrompted ? 'Yes' : 'No') - ); + // Prompt for hosted telemetry gateway + const useHostedGatewayInput = await promptQuestion( + rl, + chalk.cyan( + "Enable TaskMaster hosted telemetry gateway? This helps improve the product by sharing anonymous usage data (Y/n): " + ) + ); + const useHostedGateway = + useHostedGatewayInput.trim().toLowerCase() !== "n"; - const confirmInput = await promptQuestion( - rl, - chalk.yellow('\nDo you want to continue with these settings? (Y/n): ') - ); - const shouldContinue = confirmInput.trim().toLowerCase() !== 'n'; - rl.close(); + let gatewayRegistration = null; + if (useHostedGateway) { + // Prompt for email + const emailInput = await promptQuestion( + rl, + chalk.cyan("Enter your email address for telemetry registration: ") + ); + const email = emailInput.trim(); - if (!shouldContinue) { - log('info', 'Project initialization cancelled by user'); - process.exit(0); - return; - } + if (email && email.includes("@")) { + console.log( + chalk.blue("Registering with TaskMaster telemetry gateway...") + ); + gatewayRegistration = await registerUserWithGateway(email); - const dryRun = options.dryRun || false; + if (gatewayRegistration.success) { + console.log( + chalk.green( + `✅ Successfully ${gatewayRegistration.isNewUser ? "registered" : "found"} user!` + ) + ); + console.log(chalk.dim(`User ID: ${gatewayRegistration.userId}`)); + } else { + console.log( + chalk.yellow( + `âš ī¸ Gateway registration failed: ${gatewayRegistration.error}` + ) + ); + console.log( + chalk.dim("You can configure telemetry manually later.") + ); + } + } else { + console.log( + chalk.yellow( + "âš ī¸ Invalid email address. Skipping gateway registration." + ) + ); + } + } - if (dryRun) { - log('info', 'DRY RUN MODE: No files will be modified'); - log('info', 'Would initialize Task Master project'); - log('info', 'Would create/update necessary project files'); - if (addAliasesPrompted) { - log('info', 'Would add shell aliases for task-master'); - } - return { - dryRun: true - }; - } + // Confirm settings + console.log("\nTask Master Project settings:"); + console.log( + chalk.blue( + 'Add shell aliases (so you can use "tm" instead of "task-master"):' + ), + chalk.white(addAliasesPrompted ? 
"Yes" : "No") + ); + console.log( + chalk.blue("Hosted telemetry gateway:"), + chalk.white(useHostedGateway ? "Enabled" : "Disabled") + ); + if (gatewayRegistration?.success) { + console.log( + chalk.blue("Telemetry user:"), + chalk.white( + `${gatewayRegistration.email} (${gatewayRegistration.userId})` + ) + ); + } - // Create structure using only necessary values - createProjectStructure(addAliasesPrompted, dryRun); - } catch (error) { - rl.close(); - log('error', `Error during initialization process: ${error.message}`); - process.exit(1); - } - } + const confirmInput = await promptQuestion( + rl, + chalk.yellow("\nDo you want to continue with these settings? (Y/n): ") + ); + const shouldContinue = confirmInput.trim().toLowerCase() !== "n"; + rl.close(); + + if (!shouldContinue) { + log("info", "Project initialization cancelled by user"); + process.exit(0); + return; + } + + const dryRun = options.dryRun || false; + + if (dryRun) { + log("info", "DRY RUN MODE: No files will be modified"); + log("info", "Would initialize Task Master project"); + log("info", "Would create/update necessary project files"); + if (addAliasesPrompted) { + log("info", "Would add shell aliases for task-master"); + } + if (useHostedGateway && gatewayRegistration?.success) { + log("info", "Would configure hosted telemetry gateway"); + } + return { + dryRun: true, + }; + } + + // Create structure with telemetry configuration + createProjectStructure(addAliasesPrompted, dryRun, gatewayRegistration); + } catch (error) { + rl.close(); + log("error", `Error during initialization process: ${error.message}`); + process.exit(1); + } + } } // Helper function to promisify readline question function promptQuestion(rl, question) { - return new Promise((resolve) => { - rl.question(question, (answer) => { - resolve(answer); - }); - }); + return new Promise((resolve) => { + rl.question(question, (answer) => { + resolve(answer); + }); + }); } // Function to create the project structure -function createProjectStructure(addAliases, dryRun) { - const targetDir = process.cwd(); - log('info', `Initializing project in ${targetDir}`); +function createProjectStructure(addAliases, dryRun, gatewayRegistration) { + const targetDir = process.cwd(); + log("info", `Initializing project in ${targetDir}`); - // Create directories - ensureDirectoryExists(path.join(targetDir, '.cursor', 'rules')); + // Create directories + ensureDirectoryExists(path.join(targetDir, ".cursor", "rules")); - // Create Roo directories - ensureDirectoryExists(path.join(targetDir, '.roo')); - ensureDirectoryExists(path.join(targetDir, '.roo', 'rules')); - for (const mode of [ - 'architect', - 'ask', - 'boomerang', - 'code', - 'debug', - 'test' - ]) { - ensureDirectoryExists(path.join(targetDir, '.roo', `rules-${mode}`)); - } + // Create Roo directories + ensureDirectoryExists(path.join(targetDir, ".roo")); + ensureDirectoryExists(path.join(targetDir, ".roo", "rules")); + for (const mode of [ + "architect", + "ask", + "boomerang", + "code", + "debug", + "test", + ]) { + ensureDirectoryExists(path.join(targetDir, ".roo", `rules-${mode}`)); + } - ensureDirectoryExists(path.join(targetDir, 'scripts')); - ensureDirectoryExists(path.join(targetDir, 'tasks')); + ensureDirectoryExists(path.join(targetDir, "scripts")); + ensureDirectoryExists(path.join(targetDir, "tasks")); - // Setup MCP configuration for integration with Cursor - setupMCPConfiguration(targetDir); + // Setup MCP configuration for integration with Cursor + setupMCPConfiguration(targetDir); - // Copy template 
files with replacements - const replacements = { - year: new Date().getFullYear() - }; + // Copy template files with replacements + const replacements = { + year: new Date().getFullYear(), + }; - // Copy .env.example - copyTemplateFile( - 'env.example', - path.join(targetDir, '.env.example'), - replacements - ); + // Copy .env.example + copyTemplateFile( + "env.example", + path.join(targetDir, ".env.example"), + replacements + ); - // Copy .taskmasterconfig with project name - copyTemplateFile( - '.taskmasterconfig', - path.join(targetDir, '.taskmasterconfig'), - { - ...replacements - } - ); + // Copy .taskmasterconfig with project name + copyTemplateFile( + ".taskmasterconfig", + path.join(targetDir, ".taskmasterconfig"), + { + ...replacements, + } + ); - // Copy .gitignore - copyTemplateFile('gitignore', path.join(targetDir, '.gitignore')); + // Configure telemetry if gateway registration was successful + if (gatewayRegistration?.success) { + configureTelemetrySettings(targetDir, gatewayRegistration); + } - // Copy dev_workflow.mdc - copyTemplateFile( - 'dev_workflow.mdc', - path.join(targetDir, '.cursor', 'rules', 'dev_workflow.mdc') - ); + // Copy .gitignore + copyTemplateFile("gitignore", path.join(targetDir, ".gitignore")); - // Copy taskmaster.mdc - copyTemplateFile( - 'taskmaster.mdc', - path.join(targetDir, '.cursor', 'rules', 'taskmaster.mdc') - ); + // Copy dev_workflow.mdc + copyTemplateFile( + "dev_workflow.mdc", + path.join(targetDir, ".cursor", "rules", "dev_workflow.mdc") + ); - // Copy cursor_rules.mdc - copyTemplateFile( - 'cursor_rules.mdc', - path.join(targetDir, '.cursor', 'rules', 'cursor_rules.mdc') - ); + // Copy taskmaster.mdc + copyTemplateFile( + "taskmaster.mdc", + path.join(targetDir, ".cursor", "rules", "taskmaster.mdc") + ); - // Copy self_improve.mdc - copyTemplateFile( - 'self_improve.mdc', - path.join(targetDir, '.cursor', 'rules', 'self_improve.mdc') - ); + // Copy cursor_rules.mdc + copyTemplateFile( + "cursor_rules.mdc", + path.join(targetDir, ".cursor", "rules", "cursor_rules.mdc") + ); - // Generate Roo rules from Cursor rules - log('info', 'Generating Roo rules from Cursor rules...'); - convertAllCursorRulesToRooRules(targetDir); + // Copy self_improve.mdc + copyTemplateFile( + "self_improve.mdc", + path.join(targetDir, ".cursor", "rules", "self_improve.mdc") + ); - // Copy .windsurfrules - copyTemplateFile('windsurfrules', path.join(targetDir, '.windsurfrules')); + // Generate Roo rules from Cursor rules + log("info", "Generating Roo rules from Cursor rules..."); + convertAllCursorRulesToRooRules(targetDir); - // Copy .roomodes for Roo Code integration - copyTemplateFile('.roomodes', path.join(targetDir, '.roomodes')); + // Copy .windsurfrules + copyTemplateFile("windsurfrules", path.join(targetDir, ".windsurfrules")); - // Copy Roo rule files for each mode - const rooModes = ['architect', 'ask', 'boomerang', 'code', 'debug', 'test']; - for (const mode of rooModes) { - copyTemplateFile( - `${mode}-rules`, - path.join(targetDir, '.roo', `rules-${mode}`, `${mode}-rules`) - ); - } + // Copy .roomodes for Roo Code integration + copyTemplateFile(".roomodes", path.join(targetDir, ".roomodes")); - // Copy example_prd.txt - copyTemplateFile( - 'example_prd.txt', - path.join(targetDir, 'scripts', 'example_prd.txt') - ); + // Copy Roo rule files for each mode + const rooModes = ["architect", "ask", "boomerang", "code", "debug", "test"]; + for (const mode of rooModes) { + copyTemplateFile( + `${mode}-rules`, + path.join(targetDir, ".roo", `rules-${mode}`, 
`${mode}-rules`) + ); + } - // // Create main README.md - // copyTemplateFile( - // 'README-task-master.md', - // path.join(targetDir, 'README-task-master.md'), - // replacements - // ); + // Copy example_prd.txt + copyTemplateFile( + "example_prd.txt", + path.join(targetDir, "scripts", "example_prd.txt") + ); - // Initialize git repository if git is available - try { - if (!fs.existsSync(path.join(targetDir, '.git'))) { - log('info', 'Initializing git repository...'); - execSync('git init', { stdio: 'ignore' }); - log('success', 'Git repository initialized'); - } - } catch (error) { - log('warn', 'Git not available, skipping repository initialization'); - } + // // Create main README.md + // copyTemplateFile( + // 'README-task-master.md', + // path.join(targetDir, 'README-task-master.md'), + // replacements + // ); - // Run npm install automatically - const npmInstallOptions = { - cwd: targetDir, - // Default to inherit for interactive CLI, change if silent - stdio: 'inherit' - }; + // Initialize git repository if git is available + try { + if (!fs.existsSync(path.join(targetDir, ".git"))) { + log("info", "Initializing git repository..."); + execSync("git init", { stdio: "ignore" }); + log("success", "Git repository initialized"); + } + } catch (error) { + log("warn", "Git not available, skipping repository initialization"); + } - if (isSilentMode()) { - // If silent (MCP mode), suppress npm install output - npmInstallOptions.stdio = 'ignore'; - log('info', 'Running npm install silently...'); // Log our own message - } else { - // Interactive mode, show the boxen message - console.log( - boxen(chalk.cyan('Installing dependencies...'), { - padding: 0.5, - margin: 0.5, - borderStyle: 'round', - borderColor: 'blue' - }) - ); - } + // Run npm install automatically + const npmInstallOptions = { + cwd: targetDir, + // Default to inherit for interactive CLI, change if silent + stdio: "inherit", + }; - // === Add Model Configuration Step === - if (!isSilentMode() && !dryRun) { - console.log( - boxen(chalk.cyan('Configuring AI Models...'), { - padding: 0.5, - margin: { top: 1, bottom: 0.5 }, - borderStyle: 'round', - borderColor: 'blue' - }) - ); - log( - 'info', - 'Running interactive model setup. Please select your preferred AI models.' - ); - try { - execSync('npx task-master models --setup', { - stdio: 'inherit', - cwd: targetDir - }); - log('success', 'AI Models configured.'); - } catch (error) { - log('error', 'Failed to configure AI models:', error.message); - log('warn', 'You may need to run "task-master models --setup" manually.'); - } - } else if (isSilentMode() && !dryRun) { - log('info', 'Skipping interactive model setup in silent (MCP) mode.'); - log( - 'warn', - 'Please configure AI models using "task-master models --set-..." or the "models" MCP tool.' 
- ); - } else if (dryRun) { - log('info', 'DRY RUN: Skipping interactive model setup.'); - } - // ==================================== + if (isSilentMode()) { + // If silent (MCP mode), suppress npm install output + npmInstallOptions.stdio = "ignore"; + log("info", "Running npm install silently..."); // Log our own message + } else { + // Interactive mode, show the boxen message + console.log( + boxen(chalk.cyan("Installing dependencies..."), { + padding: 0.5, + margin: 0.5, + borderStyle: "round", + borderColor: "blue", + }) + ); + } - // Display success message - if (!isSilentMode()) { - console.log( - boxen( - warmGradient.multiline( - figlet.textSync('Success!', { font: 'Standard' }) - ) + - '\n' + - chalk.green('Project initialized successfully!'), - { - padding: 1, - margin: 1, - borderStyle: 'double', - borderColor: 'green' - } - ) - ); - } + // === Add Model Configuration Step === + if (!isSilentMode() && !dryRun) { + console.log( + boxen(chalk.cyan("Configuring AI Models..."), { + padding: 0.5, + margin: { top: 1, bottom: 0.5 }, + borderStyle: "round", + borderColor: "blue", + }) + ); + log( + "info", + "Running interactive model setup. Please select your preferred AI models." + ); + try { + execSync("npx task-master models --setup", { + stdio: "inherit", + cwd: targetDir, + }); + log("success", "AI Models configured."); + } catch (error) { + log("error", "Failed to configure AI models:", error.message); + log("warn", 'You may need to run "task-master models --setup" manually.'); + } + } else if (isSilentMode() && !dryRun) { + log("info", "Skipping interactive model setup in silent (MCP) mode."); + log( + "warn", + 'Please configure AI models using "task-master models --set-..." or the "models" MCP tool.' + ); + } else if (dryRun) { + log("info", "DRY RUN: Skipping interactive model setup."); + } + // ==================================== - // Display next steps in a nice box - if (!isSilentMode()) { - console.log( - boxen( - chalk.cyan.bold('Things you should do next:') + - '\n\n' + - chalk.white('1. ') + - chalk.yellow( - 'Configure AI models (if needed) and add API keys to `.env`' - ) + - '\n' + - chalk.white(' ├─ ') + - chalk.dim('Models: Use `task-master models` commands') + - '\n' + - chalk.white(' └─ ') + - chalk.dim( - 'Keys: Add provider API keys to .env (or inside the MCP config file i.e. .cursor/mcp.json)' - ) + - '\n' + - chalk.white('2. ') + - chalk.yellow( - 'Discuss your idea with AI and ask for a PRD using example_prd.txt, and save it to scripts/PRD.txt' - ) + - '\n' + - chalk.white('3. ') + - chalk.yellow( - 'Ask Cursor Agent (or run CLI) to parse your PRD and generate initial tasks:' - ) + - '\n' + - chalk.white(' └─ ') + - chalk.dim('MCP Tool: ') + - chalk.cyan('parse_prd') + - chalk.dim(' | CLI: ') + - chalk.cyan('task-master parse-prd scripts/prd.txt') + - '\n' + - chalk.white('4. ') + - chalk.yellow( - 'Ask Cursor to analyze the complexity of the tasks in your PRD using research' - ) + - '\n' + - chalk.white(' └─ ') + - chalk.dim('MCP Tool: ') + - chalk.cyan('analyze_project_complexity') + - chalk.dim(' | CLI: ') + - chalk.cyan('task-master analyze-complexity') + - '\n' + - chalk.white('5. ') + - chalk.yellow( - 'Ask Cursor to expand all of your tasks using the complexity analysis' - ) + - '\n' + - chalk.white('6. ') + - chalk.yellow('Ask Cursor to begin working on the next task') + - '\n' + - chalk.white('7. ') + - chalk.yellow( - 'Ask Cursor to set the status of one or many tasks/subtasks at a time. Use the task id from the task lists.' 
- ) + - '\n' + - chalk.white('8. ') + - chalk.yellow( - 'Ask Cursor to update all tasks from a specific task id based on new learnings or pivots in your project.' - ) + - '\n' + - chalk.white('9. ') + - chalk.green.bold('Ship it!') + - '\n\n' + - chalk.dim( - '* Review the README.md file to learn how to use other commands via Cursor Agent.' - ) + - '\n' + - chalk.dim( - '* Use the task-master command without arguments to see all available commands.' - ), - { - padding: 1, - margin: 1, - borderStyle: 'round', - borderColor: 'yellow', - title: 'Getting Started', - titleAlignment: 'center' - } - ) - ); - } + // Display success message + if (!isSilentMode()) { + console.log( + boxen( + warmGradient.multiline( + figlet.textSync("Success!", { font: "Standard" }) + ) + + "\n" + + chalk.green("Project initialized successfully!"), + { + padding: 1, + margin: 1, + borderStyle: "double", + borderColor: "green", + } + ) + ); + } + + // Display next steps in a nice box + if (!isSilentMode()) { + console.log( + boxen( + chalk.cyan.bold("Things you should do next:") + + "\n\n" + + chalk.white("1. ") + + chalk.yellow( + "Configure AI models (if needed) and add API keys to `.env`" + ) + + "\n" + + chalk.white(" ├─ ") + + chalk.dim("Models: Use `task-master models` commands") + + "\n" + + chalk.white(" └─ ") + + chalk.dim( + "Keys: Add provider API keys to .env (or inside the MCP config file i.e. .cursor/mcp.json)" + ) + + "\n" + + chalk.white("2. ") + + chalk.yellow( + "Discuss your idea with AI and ask for a PRD using example_prd.txt, and save it to scripts/PRD.txt" + ) + + "\n" + + chalk.white("3. ") + + chalk.yellow( + "Ask Cursor Agent (or run CLI) to parse your PRD and generate initial tasks:" + ) + + "\n" + + chalk.white(" └─ ") + + chalk.dim("MCP Tool: ") + + chalk.cyan("parse_prd") + + chalk.dim(" | CLI: ") + + chalk.cyan("task-master parse-prd scripts/prd.txt") + + "\n" + + chalk.white("4. ") + + chalk.yellow( + "Ask Cursor to analyze the complexity of the tasks in your PRD using research" + ) + + "\n" + + chalk.white(" └─ ") + + chalk.dim("MCP Tool: ") + + chalk.cyan("analyze_project_complexity") + + chalk.dim(" | CLI: ") + + chalk.cyan("task-master analyze-complexity") + + "\n" + + chalk.white("5. ") + + chalk.yellow( + "Ask Cursor to expand all of your tasks using the complexity analysis" + ) + + "\n" + + chalk.white("6. ") + + chalk.yellow("Ask Cursor to begin working on the next task") + + "\n" + + chalk.white("7. ") + + chalk.yellow( + "Ask Cursor to set the status of one or many tasks/subtasks at a time. Use the task id from the task lists." + ) + + "\n" + + chalk.white("8. ") + + chalk.yellow( + "Ask Cursor to update all tasks from a specific task id based on new learnings or pivots in your project." + ) + + "\n" + + chalk.white("9. ") + + chalk.green.bold("Ship it!") + + "\n\n" + + chalk.dim( + "* Review the README.md file to learn how to use other commands via Cursor Agent." + ) + + "\n" + + chalk.dim( + "* Use the task-master command without arguments to see all available commands." 
+ ), + { + padding: 1, + margin: 1, + borderStyle: "round", + borderColor: "yellow", + title: "Getting Started", + titleAlignment: "center", + } + ) + ); + } +} + +// Function to configure telemetry settings in .taskmasterconfig and MCP config +function configureTelemetrySettings(targetDir, gatewayRegistration) { + const configPath = path.join(targetDir, ".taskmasterconfig"); + + try { + // Read existing config + const configContent = fs.readFileSync(configPath, "utf8"); + const config = JSON.parse(configContent); + + // Add telemetry configuration + config.telemetry = { + enabled: true, + apiKey: gatewayRegistration.apiKey, + userId: gatewayRegistration.userId, + email: gatewayRegistration.email, + }; + + // Also ensure telemetryEnabled is explicitly set to true at root level + config.telemetryEnabled = true; + + // Write updated config + fs.writeFileSync(configPath, JSON.stringify(config, null, "\t")); + log("success", "Configured telemetry settings in .taskmasterconfig"); + + // Also update MCP configuration to include telemetry credentials + updateMCPTelemetryConfig(targetDir, gatewayRegistration); + } catch (error) { + log("error", `Failed to configure telemetry settings: ${error.message}`); + } +} + +// Function to update MCP configuration with telemetry credentials +function updateMCPTelemetryConfig(targetDir, gatewayRegistration) { + const mcpJsonPath = path.join(targetDir, ".cursor", "mcp.json"); + + try { + if (fs.existsSync(mcpJsonPath)) { + const mcpConfig = JSON.parse(fs.readFileSync(mcpJsonPath, "utf8")); + + // Update the task-master-ai server environment variables + if (mcpConfig.mcpServers && mcpConfig.mcpServers["task-master-ai"]) { + mcpConfig.mcpServers["task-master-ai"].env = { + ...mcpConfig.mcpServers["task-master-ai"].env, + TASKMASTER_API_KEY: gatewayRegistration.apiKey, + TASKMASTER_USER_ID: gatewayRegistration.userId, + TASKMASTER_USER_EMAIL: gatewayRegistration.email, + }; + + fs.writeFileSync(mcpJsonPath, JSON.stringify(mcpConfig, null, 4)); + log("success", "Updated MCP configuration with telemetry credentials"); + } + } + } catch (error) { + log("warn", `Failed to update MCP telemetry config: ${error.message}`); + } } // Function to setup MCP configuration for Cursor integration function setupMCPConfiguration(targetDir) { - const mcpDirPath = path.join(targetDir, '.cursor'); - const mcpJsonPath = path.join(mcpDirPath, 'mcp.json'); + const mcpDirPath = path.join(targetDir, ".cursor"); + const mcpJsonPath = path.join(mcpDirPath, "mcp.json"); - log('info', 'Setting up MCP configuration for Cursor integration...'); + log("info", "Setting up MCP configuration for Cursor integration..."); - // Create .cursor directory if it doesn't exist - ensureDirectoryExists(mcpDirPath); + // Create .cursor directory if it doesn't exist + ensureDirectoryExists(mcpDirPath); - // New MCP config to be added - references the installed package - const newMCPServer = { - 'task-master-ai': { - command: 'npx', - args: ['-y', '--package=task-master-ai', 'task-master-ai'], - env: { - ANTHROPIC_API_KEY: 'ANTHROPIC_API_KEY_HERE', - PERPLEXITY_API_KEY: 'PERPLEXITY_API_KEY_HERE', - OPENAI_API_KEY: 'OPENAI_API_KEY_HERE', - GOOGLE_API_KEY: 'GOOGLE_API_KEY_HERE', - XAI_API_KEY: 'XAI_API_KEY_HERE', - OPENROUTER_API_KEY: 'OPENROUTER_API_KEY_HERE', - MISTRAL_API_KEY: 'MISTRAL_API_KEY_HERE', - AZURE_OPENAI_API_KEY: 'AZURE_OPENAI_API_KEY_HERE', - OLLAMA_API_KEY: 'OLLAMA_API_KEY_HERE' - } - } - }; + // New MCP config to be added - references the installed package + const newMCPServer = { + 
"task-master-ai": { + command: "npx", + args: ["-y", "--package=task-master-ai", "task-master-ai"], + env: { + ANTHROPIC_API_KEY: "ANTHROPIC_API_KEY_HERE", + PERPLEXITY_API_KEY: "PERPLEXITY_API_KEY_HERE", + OPENAI_API_KEY: "OPENAI_API_KEY_HERE", + GOOGLE_API_KEY: "GOOGLE_API_KEY_HERE", + XAI_API_KEY: "XAI_API_KEY_HERE", + OPENROUTER_API_KEY: "OPENROUTER_API_KEY_HERE", + MISTRAL_API_KEY: "MISTRAL_API_KEY_HERE", + AZURE_OPENAI_API_KEY: "AZURE_OPENAI_API_KEY_HERE", + OLLAMA_API_KEY: "OLLAMA_API_KEY_HERE", + }, + }, + }; - // Check if mcp.json already existsimage.png - if (fs.existsSync(mcpJsonPath)) { - log( - 'info', - 'MCP configuration file already exists, checking for existing task-master-mcp...' - ); - try { - // Read existing config - const mcpConfig = JSON.parse(fs.readFileSync(mcpJsonPath, 'utf8')); + // Check if mcp.json already existsimage.png + if (fs.existsSync(mcpJsonPath)) { + log( + "info", + "MCP configuration file already exists, checking for existing task-master-mcp..." + ); + try { + // Read existing config + const mcpConfig = JSON.parse(fs.readFileSync(mcpJsonPath, "utf8")); - // Initialize mcpServers if it doesn't exist - if (!mcpConfig.mcpServers) { - mcpConfig.mcpServers = {}; - } + // Initialize mcpServers if it doesn't exist + if (!mcpConfig.mcpServers) { + mcpConfig.mcpServers = {}; + } - // Check if any existing server configuration already has task-master-mcp in its args - const hasMCPString = Object.values(mcpConfig.mcpServers).some( - (server) => - server.args && - server.args.some( - (arg) => typeof arg === 'string' && arg.includes('task-master-ai') - ) - ); + // Check if any existing server configuration already has task-master-mcp in its args + const hasMCPString = Object.values(mcpConfig.mcpServers).some( + (server) => + server.args && + server.args.some( + (arg) => typeof arg === "string" && arg.includes("task-master-ai") + ) + ); - if (hasMCPString) { - log( - 'info', - 'Found existing task-master-ai MCP configuration in mcp.json, leaving untouched' - ); - return; // Exit early, don't modify the existing configuration - } + if (hasMCPString) { + log( + "info", + "Found existing task-master-ai MCP configuration in mcp.json, leaving untouched" + ); + return; // Exit early, don't modify the existing configuration + } - // Add the task-master-ai server if it doesn't exist - if (!mcpConfig.mcpServers['task-master-ai']) { - mcpConfig.mcpServers['task-master-ai'] = newMCPServer['task-master-ai']; - log( - 'info', - 'Added task-master-ai server to existing MCP configuration' - ); - } else { - log('info', 'task-master-ai server already configured in mcp.json'); - } + // Add the task-master-ai server if it doesn't exist + if (!mcpConfig.mcpServers["task-master-ai"]) { + mcpConfig.mcpServers["task-master-ai"] = newMCPServer["task-master-ai"]; + log( + "info", + "Added task-master-ai server to existing MCP configuration" + ); + } else { + log("info", "task-master-ai server already configured in mcp.json"); + } - // Write the updated configuration - fs.writeFileSync(mcpJsonPath, JSON.stringify(mcpConfig, null, 4)); - log('success', 'Updated MCP configuration file'); - } catch (error) { - log('error', `Failed to update MCP configuration: ${error.message}`); - // Create a backup before potentially modifying - const backupPath = `${mcpJsonPath}.backup-${Date.now()}`; - if (fs.existsSync(mcpJsonPath)) { - fs.copyFileSync(mcpJsonPath, backupPath); - log('info', `Created backup of existing mcp.json at ${backupPath}`); - } + // Write the updated configuration + 
fs.writeFileSync(mcpJsonPath, JSON.stringify(mcpConfig, null, 4)); + log("success", "Updated MCP configuration file"); + } catch (error) { + log("error", `Failed to update MCP configuration: ${error.message}`); + // Create a backup before potentially modifying + const backupPath = `${mcpJsonPath}.backup-${Date.now()}`; + if (fs.existsSync(mcpJsonPath)) { + fs.copyFileSync(mcpJsonPath, backupPath); + log("info", `Created backup of existing mcp.json at ${backupPath}`); + } - // Create new configuration - const newMCPConfig = { - mcpServers: newMCPServer - }; + // Create new configuration + const newMCPConfig = { + mcpServers: newMCPServer, + }; - fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4)); - log( - 'warn', - 'Created new MCP configuration file (backup of original file was created if it existed)' - ); - } - } else { - // If mcp.json doesn't exist, create it - const newMCPConfig = { - mcpServers: newMCPServer - }; + fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4)); + log( + "warn", + "Created new MCP configuration file (backup of original file was created if it existed)" + ); + } + } else { + // If mcp.json doesn't exist, create it + const newMCPConfig = { + mcpServers: newMCPServer, + }; - fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4)); - log('success', 'Created MCP configuration file for Cursor integration'); - } + fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4)); + log("success", "Created MCP configuration file for Cursor integration"); + } - // Add note to console about MCP integration - log('info', 'MCP server will use the installed task-master-ai package'); + // Add note to console about MCP integration + log("info", "MCP server will use the installed task-master-ai package"); } // Ensure necessary functions are exported diff --git a/scripts/modules/telemetry-submission.js b/scripts/modules/telemetry-submission.js index d9b9084d..dd8d6691 100644 --- a/scripts/modules/telemetry-submission.js +++ b/scripts/modules/telemetry-submission.js @@ -22,8 +22,10 @@ const TelemetryDataSchema = z.object({ fullOutput: z.any().optional(), }); -// Configuration -const GATEWAY_ENDPOINT = "http://localhost:4444/api/v1/telemetry"; +// Hardcoded configuration for TaskMaster telemetry gateway +const TASKMASTER_TELEMETRY_ENDPOINT = "http://localhost:4444/api/v1/telemetry"; +const TASKMASTER_USER_REGISTRATION_ENDPOINT = + "http://localhost:4444/api/v1/users"; const MAX_RETRIES = 3; const RETRY_DELAY = 1000; // 1 second @@ -32,27 +34,82 @@ const RETRY_DELAY = 1000; // 1 second * @returns {Object} Configuration object with apiKey, userId, and email */ function getTelemetryConfig() { - // Try environment variables first (for testing) + // Try environment variables first (for testing and manual setup) const envApiKey = - process.env.GATEWAY_API_KEY || process.env.TELEMETRY_API_KEY; + process.env.TASKMASTER_API_KEY || + process.env.GATEWAY_API_KEY || + process.env.TELEMETRY_API_KEY; const envUserId = - process.env.GATEWAY_USER_ID || process.env.TELEMETRY_USER_ID; + process.env.TASKMASTER_USER_ID || + process.env.GATEWAY_USER_ID || + process.env.TELEMETRY_USER_ID; const envEmail = - process.env.GATEWAY_USER_EMAIL || process.env.TELEMETRY_USER_EMAIL; + process.env.TASKMASTER_USER_EMAIL || + process.env.GATEWAY_USER_EMAIL || + process.env.TELEMETRY_USER_EMAIL; if (envApiKey && envUserId && envEmail) { return { apiKey: envApiKey, userId: envUserId, email: envEmail }; } - // Fall back to config file + // Fall back to config file 
(preferred for hosted gateway setup) const config = getConfig(); return { - apiKey: config?.telemetryApiKey, - userId: config?.telemetryUserId, - email: config?.telemetryUserEmail, + apiKey: config?.telemetry?.apiKey || config?.telemetryApiKey, + userId: + config?.telemetry?.userId || + config?.telemetryUserId || + config?.global?.userId, + email: config?.telemetry?.email || config?.telemetryUserEmail, }; } +/** + * Register or find user with TaskMaster telemetry gateway + * @param {string} email - User's email address + * @param {string} [userId] - Optional user ID (will be generated if not provided) + * @returns {Promise} - User registration result with apiKey and userId + */ +export async function registerUserWithGateway(email, userId = null) { + try { + const registrationData = { + email, + ...(userId && { userId }), // Include userId only if provided + }; + + const response = await fetch(TASKMASTER_USER_REGISTRATION_ENDPOINT, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify(registrationData), + }); + + if (response.ok) { + const result = await response.json(); + return { + success: true, + apiKey: result.apiKey, + userId: result.userId, + email: result.email, + isNewUser: result.isNewUser || false, + }; + } else { + const errorData = await response.json().catch(() => ({})); + return { + success: false, + error: `Registration failed: ${response.status} ${response.statusText}`, + details: errorData, + }; + } + } catch (error) { + return { + success: false, + error: `Registration request failed: ${error.message}`, + }; + } +} + /** * Submits telemetry data to the remote gateway endpoint * @param {Object} telemetryData - The telemetry data to submit @@ -80,7 +137,7 @@ export async function submitTelemetryData(telemetryData) { return { success: false, error: - "Telemetry configuration incomplete. Set GATEWAY_API_KEY, GATEWAY_USER_ID, and GATEWAY_USER_EMAIL environment variables or configure in .taskmasterconfig", + "Telemetry configuration incomplete. Run 'task-master init' and select hosted gateway option, or manually set TASKMASTER_API_KEY, TASKMASTER_USER_ID, and TASKMASTER_USER_EMAIL environment variables", }; } @@ -102,7 +159,7 @@ export async function submitTelemetryData(telemetryData) { let lastError; for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) { try { - const response = await fetch(GATEWAY_ENDPOINT, { + const response = await fetch(TASKMASTER_TELEMETRY_ENDPOINT, { method: "POST", headers: { "Content-Type": "application/json", diff --git a/tasks/task_090.txt b/tasks/task_090.txt index f3393873..1d22a622 100644 --- a/tasks/task_090.txt +++ b/tasks/task_090.txt @@ -152,6 +152,10 @@ Implementation ready for integration into ai-services-unified.js in subtask 90.3 Integration Testing Complete - Live Gateway Verification: Successfully tested telemetry submission against live gateway at localhost:4444/api/v1/telemetry. Confirmed proper authentication using Bearer token and X-User-Email headers (not X-API-Key as initially assumed). Security filtering verified working correctly - sensitive data like commandArgs, fullOutput, apiKey, and internalDebugData properly removed before submission. Gateway responded with success confirmation and assigned telemetry ID. Service handles missing GATEWAY_USER_EMAIL environment variable gracefully. All functionality validated end-to-end including retry logic, error handling, and data validation. Module ready for integration into ai-services-unified.js. 
+ +Implementation Complete - Gateway Integration Finalized: +Hardcoded gateway endpoint to http://localhost:4444/api/v1/telemetry with config-based credential handling replacing environment variables. Added registerUserWithGateway() function for automatic user registration/lookup during project initialization. Enhanced init.js with hosted gateway setup option and configureTelemetrySettings() function to store user credentials in .taskmasterconfig under telemetry section. Updated all 10 tests to reflect new architecture - all passing. Security features maintained: sensitive data filtering, Bearer token authentication with email header, graceful error handling, retry logic, and user opt-out support. Module fully integrated and ready for ai-services-unified.js integration in subtask 90.3. + ## 3. Implement DAU and active user tracking [pending] ### Dependencies: None diff --git a/tasks/tasks.json b/tasks/tasks.json index 5926f0e7..9d3b8ff6 100644 --- a/tasks/tasks.json +++ b/tasks/tasks.json @@ -6073,7 +6073,7 @@ "id": 2, "title": "Send telemetry data to remote database endpoint", "description": "Implement POST requests to gateway.task-master.dev/telemetry endpoint to send all telemetry data including new fields (args, output) for analysis and future AI model training", - "details": "Create a telemetry submission service that POSTs to gateway.task-master.dev/telemetry. Include all existing telemetry fields plus commandArgs and fullOutput. Implement retry logic and handle failures gracefully without blocking command execution. Respect user opt-out preferences.\n\nTDD Progress - Red Phase Complete:\n- Created test file: tests/unit/scripts/modules/telemetry-submission.test.js\n- Written 6 failing tests for telemetry submission functionality:\n 1. Successfully submit telemetry data to gateway endpoint\n 2. Implement retry logic for failed requests\n 3. Handle failures gracefully without blocking execution\n 4. Respect user opt-out preferences\n 5. Validate telemetry data before submission\n 6. Handle HTTP error responses appropriately\n- All tests failing as expected (module doesn't exist yet)\n- Ready to implement minimum code to make tests pass\n\nNext: Create scripts/modules/telemetry-submission.js with submitTelemetryData function\n\n\nTDD Green Phase Complete:\n- Implemented scripts/modules/telemetry-submission.js with submitTelemetryData function\n- All 6 tests now passing with full functionality implemented\n- Security measures in place: commandArgs and fullOutput filtered out before remote submission\n- Reliability features: exponential backoff retry logic (3 attempts max), graceful error handling\n- Gateway integration: configured for https://gateway.task-master.dev/telemetry endpoint\n- Zod schema validation ensures data integrity before submission\n- User privacy protected through telemetryEnabled config option\n- Smart retry logic avoids retries for 429/401/403 status codes\n- Service never throws errors and always returns result object to prevent blocking command execution\n\nImplementation ready for integration into ai-services-unified.js in subtask 90.3\n\n\nIntegration Testing Complete - Live Gateway Verification:\nSuccessfully tested telemetry submission against live gateway at localhost:4444/api/v1/telemetry. Confirmed proper authentication using Bearer token and X-User-Email headers (not X-API-Key as initially assumed). Security filtering verified working correctly - sensitive data like commandArgs, fullOutput, apiKey, and internalDebugData properly removed before submission. 
Gateway responded with success confirmation and assigned telemetry ID. Service handles missing GATEWAY_USER_EMAIL environment variable gracefully. All functionality validated end-to-end including retry logic, error handling, and data validation. Module ready for integration into ai-services-unified.js.\n", + "details": "Create a telemetry submission service that POSTs to gateway.task-master.dev/telemetry. Include all existing telemetry fields plus commandArgs and fullOutput. Implement retry logic and handle failures gracefully without blocking command execution. Respect user opt-out preferences.\n\nTDD Progress - Red Phase Complete:\n- Created test file: tests/unit/scripts/modules/telemetry-submission.test.js\n- Written 6 failing tests for telemetry submission functionality:\n 1. Successfully submit telemetry data to gateway endpoint\n 2. Implement retry logic for failed requests\n 3. Handle failures gracefully without blocking execution\n 4. Respect user opt-out preferences\n 5. Validate telemetry data before submission\n 6. Handle HTTP error responses appropriately\n- All tests failing as expected (module doesn't exist yet)\n- Ready to implement minimum code to make tests pass\n\nNext: Create scripts/modules/telemetry-submission.js with submitTelemetryData function\n\n\nTDD Green Phase Complete:\n- Implemented scripts/modules/telemetry-submission.js with submitTelemetryData function\n- All 6 tests now passing with full functionality implemented\n- Security measures in place: commandArgs and fullOutput filtered out before remote submission\n- Reliability features: exponential backoff retry logic (3 attempts max), graceful error handling\n- Gateway integration: configured for https://gateway.task-master.dev/telemetry endpoint\n- Zod schema validation ensures data integrity before submission\n- User privacy protected through telemetryEnabled config option\n- Smart retry logic avoids retries for 429/401/403 status codes\n- Service never throws errors and always returns result object to prevent blocking command execution\n\nImplementation ready for integration into ai-services-unified.js in subtask 90.3\n\n\nIntegration Testing Complete - Live Gateway Verification:\nSuccessfully tested telemetry submission against live gateway at localhost:4444/api/v1/telemetry. Confirmed proper authentication using Bearer token and X-User-Email headers (not X-API-Key as initially assumed). Security filtering verified working correctly - sensitive data like commandArgs, fullOutput, apiKey, and internalDebugData properly removed before submission. Gateway responded with success confirmation and assigned telemetry ID. Service handles missing GATEWAY_USER_EMAIL environment variable gracefully. All functionality validated end-to-end including retry logic, error handling, and data validation. Module ready for integration into ai-services-unified.js.\n\n\nImplementation Complete - Gateway Integration Finalized:\nHardcoded gateway endpoint to http://localhost:4444/api/v1/telemetry with config-based credential handling replacing environment variables. Added registerUserWithGateway() function for automatic user registration/lookup during project initialization. Enhanced init.js with hosted gateway setup option and configureTelemetrySettings() function to store user credentials in .taskmasterconfig under telemetry section. Updated all 10 tests to reflect new architecture - all passing. 
Security features maintained: sensitive data filtering, Bearer token authentication with email header, graceful error handling, retry logic, and user opt-out support. Module fully integrated and ready for ai-services-unified.js integration in subtask 90.3.\n", "status": "done", "dependencies": [], "parentTaskId": 90 diff --git a/tests/unit/scripts/modules/telemetry-submission.test.js b/tests/unit/scripts/modules/telemetry-submission.test.js index 65197cfc..a31d4d01 100644 --- a/tests/unit/scripts/modules/telemetry-submission.test.js +++ b/tests/unit/scripts/modules/telemetry-submission.test.js @@ -1,213 +1,306 @@ /** - * Tests for telemetry submission service (Task 90.2) - * Testing remote endpoint submission with retry logic and error handling + * Unit Tests for Telemetry Submission Service - Task 90.2 + * Tests the secure telemetry submission with gateway integration */ import { jest } from "@jest/globals"; -import { z } from "zod"; -// Mock fetch for testing HTTP requests -global.fetch = jest.fn(); - -// Mock config-manager -const mockGetConfig = jest.fn(); +// Mock config-manager before importing submitTelemetryData jest.unstable_mockModule( "../../../../scripts/modules/config-manager.js", () => ({ - __esModule: true, - getConfig: mockGetConfig, + getConfig: jest.fn(), }) ); +// Mock fetch globally +global.fetch = jest.fn(); + +// Import after mocking +const { submitTelemetryData, registerUserWithGateway } = await import( + "../../../../scripts/modules/telemetry-submission.js" +); +const { getConfig } = await import( + "../../../../scripts/modules/config-manager.js" +); + describe("Telemetry Submission Service - Task 90.2", () => { - let telemetrySubmission; - - beforeAll(async () => { - // Import after mocking - telemetrySubmission = await import( - "../../../../scripts/modules/telemetry-submission.js" - ); - }); - beforeEach(() => { jest.clearAllMocks(); - // Reset fetch mock - fetch.mockClear(); - mockGetConfig.mockClear(); - - // Default config mock - telemetry enabled - mockGetConfig.mockReturnValue({ telemetryEnabled: true }); + global.fetch.mockClear(); }); describe("Subtask 90.2: Send telemetry data to remote database endpoint", () => { - it("should successfully submit telemetry data to gateway endpoint", async () => { + it("should successfully submit telemetry data to hardcoded gateway endpoint", async () => { + // Mock successful config + getConfig.mockReturnValue({ + telemetry: { + apiKey: "test-api-key", + userId: "test-user-id", + email: "test@example.com", + }, + }); + // Mock successful response - fetch.mockResolvedValueOnce({ + global.fetch.mockResolvedValueOnce({ ok: true, - status: 200, - json: async () => ({ success: true, id: "telemetry-123" }), + json: async () => ({ id: "telemetry-123" }), }); const telemetryData = { - timestamp: "2025-05-28T15:00:00.000Z", - userId: "1234567890", - commandName: "add-task", + timestamp: new Date().toISOString(), + userId: "test-user-id", + commandName: "test-command", modelUsed: "claude-3-sonnet", - providerName: "anthropic", - inputTokens: 100, - outputTokens: 50, - totalTokens: 150, totalCost: 0.001, currency: "USD", - // These sensitive fields should be filtered out before submission - commandArgs: { id: "15", prompt: "Test task" }, - fullOutput: { title: "Generated Task", description: "AI output" }, + commandArgs: { secret: "should-be-filtered" }, + fullOutput: { debug: "should-be-filtered" }, }; - // Expected data after filtering (without commandArgs and fullOutput) - const expectedFilteredData = { - timestamp: 
"2025-05-28T15:00:00.000Z", - userId: "1234567890", - commandName: "add-task", - modelUsed: "claude-3-sonnet", - providerName: "anthropic", - inputTokens: 100, - outputTokens: 50, - totalTokens: 150, - totalCost: 0.001, - currency: "USD", - }; - - const result = - await telemetrySubmission.submitTelemetryData(telemetryData); + const result = await submitTelemetryData(telemetryData); expect(result.success).toBe(true); expect(result.id).toBe("telemetry-123"); - - // Verify the request was made with filtered data (security requirement) - expect(fetch).toHaveBeenCalledWith( - "https://gateway.task-master.dev/telemetry", + expect(global.fetch).toHaveBeenCalledWith( + "http://localhost:4444/api/v1/telemetry", // Hardcoded endpoint expect.objectContaining({ method: "POST", headers: { "Content-Type": "application/json", + Authorization: "Bearer test-api-key", + "X-User-Email": "test@example.com", }, - body: JSON.stringify(expectedFilteredData), + body: expect.stringContaining('"commandName":"test-command"'), + }) + ); + + // Verify sensitive data is filtered out + const sentData = JSON.parse(global.fetch.mock.calls[0][1].body); + expect(sentData.commandArgs).toBeUndefined(); + expect(sentData.fullOutput).toBeUndefined(); + }); + + it("should implement retry logic for failed requests", async () => { + getConfig.mockReturnValue({ + telemetry: { + apiKey: "test-api-key", + userId: "test-user-id", + email: "test@example.com", + }, + }); + + // Mock 3 failures then success + global.fetch + .mockRejectedValueOnce(new Error("Network error")) + .mockRejectedValueOnce(new Error("Network error")) + .mockRejectedValueOnce(new Error("Network error")) + .mockResolvedValueOnce({ + ok: false, + status: 500, + statusText: "Internal Server Error", + json: async () => ({}), + }); + + const telemetryData = { + timestamp: new Date().toISOString(), + userId: "test-user-id", + commandName: "test-command", + totalCost: 0.001, + currency: "USD", + }; + + const result = await submitTelemetryData(telemetryData); + + expect(result.success).toBe(false); + expect(result.attempts).toBe(3); + expect(global.fetch).toHaveBeenCalledTimes(3); + }, 10000); + + it("should handle failures gracefully without blocking execution", async () => { + getConfig.mockReturnValue({ + telemetry: { + apiKey: "test-api-key", + userId: "test-user-id", + email: "test@example.com", + }, + }); + + global.fetch.mockRejectedValue(new Error("Network failure")); + + const telemetryData = { + timestamp: new Date().toISOString(), + userId: "test-user-id", + commandName: "test-command", + totalCost: 0.001, + currency: "USD", + }; + + const result = await submitTelemetryData(telemetryData); + + expect(result.success).toBe(false); + expect(result.error).toContain("Network failure"); + expect(global.fetch).toHaveBeenCalledTimes(3); // All retries attempted + }, 10000); + + it("should respect user opt-out preferences", async () => { + getConfig.mockReturnValue({ + telemetryEnabled: false, + }); + + const telemetryData = { + timestamp: new Date().toISOString(), + userId: "test-user-id", + commandName: "test-command", + totalCost: 0.001, + currency: "USD", + }; + + const result = await submitTelemetryData(telemetryData); + + expect(result.success).toBe(true); + expect(result.skipped).toBe(true); + expect(result.reason).toBe("Telemetry disabled by user preference"); + expect(global.fetch).not.toHaveBeenCalled(); + }); + + it("should validate telemetry data before submission", async () => { + getConfig.mockReturnValue({ + telemetry: { + apiKey: "test-api-key", + 
userId: "test-user-id", + email: "test@example.com", + }, + }); + + const invalidTelemetryData = { + // Missing required fields + commandName: "test-command", + }; + + const result = await submitTelemetryData(invalidTelemetryData); + + expect(result.success).toBe(false); + expect(result.error).toContain("Telemetry data validation failed"); + expect(global.fetch).not.toHaveBeenCalled(); + }); + + it("should handle HTTP error responses appropriately", async () => { + getConfig.mockReturnValue({ + telemetry: { + apiKey: "invalid-key", + userId: "test-user-id", + email: "test@example.com", + }, + }); + + global.fetch.mockResolvedValueOnce({ + ok: false, + status: 401, + statusText: "Unauthorized", + json: async () => ({ error: "Invalid API key" }), + }); + + const telemetryData = { + timestamp: new Date().toISOString(), + userId: "test-user-id", + commandName: "test-command", + totalCost: 0.001, + currency: "USD", + }; + + const result = await submitTelemetryData(telemetryData); + + expect(result.success).toBe(false); + expect(result.statusCode).toBe(401); + expect(global.fetch).toHaveBeenCalledTimes(1); // No retries for auth errors + }); + }); + + describe("User Registration with Gateway", () => { + it("should successfully register new user with gateway", async () => { + global.fetch.mockResolvedValueOnce({ + ok: true, + json: async () => ({ + apiKey: "new-api-key-123", + userId: "new-user-id-456", + email: "newuser@example.com", + isNewUser: true, + }), + }); + + const result = await registerUserWithGateway("newuser@example.com"); + + expect(result.success).toBe(true); + expect(result.apiKey).toBe("new-api-key-123"); + expect(result.userId).toBe("new-user-id-456"); + expect(result.email).toBe("newuser@example.com"); + expect(result.isNewUser).toBe(true); + + expect(global.fetch).toHaveBeenCalledWith( + "http://localhost:4444/api/v1/users", + expect.objectContaining({ + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ email: "newuser@example.com" }), }) ); }); - it("should implement retry logic for failed requests", async () => { - // Mock first two calls to fail, third to succeed - fetch - .mockRejectedValueOnce(new Error("Network error")) - .mockRejectedValueOnce(new Error("Network error")) - .mockResolvedValueOnce({ - ok: true, - status: 200, - json: async () => ({ success: true, id: "telemetry-retry-123" }), - }); - - const telemetryData = { - timestamp: "2025-05-28T15:00:00.000Z", - userId: "1234567890", - commandName: "expand-task", - modelUsed: "claude-3-sonnet", - totalCost: 0.002, - }; - - const result = - await telemetrySubmission.submitTelemetryData(telemetryData); - - // Verify retry attempts (should be called 3 times) - expect(fetch).toHaveBeenCalledTimes(3); - expect(result.success).toBe(true); - expect(result.id).toBe("telemetry-retry-123"); - }); - - it("should handle failures gracefully without blocking execution", async () => { - // Mock all attempts to fail - fetch.mockRejectedValue(new Error("Persistent network error")); - - const telemetryData = { - timestamp: "2025-05-28T15:00:00.000Z", - userId: "1234567890", - commandName: "research", - modelUsed: "claude-3-sonnet", - totalCost: 0.003, - }; - - const result = - await telemetrySubmission.submitTelemetryData(telemetryData); - - // Verify it attempted retries but failed gracefully - expect(fetch).toHaveBeenCalledTimes(3); // Initial + 2 retries - expect(result.success).toBe(false); - expect(result.error).toContain("Persistent network error"); - }); - - it("should respect user 
opt-out preferences", async () => { - // Mock config to disable telemetry - mockGetConfig.mockReturnValue({ telemetryEnabled: false }); - - const telemetryData = { - timestamp: "2025-05-28T15:00:00.000Z", - userId: "1234567890", - commandName: "add-task", - totalCost: 0.001, - }; - - const result = - await telemetrySubmission.submitTelemetryData(telemetryData); - - // Verify no network request was made - expect(fetch).not.toHaveBeenCalled(); - expect(result.success).toBe(true); - expect(result.skipped).toBe(true); - expect(result.reason).toBe("Telemetry disabled by user preference"); - }); - - it("should validate telemetry data before submission", async () => { - const invalidTelemetryData = { - // Missing required fields - commandName: "test", - // Invalid timestamp format - timestamp: "invalid-date", - }; - - const result = - await telemetrySubmission.submitTelemetryData(invalidTelemetryData); - - // Verify no network request was made for invalid data - expect(fetch).not.toHaveBeenCalled(); - expect(result.success).toBe(false); - expect(result.error).toContain("validation"); - }); - - it("should handle HTTP error responses appropriately", async () => { - // Mock HTTP 429 error response (no retries for rate limiting) - fetch.mockResolvedValueOnce({ - ok: false, - status: 429, - statusText: "Too Many Requests", - json: async () => ({ error: "Rate limit exceeded" }), + it("should find existing user with provided userId", async () => { + global.fetch.mockResolvedValueOnce({ + ok: true, + json: async () => ({ + apiKey: "existing-api-key", + userId: "existing-user-id", + email: "existing@example.com", + isNewUser: false, + }), }); - const telemetryData = { - timestamp: "2025-05-28T15:00:00.000Z", - userId: "1234567890", - commandName: "update-task", - modelUsed: "claude-3-sonnet", - totalCost: 0.001, - }; + const result = await registerUserWithGateway( + "existing@example.com", + "existing-user-id" + ); - const result = - await telemetrySubmission.submitTelemetryData(telemetryData); + expect(result.success).toBe(true); + expect(result.isNewUser).toBe(false); + + expect(global.fetch).toHaveBeenCalledWith( + "http://localhost:4444/api/v1/users", + expect.objectContaining({ + body: JSON.stringify({ + email: "existing@example.com", + userId: "existing-user-id", + }), + }) + ); + }); + + it("should handle registration failures gracefully", async () => { + global.fetch.mockResolvedValueOnce({ + ok: false, + status: 400, + statusText: "Bad Request", + json: async () => ({ error: "Invalid email format" }), + }); + + const result = await registerUserWithGateway("invalid-email"); expect(result.success).toBe(false); - expect(result.error).toContain("429"); - expect(result.error).toContain("Too Many Requests"); - expect(fetch).toHaveBeenCalledTimes(1); // No retries for 429 + expect(result.error).toContain("Registration failed: 400 Bad Request"); + expect(result.details).toEqual({ error: "Invalid email format" }); + }); + + it("should handle network errors during registration", async () => { + global.fetch.mockRejectedValueOnce(new Error("Connection refused")); + + const result = await registerUserWithGateway("test@example.com"); + + expect(result.success).toBe(false); + expect(result.error).toContain( + "Registration request failed: Connection refused" + ); }); }); });
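Taken together, the hunks above imply a small credential-resolution contract: .taskmasterconfig now carries a `telemetry` section, with the older flat `telemetryApiKey`/`telemetryUserId`/`telemetryUserEmail` keys kept as fallbacks. The sketch below restates that fallback chain for readers; the function name and the sample config values are illustrative — only the key names come from the diff and the test mocks.

// Illustrative .taskmasterconfig fragment (values are placeholders):
// {
//   "telemetryEnabled": true,
//   "telemetry": {
//     "apiKey": "tm-xxxxxxxx",
//     "userId": "user-1234",
//     "email": "dev@example.com"
//   }
// }

// Hypothetical helper name; mirrors the fallback chain in the credentials hunk.
function resolveTelemetryCredentials(config) {
	return {
		apiKey: config?.telemetry?.apiKey || config?.telemetryApiKey,
		userId:
			config?.telemetry?.userId ||
			config?.telemetryUserId ||
			config?.global?.userId,
		email: config?.telemetry?.email || config?.telemetryUserEmail,
	};
}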
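For the planned subtask 90.3 wiring into ai-services-unified.js (not part of this diff), the call pattern the notes describe — never throw, never block the user's command — could look roughly like this; recordAiUsage() and the payload values are hypothetical.

// Sketch of the intended fire-and-forget call; submitTelemetryData() always
// resolves to a result object and never throws, so callers do not need a
// try/catch around it.
import { submitTelemetryData } from "./modules/telemetry-submission.js";

async function recordAiUsage(telemetryData) {
	const result = await submitTelemetryData(telemetryData);
	if (!result.success && !result.skipped) {
		// Log and move on; telemetry problems must not affect the user's command.
		console.debug(`Telemetry not submitted: ${result.error}`);
	}
	return result;
}

// Example payload mirroring the required fields exercised by the tests above.
recordAiUsage({
	timestamp: new Date().toISOString(),
	userId: "user-1234", // placeholder
	commandName: "add-task",
	modelUsed: "claude-3-sonnet",
	totalCost: 0.001,
	currency: "USD",
});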