From 6d4471fcb516e8b0e0642c47c8c99739b250bb63 Mon Sep 17 00:00:00 2001
From: Eyal Toledano
Date: Mon, 28 Apr 2025 04:08:10 -0400
Subject: [PATCH] refactor(init): Improve robustness and dependencies; Update
 template deps for AI SDKs; Silence npm install in MCP; Improve conditional
 model setup logic; Refactor init.js flags; Tweak Getting Started text; Fix
 MCP server launch command; Update default model in config template

---
 .changeset/fine-signs-add.md  |  13 ++
 .cursor/mcp.json              |   2 +-
 .gitignore                    |   2 +
 .taskmasterconfig             |   6 +-
 scripts/init.js               | 156 ++++++++++----
 tests/e2e/run_e2e.sh          | 389 ++++++++++++++++++++++++++++++++++
 tests/fixtures/sample-prd.txt | 110 +++++++---
 7 files changed, 601 insertions(+), 77 deletions(-)
 create mode 100644 .changeset/fine-signs-add.md
 create mode 100755 tests/e2e/run_e2e.sh

diff --git a/.changeset/fine-signs-add.md b/.changeset/fine-signs-add.md
new file mode 100644
index 00000000..fddbf217
--- /dev/null
+++ b/.changeset/fine-signs-add.md
@@ -0,0 +1,13 @@
+---
+'task-master-ai': patch
+---
+
+Improve the `init` command for robustness and update its template dependencies.
+
+- **Update Initialization Dependencies:** Ensure newly initialized projects (`task-master init`) include all required AI SDK dependencies (`@ai-sdk/*`, `ai`, provider wrappers) in their `package.json` for out-of-the-box AI feature compatibility. Remove unnecessary dependencies (e.g., `uuid`) from the init template.
+- **Silence `npm install` during `init`:** Prevent `npm install` output from interfering with non-interactive/MCP initialization by suppressing its stdio in silent mode.
+- **Improve Conditional Model Setup:** Reliably skip interactive `models --setup` during non-interactive `init` runs (e.g., `init -y` or MCP) by checking `isSilentMode()` instead of passing flags.
+- **Refactor `init.js`:** Remove internal `isInteractive` flag logic.
+- **Update `init` Instructions:** Tweak the "Getting Started" text displayed after `init`.
+- **Fix MCP Server Launch:** Update `.cursor/mcp.json` template to use `node ./mcp-server/server.js` instead of `npx task-master-mcp`.
+- **Update Default Model:** Change the default main model in the `.taskmasterconfig` template.
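
For readers skimming the changeset, the following is a condensed, illustrative sketch (not the literal patch hunks below) of how `scripts/init.js` gates `npm install` output and the interactive model setup on `isSilentMode()`. The import path for `isSilentMode`/`log` and the helper name are assumed here for illustration only.

```js
import { execSync } from 'child_process';
// Assumed import path for illustration; the real helpers live elsewhere in the repo.
import { isSilentMode, log } from './modules/utils.js';

function installAndConfigureModels(targetDir, skipInstall, dryRun) {
	// Silent (MCP) runs suppress npm's output so it cannot corrupt the MCP stdio channel.
	const stdio = isSilentMode() ? 'ignore' : 'inherit';

	if (!skipInstall) {
		execSync('npm install', { cwd: targetDir, stdio });
	}

	if (dryRun) {
		log('info', 'DRY RUN: Skipping interactive model setup.');
	} else if (isSilentMode()) {
		// Non-interactive init: defer model selection to the CLI/MCP "models" tooling.
		log('warn', 'Configure models with "task-master models --set-..." or the "models" MCP tool.');
	} else {
		// Interactive CLI init: walk the user through model selection.
		execSync('npx task-master models --setup', { cwd: targetDir, stdio: 'inherit' });
	}
}
```
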
diff --git a/.cursor/mcp.json b/.cursor/mcp.json index 3ac55286..1566b0ca 100644 --- a/.cursor/mcp.json +++ b/.cursor/mcp.json @@ -1,6 +1,6 @@ { "mcpServers": { - "taskmaster-ai": { + "task-master-ai": { "command": "node", "args": ["./mcp-server/server.js"], "env": { diff --git a/.gitignore b/.gitignore index dd1161de..4e9ba351 100644 --- a/.gitignore +++ b/.gitignore @@ -19,6 +19,8 @@ npm-debug.log* yarn-debug.log* yarn-error.log* lerna-debug.log* +tests/e2e/_runs/ +tests/e2e/log/ # Coverage directory used by tools like istanbul coverage diff --git a/.taskmasterconfig b/.taskmasterconfig index cacd529e..ccb7704c 100644 --- a/.taskmasterconfig +++ b/.taskmasterconfig @@ -1,8 +1,8 @@ { "models": { "main": { - "provider": "openrouter", - "modelId": "google/gemini-2.5-pro-exp-03-25", + "provider": "anthropic", + "modelId": "claude-3-7-sonnet-20250219", "maxTokens": 100000, "temperature": 0.2 }, @@ -14,7 +14,7 @@ }, "fallback": { "provider": "anthropic", - "modelId": "claude-3-7-sonnet-20250219", + "modelId": "claude-3-5-sonnet-20241022", "maxTokens": 120000, "temperature": 0.2 } diff --git a/scripts/init.js b/scripts/init.js index f44a4863..3f5b4e55 100755 --- a/scripts/init.js +++ b/scripts/init.js @@ -367,10 +367,7 @@ function copyTemplateFile(templateName, targetPath, replacements = {}) { } // For other files, warn and prompt before overwriting - log( - 'warn', - `${targetPath} already exists. Skipping file creation to avoid overwriting existing content.` - ); + log('warn', `${targetPath} already exists, skipping.`); return; } @@ -379,7 +376,7 @@ function copyTemplateFile(templateName, targetPath, replacements = {}) { log('info', `Created file: ${targetPath}`); } -// Main function to initialize a new project (Now relies solely on passed options) +// Main function to initialize a new project (No longer needs isInteractive logic) async function initializeProject(options = {}) { // Receives options as argument // Only display banner if not in silent mode @@ -396,8 +393,8 @@ async function initializeProject(options = {}) { console.log('=================================================='); } - // Determine if we should skip prompts based on the passed options const skipPrompts = options.yes || (options.name && options.description); + if (!isSilentMode()) { console.log('Skip prompts determined:', skipPrompts); } @@ -411,8 +408,8 @@ async function initializeProject(options = {}) { const projectName = options.name || 'task-master-project'; const projectDescription = options.description || 'A project managed with Task Master AI'; - const projectVersion = options.version || '0.1.0'; // Default from commands.js or here - const authorName = options.author || 'Vibe coder'; // Default if not provided + const projectVersion = options.version || '0.1.0'; + const authorName = options.author || 'Vibe coder'; const dryRun = options.dryRun || false; const skipInstall = options.skipInstall || false; const addAliases = options.aliases || false; @@ -441,17 +438,18 @@ async function initializeProject(options = {}) { }; } - // Create structure using determined values + // Call createProjectStructure (no need for isInteractive flag) createProjectStructure( projectName, projectDescription, projectVersion, authorName, skipInstall, - addAliases + addAliases, + dryRun // Pass dryRun ); } else { - // Prompting logic (only runs if skipPrompts is false) + // Interactive logic log('info', 'Required options not provided, proceeding with prompts.'); const rl = readline.createInterface({ input: process.stdin, @@ -471,7 +469,7 
@@ async function initializeProject(options = {}) { const projectVersionInput = await promptQuestion( rl, chalk.cyan('Enter project version (default: 1.0.0): ') - ); // Use a default for prompt + ); const authorName = await promptQuestion( rl, chalk.cyan('Enter your name: ') @@ -510,11 +508,10 @@ async function initializeProject(options = {}) { if (!shouldContinue) { log('info', 'Project initialization cancelled by user'); - process.exit(0); // Exit if cancelled - return; // Added return for clarity + process.exit(0); + return; } - // Still respect dryRun/skipInstall if passed initially even when prompting const dryRun = options.dryRun || false; const skipInstall = options.skipInstall || false; @@ -542,19 +539,20 @@ async function initializeProject(options = {}) { }; } - // Create structure using prompted values, respecting initial options where relevant + // Call createProjectStructure (no need for isInteractive flag) createProjectStructure( projectName, projectDescription, projectVersion, authorName, - skipInstall, // Use value from initial options - addAliasesPrompted // Use value from prompt + skipInstall, + addAliasesPrompted, + dryRun // Pass dryRun ); } catch (error) { rl.close(); - log('error', `Error during prompting: ${error.message}`); // Use log function - process.exit(1); // Exit on error during prompts + log('error', `Error during initialization process: ${error.message}`); + process.exit(1); } } } @@ -575,7 +573,8 @@ function createProjectStructure( projectVersion, authorName, skipInstall, - addAliases + addAliases, + dryRun ) { const targetDir = process.cwd(); log('info', `Initializing project in ${targetDir}`); @@ -599,7 +598,16 @@ function createProjectStructure( 'parse-prd': 'node scripts/dev.js parse-prd' }, dependencies: { - '@anthropic-ai/sdk': '^0.39.0', + '@ai-sdk/anthropic': '^1.2.10', + '@ai-sdk/azure': '^1.3.17', + '@ai-sdk/google': '^1.2.13', + '@ai-sdk/mistral': '^1.2.7', + '@ai-sdk/openai': '^1.3.20', + '@ai-sdk/perplexity': '^1.1.7', + '@ai-sdk/xai': '^1.2.15', + '@openrouter/ai-sdk-provider': '^0.4.5', + 'ollama-ai-provider': '^1.2.0', + ai: '^4.3.10', boxen: '^8.0.1', chalk: '^4.1.2', commander: '^11.1.0', @@ -673,7 +681,7 @@ function createProjectStructure( fs.writeFileSync(packageJsonPath, JSON.stringify(packageJson, null, 2)); log( 'warn', - 'Created new package.json (backup of original file was created)' + 'Created new package.json (backup of original file was created if it existed)' ); } } else { @@ -774,7 +782,18 @@ function createProjectStructure( } // Run npm install automatically - if (!isSilentMode()) { + const npmInstallOptions = { + cwd: targetDir, + // Default to inherit for interactive CLI, change if silent + stdio: 'inherit' + }; + + if (isSilentMode()) { + // If silent (MCP mode), suppress npm install output + npmInstallOptions.stdio = 'ignore'; + log('info', 'Running npm install silently...'); // Log our own message + } else { + // Interactive mode, show the boxen message console.log( boxen(chalk.cyan('Installing dependencies...'), { padding: 0.5, @@ -787,16 +806,57 @@ function createProjectStructure( try { if (!skipInstall) { - execSync('npm install', { stdio: 'inherit', cwd: targetDir }); + // Use the determined options + execSync('npm install', npmInstallOptions); log('success', 'Dependencies installed successfully!'); } else { log('info', 'Dependencies installation skipped'); } } catch (error) { log('error', 'Failed to install dependencies:', error.message); - log('error', 'Please run npm install manually'); + // Add more detail if 
silent, as the user won't see npm's error directly + if (isSilentMode()) { + log('error', 'Check npm logs or run "npm install" manually for details.'); + } else { + log('error', 'Please run npm install manually'); + } } + // === Add Model Configuration Step === + if (!isSilentMode() && !dryRun) { + console.log( + boxen(chalk.cyan('Configuring AI Models...'), { + padding: 0.5, + margin: { top: 1, bottom: 0.5 }, + borderStyle: 'round', + borderColor: 'blue' + }) + ); + log( + 'info', + 'Running interactive model setup. Please select your preferred AI models.' + ); + try { + execSync('npx task-master models --setup', { + stdio: 'inherit', + cwd: targetDir + }); + log('success', 'AI Models configured.'); + } catch (error) { + log('error', 'Failed to configure AI models:', error.message); + log('warn', 'You may need to run "task-master models --setup" manually.'); + } + } else if (isSilentMode() && !dryRun) { + log('info', 'Skipping interactive model setup in silent (MCP) mode.'); + log( + 'warn', + 'Please configure AI models using "task-master models --set-..." or the "models" MCP tool.' + ); + } else if (dryRun) { + log('info', 'DRY RUN: Skipping interactive model setup.'); + } + // ==================================== + // Display success message if (!isSilentMode()) { console.log( @@ -825,43 +885,59 @@ function createProjectStructure( if (!isSilentMode()) { console.log( boxen( - chalk.cyan.bold('Things you can now do:') + + chalk.cyan.bold('Things you should do next:') + '\n\n' + chalk.white('1. ') + chalk.yellow( - 'Rename .env.example to .env and add your ANTHROPIC_API_KEY and PERPLEXITY_API_KEY' + 'Configure AI models (if needed) and add API keys to `.env`' + ) + + '\n' + + chalk.white(' ├─ ') + + chalk.dim('Models: Use `task-master models` commands') + + '\n' + + chalk.white(' └─ ') + + chalk.dim( + 'Keys: Add provider API keys to .env (or inside the MCP config file i.e. .cursor/mcp.json)' ) + '\n' + chalk.white('2. ') + chalk.yellow( - 'Discuss your idea with AI, and once ready ask for a PRD using the example_prd.txt file, and save what you get to scripts/PRD.txt' + 'Discuss your idea with AI and ask for a PRD using example_prd.txt, and save it to scripts/PRD.txt' ) + '\n' + chalk.white('3. ') + chalk.yellow( - 'Ask Cursor Agent to parse your PRD.txt and generate tasks' + 'Ask Cursor Agent (or run CLI) to parse your PRD and generate initial tasks:' ) + '\n' + chalk.white(' └─ ') + - chalk.dim('You can also run ') + - chalk.cyan('task-master parse-prd ') + + chalk.dim('MCP Tool: ') + + chalk.cyan('parse_prd') + + chalk.dim(' | CLI: ') + + chalk.cyan('task-master parse-prd scripts/prd.txt') + '\n' + chalk.white('4. ') + - chalk.yellow('Ask Cursor to analyze the complexity of your tasks') + + chalk.yellow( + 'Ask Cursor to analyze the complexity of the tasks in your PRD using research' + ) + + '\n' + + chalk.white(' └─ ') + + chalk.dim('MCP Tool: ') + + chalk.cyan('analyze_project_complexity') + + chalk.dim(' | CLI: ') + + chalk.cyan('task-master analyze-complexity') + '\n' + chalk.white('5. ') + chalk.yellow( - 'Ask Cursor which task is next to determine where to start' + 'Ask Cursor to expand all of your tasks using the complexity analysis' ) + '\n' + chalk.white('6. ') + - chalk.yellow( - 'Ask Cursor to expand any complex tasks that are too large or complex.' - ) + + chalk.yellow('Ask Cursor to begin working on the next task') + '\n' + chalk.white('7. ') + chalk.yellow( - 'Ask Cursor to set the status of a task, or multiple tasks. Use the task id from the task lists.' 
+ 'Ask Cursor to set the status of one or many tasks/subtasks at a time. Use the task id from the task lists.' ) + '\n' + chalk.white('8. ') + @@ -874,6 +950,10 @@ function createProjectStructure( '\n\n' + chalk.dim( '* Review the README.md file to learn how to use other commands via Cursor Agent.' + ) + + '\n' + + chalk.dim( + '* Use the task-master command without arguments to see all available commands.' ), { padding: 1, diff --git a/tests/e2e/run_e2e.sh b/tests/e2e/run_e2e.sh new file mode 100755 index 00000000..5e56d5ad --- /dev/null +++ b/tests/e2e/run_e2e.sh @@ -0,0 +1,389 @@ +#!/bin/bash + +# Exit immediately if a command exits with a non-zero status. +set -e +# Treat unset variables as an error when substituting. +set -u +# Prevent errors in pipelines from being masked. +set -o pipefail + +# --- Configuration --- +# Assumes script is run from the project root (claude-task-master) +TASKMASTER_SOURCE_DIR="." # Current directory is the source +# Base directory for test runs, relative to project root +BASE_TEST_DIR="$TASKMASTER_SOURCE_DIR/tests/e2e/_runs" +# Log directory, relative to project root +LOG_DIR="$TASKMASTER_SOURCE_DIR/tests/e2e/log" +# Path to the sample PRD, relative to project root +SAMPLE_PRD_SOURCE="$TASKMASTER_SOURCE_DIR/tests/fixtures/sample-prd.txt" +# Path to the main .env file in the source directory +MAIN_ENV_FILE="$TASKMASTER_SOURCE_DIR/.env" +# --- + +# --- Test State Variables --- +# Note: These are mainly for step numbering within the log now, not for final summary +test_step_count=0 +start_time_for_helpers=0 # Separate start time for helper functions inside the pipe +# --- + +# --- Log File Setup --- +# Create the log directory if it doesn't exist +mkdir -p "$LOG_DIR" +# Define timestamped log file path +TIMESTAMP=$(date +"%Y%m%d_%H%M%S") +LOG_FILE="$LOG_DIR/e2e_run_$TIMESTAMP.log" + +# Echo starting message to the original terminal BEFORE the main piped block +echo "Starting E2E test. 
Output will be shown here and saved to: $LOG_FILE" +echo "Running from directory: $(pwd)" +echo "--- Starting E2E Run ---" # Separator before piped output starts + +# Record start time for overall duration *before* the pipe +overall_start_time=$(date +%s) + +# --- Main Execution Block (Piped to tee) --- +# Wrap the main part of the script in braces and pipe its output (stdout and stderr) to tee +{ + # Record start time for helper functions *inside* the pipe + start_time_for_helpers=$(date +%s) + + # --- Helper Functions (Output will now go to tee -> terminal & log file) --- + _format_duration() { + local total_seconds=$1 + local minutes=$((total_seconds / 60)) + local seconds=$((total_seconds % 60)) + printf "%dm%02ds" "$minutes" "$seconds" + } + + _get_elapsed_time_for_log() { + local current_time=$(date +%s) + local elapsed_seconds=$((current_time - start_time_for_helpers)) + _format_duration "$elapsed_seconds" + } + + log_info() { + echo "[INFO] [$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S") $1" + } + + log_success() { + # We no longer increment success_step_count here for the final summary + echo "[SUCCESS] [$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S") $1" + } + + log_error() { + # Output errors to stderr, which gets merged and sent to tee + echo "[ERROR] [$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S") $1" >&2 + } + + log_step() { + test_step_count=$((test_step_count + 1)) + echo "" + echo "=============================================" + echo " STEP ${test_step_count}: [$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S") $1" + echo "=============================================" + } + # --- + + # --- Test Setup (Output to tee) --- + log_step "Setting up test environment" + + log_step "Creating global npm link for task-master-ai" + if npm link; then + log_success "Global link created/updated." + else + log_error "Failed to run 'npm link'. Check permissions or output for details." + exit 1 + fi + + mkdir -p "$BASE_TEST_DIR" + log_info "Ensured base test directory exists: $BASE_TEST_DIR" + + TEST_RUN_DIR="$BASE_TEST_DIR/run_$TIMESTAMP" + mkdir -p "$TEST_RUN_DIR" + log_info "Created test run directory: $TEST_RUN_DIR" + + # Check if source .env file exists + if [ ! -f "$MAIN_ENV_FILE" ]; then + log_error "Source .env file not found at $MAIN_ENV_FILE. Cannot proceed with API-dependent tests." + exit 1 + fi + log_info "Source .env file found at $MAIN_ENV_FILE." + + # Check if sample PRD exists + if [ ! -f "$SAMPLE_PRD_SOURCE" ]; then + log_error "Sample PRD not found at $SAMPLE_PRD_SOURCE. Please check path." + exit 1 + fi + + log_info "Copying sample PRD to test directory..." + cp "$SAMPLE_PRD_SOURCE" "$TEST_RUN_DIR/prd.txt" + if [ ! -f "$TEST_RUN_DIR/prd.txt" ]; then + log_error "Failed to copy sample PRD to $TEST_RUN_DIR." + exit 1 + fi + log_success "Sample PRD copied." + + ORIGINAL_DIR=$(pwd) # Save original dir + cd "$TEST_RUN_DIR" + log_info "Changed directory to $(pwd)" + + # === Copy .env file BEFORE init === + log_step "Copying source .env file for API keys" + if cp "$ORIGINAL_DIR/.env" ".env"; then + log_success ".env file copied successfully." + else + log_error "Failed to copy .env file from $ORIGINAL_DIR/.env" + exit 1 + fi + # ======================================== + + # --- Test Execution (Output to tee) --- + + log_step "Linking task-master-ai package locally" + npm link task-master-ai + log_success "Package linked locally." 
+ + log_step "Initializing Task Master project (non-interactive)" + task-master init -y --name="E2E Test $TIMESTAMP" --description="Automated E2E test run" + if [ ! -f ".taskmasterconfig" ] || [ ! -f "package.json" ]; then + log_error "Initialization failed: .taskmasterconfig or package.json not found." + exit 1 + fi + log_success "Project initialized." + + log_step "Parsing PRD" + task-master parse-prd ./prd.txt --force + if [ ! -s "tasks/tasks.json" ]; then + log_error "Parsing PRD failed: tasks/tasks.json not found or is empty." + exit 1 + fi + log_success "PRD parsed successfully." + + log_step "Listing tasks" + task-master list > task_list_output.log + log_success "Task list saved to task_list_output.log" + + log_step "Analyzing complexity" + # Add --research flag if needed and API keys support it + task-master analyze-complexity --research --output complexity_results.json + if [ ! -f "complexity_results.json" ]; then + log_error "Complexity analysis failed: complexity_results.json not found." + exit 1 + fi + log_success "Complexity analysis saved to complexity_results.json" + + log_step "Generating complexity report" + task-master complexity-report --file complexity_results.json > complexity_report_formatted.log + log_success "Formatted complexity report saved to complexity_report_formatted.log" + + log_step "Expanding Task 1 (assuming it exists)" + # Add --research flag if needed and API keys support it + task-master expand --id=1 # Add --research? + log_success "Attempted to expand Task 1." + + log_step "Setting status for Subtask 1.1 (assuming it exists)" + task-master set-status --id=1.1 --status=done + log_success "Attempted to set status for Subtask 1.1 to 'done'." + + log_step "Listing tasks again (after changes)" + task-master list --with-subtasks > task_list_after_changes.log + log_success "Task list after changes saved to task_list_after_changes.log" + + # === Test Model Commands === + log_step "Checking initial model configuration" + task-master models > models_initial_config.log + log_success "Initial model config saved to models_initial_config.log" + + log_step "Setting main model" + task-master models --set-main claude-3-7-sonnet-20250219 + log_success "Set main model." + + log_step "Setting research model" + task-master models --set-research sonar-pro + log_success "Set research model." + + log_step "Setting fallback model" + task-master models --set-fallback claude-3-5-sonnet-20241022 + log_success "Set fallback model." + + log_step "Checking final model configuration" + task-master models > models_final_config.log + log_success "Final model config saved to models_final_config.log" + # === End Model Commands Test === + + log_step "Listing tasks again (final)" + task-master list --with-subtasks > task_list_final.log + log_success "Final task list saved to task_list_final.log" + + # === Test Core Task Commands === + log_step "Listing tasks (initial)" + task-master list > task_list_initial.log + log_success "Initial task list saved to task_list_initial.log" + + log_step "Getting next task" + task-master next > next_task_initial.log + log_success "Initial next task saved to next_task_initial.log" + + log_step "Showing Task 1 details" + task-master show 1 > task_1_details.log + log_success "Task 1 details saved to task_1_details.log" + + log_step "Adding dependency (Task 2 depends on Task 1)" + task-master add-dependency --id=2 --depends-on=1 + log_success "Added dependency 2->1." 
+ + log_step "Validating dependencies (after add)" + task-master validate-dependencies > validate_dependencies_after_add.log + log_success "Dependency validation after add saved." + + log_step "Removing dependency (Task 2 depends on Task 1)" + task-master remove-dependency --id=2 --depends-on=1 + log_success "Removed dependency 2->1." + + log_step "Fixing dependencies (should be no-op now)" + task-master fix-dependencies > fix_dependencies_output.log + log_success "Fix dependencies attempted." + + log_step "Adding Task 11 (Manual)" + task-master add-task --title="Manual E2E Task" --description="Add basic health check endpoint" --priority=low --dependencies=3 # Depends on backend setup + # Assuming the new task gets ID 11 (adjust if PRD parsing changes) + log_success "Added Task 11 manually." + + log_step "Adding Task 12 (AI)" + task-master add-task --prompt="Implement basic UI styling using CSS variables for colors and spacing" --priority=medium --dependencies=1 # Depends on frontend setup + # Assuming the new task gets ID 12 + log_success "Added Task 12 via AI prompt." + + log_step "Updating Task 3 (update-task AI)" + task-master update-task --id=3 --prompt="Update backend server setup: Ensure CORS is configured to allow requests from the frontend origin." + log_success "Attempted update for Task 3." + + log_step "Updating Tasks from Task 5 (update AI)" + task-master update --from=5 --prompt="Refactor the backend storage module to use a simple JSON file (storage.json) instead of an in-memory object for persistence. Update relevant tasks." + log_success "Attempted update from Task 5 onwards." + + log_step "Expanding Task 8 (AI)" + task-master expand --id=8 # Expand task 8: Frontend logic + log_success "Attempted to expand Task 8." + + log_step "Updating Subtask 8.1 (update-subtask AI)" + task-master update-subtask --id=8.1 --prompt="Implementation note: Remember to handle potential API errors and display a user-friendly message." + log_success "Attempted update for Subtask 8.1." + + # Add a couple more subtasks for multi-remove test + log_step "Adding subtasks to Task 2 (for multi-remove test)" + task-master add-subtask --parent=2 --title="Subtask 2.1 for removal" + task-master add-subtask --parent=2 --title="Subtask 2.2 for removal" + log_success "Added subtasks 2.1 and 2.2." + + log_step "Removing Subtasks 2.1 and 2.2 (multi-ID)" + task-master remove-subtask --id=2.1,2.2 + log_success "Removed subtasks 2.1 and 2.2." + + log_step "Setting status for Task 1 to done" + task-master set-status --id=1 --status=done + log_success "Set status for Task 1 to done." + + log_step "Getting next task (after status change)" + task-master next > next_task_after_change.log + log_success "Next task after change saved to next_task_after_change.log" + + log_step "Clearing subtasks from Task 8" + task-master clear-subtasks --id=8 + log_success "Attempted to clear subtasks from Task 8." + + log_step "Removing Tasks 11 and 12 (multi-ID)" + # Remove the tasks we added earlier + task-master remove-task --id=11,12 -y + log_success "Removed tasks 11 and 12." + + log_step "Generating task files (final)" + task-master generate + log_success "Generated task files." + # === End Core Task Commands Test === + + # === AI Commands (Tested earlier implicitly with add/update/expand) === + log_step "Analyzing complexity (AI with Research)" + task-master analyze-complexity --research --output complexity_results.json + if [ ! 
-f "complexity_results.json" ]; then log_error "Complexity analysis failed."; exit 1; fi + log_success "Complexity analysis saved to complexity_results.json" + + log_step "Generating complexity report (Non-AI)" + task-master complexity-report --file complexity_results.json > complexity_report_formatted.log + log_success "Formatted complexity report saved to complexity_report_formatted.log" + + # Expand All (Commented Out) + # log_step "Expanding All Tasks (AI - Heavy Operation, Commented Out)" + # task-master expand --all --research + # log_success "Attempted to expand all tasks." + + log_step "Expanding Task 1 (AI - Note: Subtasks were removed/cleared)" + task-master expand --id=1 + log_success "Attempted to expand Task 1 again." + # === End AI Commands === + + log_step "Listing tasks again (final)" + task-master list --with-subtasks > task_list_final.log + log_success "Final task list saved to task_list_final.log" + + # --- Test Completion (Output to tee) --- + log_step "E2E Test Steps Completed" + echo "" + ABS_TEST_RUN_DIR="$(pwd)" + echo "Test artifacts and logs are located in: $ABS_TEST_RUN_DIR" + echo "Key artifact files (within above dir):" + echo " - .env (Copied from source)" + echo " - tasks/tasks.json" + echo " - task_list_output.log" + echo " - complexity_results.json" + echo " - complexity_report_formatted.log" + echo " - task_list_after_changes.log" + echo " - models_initial_config.log, models_final_config.log" + echo " - task_list_final.log" + echo " - task_list_initial.log, next_task_initial.log, task_1_details.log" + echo " - validate_dependencies_after_add.log, fix_dependencies_output.log" + echo " - complexity_*.log" + echo "" + echo "Full script log also available at: $LOG_FILE (relative to project root)" + + # Optional: cd back to original directory + # cd "$ORIGINAL_DIR" + +# End of the main execution block brace +} 2>&1 | tee "$LOG_FILE" + +# --- Final Terminal Message --- +EXIT_CODE=${PIPESTATUS[0]} +overall_end_time=$(date +%s) +total_elapsed_seconds=$((overall_end_time - overall_start_time)) + +# Format total duration +total_minutes=$((total_elapsed_seconds / 60)) +total_sec_rem=$((total_elapsed_seconds % 60)) +formatted_total_time=$(printf "%dm%02ds" "$total_minutes" "$total_sec_rem") + +# Count steps and successes from the log file *after* the pipe finishes +# Use grep -c for counting lines matching the pattern +final_step_count=$(grep -c '^==.* STEP [0-9]\+:' "$LOG_FILE" || true) # Count lines starting with === STEP X: +final_success_count=$(grep -c '\[SUCCESS\]' "$LOG_FILE" || true) # Count lines containing [SUCCESS] + +echo "--- E2E Run Summary ---" +echo "Log File: $LOG_FILE" +echo "Total Elapsed Time: ${formatted_total_time}" +echo "Total Steps Executed: ${final_step_count}" # Use count from log + +if [ $EXIT_CODE -eq 0 ]; then + echo "Status: SUCCESS" + # Use counts from log file + echo "Successful Steps: ${final_success_count}/${final_step_count}" +else + echo "Status: FAILED" + # Use count from log file for total steps attempted + echo "Failure likely occurred during/after Step: ${final_step_count}" + # Use count from log file for successes before failure + echo "Successful Steps Before Failure: ${final_success_count}" + echo "Please check the log file '$LOG_FILE' for error details." 
+fi
+echo "-------------------------"
+
+exit $EXIT_CODE # Exit with the status of the main script block
\ No newline at end of file
diff --git a/tests/fixtures/sample-prd.txt b/tests/fixtures/sample-prd.txt
index fadff345..1694b1bd 100644
--- a/tests/fixtures/sample-prd.txt
+++ b/tests/fixtures/sample-prd.txt
@@ -1,42 +1,82 @@
-# Sample PRD for Testing
+
+# Overview
+This document outlines the requirements for a minimal web-based URL Shortener application. The application allows users to input a long URL and receive a shorter, alias URL that redirects to the original destination. This serves as a basic example of a micro-SaaS product. It's intended for anyone needing to create shorter links for sharing. The value is in providing a simple, functional utility accessible via a web browser.
+# Core Features
+1. **URL Input & Shortening:** A user interface with an input field for pasting a long URL and a button to trigger the shortening process.
+   - *Why:* The primary function for the user interaction.
+   - *How:* A React component with a text input and a submit button. Clicking the button sends the long URL to a backend API.
+2. **Short URL Display:** After successful shortening, the application displays the newly generated short URL to the user.
+   - *Why:* Provides the result of the core function to the user.
+   - *How:* The React frontend updates to show the short URL returned by the API (e.g., `http://your-domain.com/aB3cD`). Include a "copy to clipboard" button for convenience.
+3. **URL Redirection:** Accessing a generated short URL in a browser redirects the user to the original long URL.
+   - *Why:* The fundamental purpose of the shortened link.
+   - *How:* A backend API endpoint handles requests to `/:shortCode`. It looks up the code in a data store and issues an HTTP redirect (301 or 302) to the corresponding long URL.
+4. **Basic Persistence:** Short URL mappings (short code -> long URL) persist across requests.
+   - *Why:* Short URLs need to remain functional after creation.
+   - *How:* A simple backend data store (e.g., initially an in-memory object for testing, then potentially a JSON file or simple database) holds the mappings.
+
+# User Experience
+- **User Persona:** Anyone wanting to shorten a long web link.
+- **Key User Flow:** User visits the web app -> Pastes a long URL into the input field -> Clicks "Shorten" -> Sees the generated short URL -> Copies the short URL -> (Later) Uses the short URL in a browser and gets redirected.
+- **UI/UX Considerations:** Clean, minimal single-page interface. Clear input field, prominent button, easy-to-read display of the short URL, copy button. Basic validation feedback (e.g., "Invalid URL", "Success!").
+
 # Technical Architecture
-
-## System Components
-1. **Task Management Core**
-   - Tasks.json file structure
-   - Task model with dependencies
-   - Task state management
-
-2. **Command Line Interface**
-   - Command parsing and execution
-   - Display utilities
-
-## Data Models
-
-### Task Model
-```json
-{
-  "id": 1,
-  "title": "Task Title",
-  "description": "Brief task description",
-  "status": "pending|done|deferred",
-  "dependencies": [0],
-  "priority": "high|medium|low",
-  "details": "Implementation instructions",
-  "testStrategy": "Verification approach"
-}
-```
+- **System Components:**
+  - Frontend: Single Page Application (SPA) built with Vite + React.
+  - Backend: Simple API server (e.g., Node.js with Express).
+- **Data Model:** A key-value store mapping `shortCode` (string) to `longUrl` (string).
+- **APIs & Integrations:**
+  - Backend API:
+    - `POST /api/shorten`: Accepts `{ longUrl: string }` in the request body. Generates a unique `shortCode`, stores the mapping, returns `{ shortUrl: string }`.
+    - `GET /:shortCode`: Looks up `shortCode`. If found, performs HTTP redirect to `longUrl`. If not found, returns 404.
+- **Infrastructure:** Frontend can be hosted on static hosting. Backend needs a simple server environment (Node.js).
+- **Libraries:**
+  - Frontend: `react`, `react-dom`, `axios` (or `fetch` API) for API calls. Consider a simple state management solution if needed (e.g., `useState`, `useContext`).
+  - Backend: `express`, `nanoid` (or similar for short code generation).
 
 # Development Roadmap
+- **MVP Requirements:**
+  1. Setup Vite + React project.
+  2. Create basic React UI components (InputForm, ResultDisplay).
+  3. Setup basic Node.js/Express backend server.
+  4. Implement backend data storage module (start with in-memory object).
+  5. Implement unique short code generation logic (e.g., using `nanoid`).
+  6. Implement backend `POST /api/shorten` endpoint logic.
+  7. Implement backend `GET /:shortCode` redirect logic.
+  8. Implement frontend logic to take input, call `POST /api/shorten`, and display the result.
+  9. Basic frontend input validation (check if likely a URL).
+- **Future Enhancements:** User accounts, custom short codes, analytics (click tracking), using a persistent database, error handling improvements, UI styling. (Out of scope for MVP).
 
-## Phase 1: Core Task Management System
-1. **Task Data Structure**
-   - Implement the tasks.json structure
-   - Create file system interactions
+# Logical Dependency Chain
+1. Vite + React Project Setup.
+2. Basic Backend Server Setup (Express).
+3. Backend Storage Module (in-memory first).
+4. Short Code Generation Logic.
+5. Implement `POST /api/shorten` endpoint (depends on 3 & 4).
+6. Implement `GET /:shortCode` endpoint (depends on 3).
+7. Frontend UI Components.
+8. Frontend logic to call `POST /api/shorten` (depends on 5 & 7).
+9. Frontend display logic (depends on 7 & 8).
+   *Goal is to get the backend API working first, then build the frontend to consume it.*
 
-2. **Command Line Interface Foundation**
-   - Implement command parsing
-   - Create help documentation
- 
\ No newline at end of file
+# Risks and Mitigations
+- **Risk:** Short code collisions (generating the same code twice).
+  - **Mitigation (MVP):** Use a library like `nanoid` with sufficient length to make collisions highly improbable for a simple service. Add a retry loop in generation if a collision *is* detected (check if code exists before storing).
+- **Risk:** Storing invalid or malicious URLs.
+  - **Mitigation (MVP):** Basic URL validation on the frontend (simple regex) and potentially on the backend. Sanitize input. Advanced checks are out of scope.
+- **Risk:** Scalability of in-memory store.
+  - **Mitigation (MVP):** Acceptable for MVP. Acknowledge need for persistent database (JSON file, Redis, SQL/NoSQL DB) for future enhancement.
+
+# Appendix
+- Example Data Store (in-memory object):
+  ```javascript
+  // backend/storage.js
+  const urlMap = {
+    'aB3cD': 'https://very-long-url-example.com/with/path/and/query?params=true',
+    'xY7zW': 'https://another-example.org/'
+  };
+  // ... functions to get/set URLs ...
+  ```
+ 
\ No newline at end of file
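
To complement the fixture's appendix, here is a minimal, hypothetical sketch of the two backend endpoints the sample PRD describes, assuming the `express` and `nanoid` libraries it names under Libraries and the in-memory map from the appendix. The file layout, port, and `BASE_URL` constant are illustrative and not part of this patch.

```javascript
// Hypothetical backend/server.js sketch for the sample PRD (illustration only).
import express from 'express';
import { nanoid } from 'nanoid';

const app = express();
app.use(express.json());

const urlMap = {};                        // in-memory store, as the PRD suggests for the MVP
const BASE_URL = 'http://localhost:3000'; // assumed base for building short URLs

// POST /api/shorten: { longUrl } -> { shortUrl }
app.post('/api/shorten', (req, res) => {
  const { longUrl } = req.body || {};
  if (!longUrl || !/^https?:\/\//i.test(longUrl)) {
    return res.status(400).json({ error: 'Invalid URL' });
  }
  let shortCode = nanoid(5);
  while (urlMap[shortCode]) shortCode = nanoid(5); // retry on the (unlikely) collision
  urlMap[shortCode] = longUrl;
  res.json({ shortUrl: `${BASE_URL}/${shortCode}` });
});

// GET /:shortCode: redirect to the stored long URL, or 404 if unknown
app.get('/:shortCode', (req, res) => {
  const longUrl = urlMap[req.params.shortCode];
  if (!longUrl) return res.status(404).send('Not found');
  res.redirect(302, longUrl);
});

app.listen(3000);
```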