fix(parse-prd): simplifies the force and append variable names across the chain to avoid confusion. parse-prd append was tested on MCP and the fix is good to go. Also adjusts the e2e test to properly capture costs.

Eyal Toledano
2025-05-17 20:10:53 -04:00
parent fcd80623b6
commit cd6e42249e
4 changed files with 122 additions and 68 deletions

View File

@@ -105,11 +105,9 @@ export async function parsePRDDirect(args, log, context = {}) {
 }
 }
-const useForce = force === true;
-const useAppend = append === true;
-if (useAppend) {
+if (append) {
 logWrapper.info('Append mode enabled.');
-if (useForce) {
+if (force) {
 logWrapper.warn(
 'Both --force and --append flags were provided. --force takes precedence; append mode will be ignored.'
 );
@@ -117,7 +115,7 @@ export async function parsePRDDirect(args, log, context = {}) {
 }
 logWrapper.info(
-`Parsing PRD via direct function. Input: ${inputPath}, Output: ${outputPath}, NumTasks: ${numTasks}, Force: ${useForce}, Append: ${useAppend}, ProjectRoot: ${projectRoot}`
+`Parsing PRD via direct function. Input: ${inputPath}, Output: ${outputPath}, NumTasks: ${numTasks}, Force: ${force}, Append: ${append}, ProjectRoot: ${projectRoot}`
 );
 const wasSilent = isSilentMode();
@@ -135,8 +133,8 @@ export async function parsePRDDirect(args, log, context = {}) {
 session,
 mcpLog: logWrapper,
 projectRoot,
-useForce,
-useAppend,
+force,
+append,
 commandName: 'parse-prd',
 outputType: 'mcp'
 },

View File

@@ -48,8 +48,8 @@ const prdResponseSchema = z.object({
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {number} numTasks - Number of tasks to generate
 * @param {Object} options - Additional options
-* @param {boolean} [options.useForce=false] - Whether to overwrite existing tasks.json.
-* @param {boolean} [options.useAppend=false] - Append to existing tasks file.
+* @param {boolean} [options.force=false] - Whether to overwrite existing tasks.json.
+* @param {boolean} [options.append=false] - Append to existing tasks file.
 * @param {Object} [options.reportProgress] - Function to report progress (optional, likely unused).
 * @param {Object} [options.mcpLog] - MCP logger object (optional).
 * @param {Object} [options.session] - Session object from MCP server (optional).
@@ -62,8 +62,8 @@ async function parsePRD(prdPath, tasksPath, numTasks, options = {}) {
 mcpLog,
 session,
 projectRoot,
-useForce = false,
-useAppend = false
+force = false,
+append = false
 } = options;
 const isMCP = !!mcpLog;
 const outputFormat = isMCP ? 'json' : 'text';
@@ -90,9 +90,7 @@ async function parsePRD(prdPath, tasksPath, numTasks, options = {}) {
 }
 };
-report(
-`Parsing PRD file: ${prdPath}, Force: ${useForce}, Append: ${useAppend}`
-);
+report(`Parsing PRD file: ${prdPath}, Force: ${force}, Append: ${append}`);
 let existingTasks = [];
 let nextId = 1;
@@ -101,7 +99,7 @@ async function parsePRD(prdPath, tasksPath, numTasks, options = {}) {
 try {
 // Handle file existence and overwrite/append logic
 if (fs.existsSync(tasksPath)) {
-if (useAppend) {
+if (append) {
 report(
 `Append mode enabled. Reading existing tasks from ${tasksPath}`,
 'info'
@@ -123,7 +121,7 @@ async function parsePRD(prdPath, tasksPath, numTasks, options = {}) {
 );
 existingTasks = []; // Reset if read fails
 }
-} else if (!useForce) {
+} else if (!force) {
 // Not appending and not forcing overwrite
 const overwriteError = new Error(
 `Output file ${tasksPath} already exists. Use --force to overwrite or --append.`
@@ -288,7 +286,7 @@ Guidelines:
 );
 });
-const finalTasks = useAppend
+const finalTasks = append
 ? [...existingTasks, ...processedNewTasks]
 : processedNewTasks;
 const outputData = { tasks: finalTasks };
@@ -296,7 +294,7 @@ Guidelines:
 // Write the final tasks to the file
 writeJSON(tasksPath, outputData);
 report(
-`Successfully ${useAppend ? 'appended' : 'generated'} ${processedNewTasks.length} tasks in ${tasksPath}`,
+`Successfully ${append ? 'appended' : 'generated'} ${processedNewTasks.length} tasks in ${tasksPath}`,
 'success'
 );
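For reference, a minimal caller sketch (not part of this commit) showing how the renamed options now flow straight through to parsePRD. The wrapper name regenerateTasks, the import path, and the concrete arguments are illustrative assumptions; only the documented signature (prdPath, tasksPath, numTasks, options.force, options.append) comes from the diff above.

// Hypothetical caller sketch; assumes parse-prd.js default-exports parsePRD.
import parsePRD from './parse-prd.js';

async function regenerateTasks({ force = false, append = false } = {}) {
  // The option names now match the CLI flags (--force / --append),
  // so no useForce/useAppend aliasing is needed anywhere in the chain.
  return parsePRD('scripts/prd.txt', 'tasks/tasks.json', 10, { force, append });
}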

View File

@@ -6,28 +6,38 @@
 # It expects the project root path to be passed as the second argument.
 # --- New Function: extract_and_sum_cost ---
-# Takes a string containing command output and the current total cost.
-# Extracts costs (lines with "Cost: $X.YYYY USD" or "Total Cost: $X.YYYY USD")
-# from the output, sums them, and adds to the current total.
-# Returns the new total cost.
+# Takes a string containing command output.
+# Extracts costs (lines with "Est. Cost: $X.YYYYYY" or similar from telemetry output)
+# from the output, sums them, and adds them to the GLOBAL total_e2e_cost variable.
 extract_and_sum_cost() {
 local command_output="$1"
-local current_total_cost="$2"
+# Ensure total_e2e_cost is treated as a number, default to 0.0 if not set or invalid
+if ! [[ "$total_e2e_cost" =~ ^[0-9]+(\.[0-9]+)?$ ]]; then
+total_e2e_cost="0.0"
+fi
 local extracted_cost_sum="0.0"
-# Grep for lines containing "Cost: $" or "Total Cost: $", then extract the numeric value.
-# Handles cases like "Cost: $0.001234 USD" or "Total Cost: $0.001234 USD"
+# Grep for lines containing "Est. Cost: $", then extract the numeric value.
+# Example line: │ Est. Cost: $0.093549 │
+# Accumulate all costs found in the command_output
 while IFS= read -r line; do
-# Extract the numeric part after '$' and before ' USD'
-cost_value=$(echo "$line" | grep -o -E '(\$ ?[0-9]+\.[0-9]+)' | sed 's/\$ //g' | sed 's/\$//g')
+# Extract the numeric part after 'Est. Cost: $' and before any trailing spaces/chars
+cost_value=$(echo "$line" | grep -o -E 'Est\. Cost: \$([0-9]+\.[0-9]+)' | sed -E 's/Est\. Cost: \$//g')
 if [[ -n "$cost_value" && "$cost_value" =~ ^[0-9]+\.[0-9]+$ ]]; then
+# echo "[DEBUG] Found cost value: $cost_value in line: '$line'" # For debugging
 extracted_cost_sum=$(echo "$extracted_cost_sum + $cost_value" | bc)
+# else # For debugging
+# echo "[DEBUG] No valid cost value found or extracted in line: '$line' (extracted: '$cost_value')" # For debugging
 fi
-done < <(echo "$command_output" | grep -E 'Cost: \$|Total Cost: \$')
+done < <(echo "$command_output" | grep -E 'Est\. Cost: \$')
-new_total_cost=$(echo "$current_total_cost + $extracted_cost_sum" | bc)
-echo "$new_total_cost"
+# echo "[DEBUG] Extracted sum from this command output: $extracted_cost_sum" # For debugging
+if (( $(echo "$extracted_cost_sum > 0" | bc -l) )); then
+total_e2e_cost=$(echo "$total_e2e_cost + $extracted_cost_sum" | bc)
+# echo "[DEBUG] Updated global total_e2e_cost: $total_e2e_cost" # For debugging
+fi
+# No echo here, the function modifies a global variable.
 }
+export -f extract_and_sum_cost # Export for use in other scripts if sourced
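For reference, a minimal sketch (not part of this commit) of the capture-and-sum pattern the e2e script adopts below; the variable names and the chosen command are illustrative, only extract_and_sum_cost and the global total_e2e_cost come from the function above.

# Illustrative usage only; mirrors the pattern repeated for each AI command below.
total_e2e_cost="0.0"

cmd_output=$(task-master parse-prd ./prd.txt --force 2>&1)  # capture stdout and stderr
exit_status=$?
echo "$cmd_output"                   # keep the command output visible in the test log
extract_and_sum_cost "$cmd_output"   # adds any "Est. Cost: $..." lines to total_e2e_cost

if [ $exit_status -ne 0 ]; then
  echo "Command failed with exit status $exit_status"
fi
echo "Cumulative E2E cost so far: \$${total_e2e_cost}"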

View File

@@ -340,30 +340,43 @@ log_step() {
log_success "Project initialized."
log_step "Parsing PRD"
task-master parse-prd ./prd.txt --force
if [ ! -s "tasks/tasks.json" ]; then
log_error "Parsing PRD failed: tasks/tasks.json not found or is empty."
cmd_output_prd=$(task-master parse-prd ./prd.txt --force 2>&1)
exit_status_prd=$?
echo "$cmd_output_prd"
extract_and_sum_cost "$cmd_output_prd"
if [ $exit_status_prd -ne 0 ] || [ ! -s "tasks/tasks.json" ]; then
log_error "Parsing PRD failed: tasks/tasks.json not found or is empty. Exit status: $exit_status_prd"
exit 1
else
log_success "PRD parsed successfully."
fi
log_success "PRD parsed successfully."
log_step "Expanding Task 1 (to ensure subtask 1.1 exists)"
# Add --research flag if needed and API keys support it
task-master analyze-complexity --research --output complexity_results.json
if [ ! -f "complexity_results.json" ]; then
log_error "Complexity analysis failed: complexity_results.json not found."
cmd_output_analyze=$(task-master analyze-complexity --research --output complexity_results.json 2>&1)
exit_status_analyze=$?
echo "$cmd_output_analyze"
extract_and_sum_cost "$cmd_output_analyze"
if [ $exit_status_analyze -ne 0 ] || [ ! -f "complexity_results.json" ]; then
log_error "Complexity analysis failed: complexity_results.json not found. Exit status: $exit_status_analyze"
exit 1
else
log_success "Complexity analysis saved to complexity_results.json"
fi
log_success "Complexity analysis saved to complexity_results.json"
log_step "Generating complexity report"
task-master complexity-report --file complexity_results.json > complexity_report_formatted.log
log_success "Formatted complexity report saved to complexity_report_formatted.log"
log_step "Expanding Task 1 (assuming it exists)"
# Add --research flag if needed and API keys support it
task-master expand --id=1 # Add --research?
log_success "Attempted to expand Task 1."
cmd_output_expand1=$(task-master expand --id=1 2>&1)
exit_status_expand1=$?
echo "$cmd_output_expand1"
extract_and_sum_cost "$cmd_output_expand1"
if [ $exit_status_expand1 -ne 0 ]; then
log_error "Expanding Task 1 failed. Exit status: $exit_status_expand1"
else
log_success "Attempted to expand Task 1."
fi
log_step "Setting status for Subtask 1.1 (assuming it exists)"
task-master set-status --id=1.1 --status=done
@@ -407,10 +420,11 @@ log_step() {
 if [ -x "$verification_script_path" ]; then
 log_info "--- Executing Fallback Verification Script: $verification_script_path ---"
-# Execute the script directly, allowing output to flow to tee
 # Pass the current directory (the test run dir) as the argument
-"$verification_script_path" "$(pwd)"
-verification_exit_code=$? # Capture exit code immediately
+verification_output=$("$verification_script_path" "$(pwd)" 2>&1)
+verification_exit_code=$?
+echo "$verification_output"
+extract_and_sum_cost "$verification_output"
 log_info "--- Finished Fallback Verification Script Execution (Exit Code: $verification_exit_code) ---"
 # Log success/failure based on captured exit code
@@ -418,13 +432,9 @@ log_step() {
log_success "Fallback verification script reported success."
else
log_error "Fallback verification script reported FAILURE (Exit Code: $verification_exit_code)."
# Decide whether to exit the main script or just log the error
# exit 1 # Uncomment to make verification failure fatal
fi
else
log_error "Fallback verification script not found or not executable at $verification_script_path. Skipping verification."
# Decide whether to exit or continue
# exit 1
fi
else
log_info "Skipping Fallback Verification test as requested by flag."
@@ -483,6 +493,7 @@ log_step() {
 # 3. Check for success and extract task ID
 new_task_id=""
+extract_and_sum_cost "$add_task_cmd_output"
 if [ $add_task_exit_code -eq 0 ] && (echo "$add_task_cmd_output" | grep -q "✓ Added new task #" || echo "$add_task_cmd_output" | grep -q "✅ New task created successfully:" || echo "$add_task_cmd_output" | grep -q "Task [0-9]\+ Created Successfully"); then
 new_task_id=$(echo "$add_task_cmd_output" | grep -o -E "(Task |#)[0-9.]+" | grep -o -E "[0-9.]+" | head -n 1)
 if [ -n "$new_task_id" ]; then
@@ -569,8 +580,6 @@ log_step() {
log_success "Validation correctly identified non-existent dependency 999."
else
log_error "Validation DID NOT report non-existent dependency 999 as expected. Check validate_deps_non_existent.log"
# Consider exiting here if this check fails, as it indicates a validation logic problem
# exit 1
fi
log_step "Fixing dependencies (should remove 1 -> 999)"
@@ -581,7 +590,6 @@ log_step() {
 task-master validate-dependencies > validate_deps_after_fix_non_existent.log 2>&1 || true # Allow potential failure
 if grep -q "Non-existent dependency ID: 999" validate_deps_after_fix_non_existent.log; then
 log_error "Validation STILL reports non-existent dependency 999 after fix. Check logs."
-# exit 1
 else
 log_success "Validation shows non-existent dependency 999 was removed."
 fi
@@ -600,7 +608,6 @@ log_step() {
log_success "Validation correctly identified circular dependency between 4 and 5."
else
log_error "Validation DID NOT report circular dependency 4<->5 as expected. Check validate_deps_circular.log"
# exit 1
fi
log_step "Fixing dependencies (should remove one side of 4 <-> 5)"
@@ -611,7 +618,6 @@ log_step() {
 task-master validate-dependencies > validate_deps_after_fix_circular.log 2>&1 || true # Allow potential failure
 if grep -q -E "Circular dependency detected involving task IDs: (4, 5|5, 4)" validate_deps_after_fix_circular.log; then
 log_error "Validation STILL reports circular dependency 4<->5 after fix. Check logs."
-# exit 1
 else
 log_success "Validation shows circular dependency 4<->5 was resolved."
 fi
@@ -629,25 +635,60 @@ log_step() {
log_success "Added Task $manual_task_id manually."
log_step "Adding Task $ai_task_id (AI)"
task-master add-task --prompt="Implement basic UI styling using CSS variables for colors and spacing" --priority=medium --dependencies=1 # Depends on frontend setup
log_success "Added Task $ai_task_id via AI prompt."
cmd_output_add_ai=$(task-master add-task --prompt="Implement basic UI styling using CSS variables for colors and spacing" --priority=medium --dependencies=1 2>&1)
exit_status_add_ai=$?
echo "$cmd_output_add_ai"
extract_and_sum_cost "$cmd_output_add_ai"
if [ $exit_status_add_ai -ne 0 ]; then
log_error "Adding AI Task $ai_task_id failed. Exit status: $exit_status_add_ai"
else
log_success "Added Task $ai_task_id via AI prompt."
fi
log_step "Updating Task 3 (update-task AI)"
task-master update-task --id=3 --prompt="Update backend server setup: Ensure CORS is configured to allow requests from the frontend origin."
log_success "Attempted update for Task 3."
cmd_output_update_task3=$(task-master update-task --id=3 --prompt="Update backend server setup: Ensure CORS is configured to allow requests from the frontend origin." 2>&1)
exit_status_update_task3=$?
echo "$cmd_output_update_task3"
extract_and_sum_cost "$cmd_output_update_task3"
if [ $exit_status_update_task3 -ne 0 ]; then
log_error "Updating Task 3 failed. Exit status: $exit_status_update_task3"
else
log_success "Attempted update for Task 3."
fi
log_step "Updating Tasks from Task 5 (update AI)"
task-master update --from=5 --prompt="Refactor the backend storage module to use a simple JSON file (storage.json) instead of an in-memory object for persistence. Update relevant tasks."
log_success "Attempted update from Task 5 onwards."
cmd_output_update_from5=$(task-master update --from=5 --prompt="Refactor the backend storage module to use a simple JSON file (storage.json) instead of an in-memory object for persistence. Update relevant tasks." 2>&1)
exit_status_update_from5=$?
echo "$cmd_output_update_from5"
extract_and_sum_cost "$cmd_output_update_from5"
if [ $exit_status_update_from5 -ne 0 ]; then
log_error "Updating from Task 5 failed. Exit status: $exit_status_update_from5"
else
log_success "Attempted update from Task 5 onwards."
fi
log_step "Expanding Task 8 (AI)"
task-master expand --id=8 # Expand task 8: Frontend logic
log_success "Attempted to expand Task 8."
cmd_output_expand8=$(task-master expand --id=8 2>&1)
exit_status_expand8=$?
echo "$cmd_output_expand8"
extract_and_sum_cost "$cmd_output_expand8"
if [ $exit_status_expand8 -ne 0 ]; then
log_error "Expanding Task 8 failed. Exit status: $exit_status_expand8"
else
log_success "Attempted to expand Task 8."
fi
log_step "Updating Subtask 8.1 (update-subtask AI)"
task-master update-subtask --id=8.1 --prompt="Implementation note: Remember to handle potential API errors and display a user-friendly message."
log_success "Attempted update for Subtask 8.1."
cmd_output_update_subtask81=$(task-master update-subtask --id=8.1 --prompt="Implementation note: Remember to handle potential API errors and display a user-friendly message." 2>&1)
exit_status_update_subtask81=$?
echo "$cmd_output_update_subtask81"
extract_and_sum_cost "$cmd_output_update_subtask81"
if [ $exit_status_update_subtask81 -ne 0 ]; then
log_error "Updating Subtask 8.1 failed. Exit status: $exit_status_update_subtask81"
else
log_success "Attempted update for Subtask 8.1."
fi
# Add a couple more subtasks for multi-remove test
log_step 'Adding subtasks to Task 2 (for multi-remove test)'
@@ -740,9 +781,16 @@ log_step() {
 # === AI Commands (Re-test some after changes) ===
 log_step "Analyzing complexity (AI with Research - Final Check)"
-task-master analyze-complexity --research --output complexity_results_final.json
-if [ ! -f "complexity_results_final.json" ]; then log_error "Final Complexity analysis failed."; exit 1; fi
-log_success "Final Complexity analysis saved."
+cmd_output_analyze_final=$(task-master analyze-complexity --research --output complexity_results_final.json 2>&1)
+exit_status_analyze_final=$?
+echo "$cmd_output_analyze_final"
+extract_and_sum_cost "$cmd_output_analyze_final"
+if [ $exit_status_analyze_final -ne 0 ] || [ ! -f "complexity_results_final.json" ]; then
+log_error "Final Complexity analysis failed. Exit status: $exit_status_analyze_final. File found: $(test -f complexity_results_final.json && echo true || echo false)"
+exit 1 # Critical for subsequent report step
+else
+log_success "Final Complexity analysis command executed and file created."
+fi
 log_step "Generating complexity report (Non-AI - Final Check)"
 task-master complexity-report --file complexity_results_final.json > complexity_report_formatted_final.log