refactor: test arch audit

Murat Ozcan
2025-10-16 19:54:30 -05:00
parent bee9c5dce7
commit 8e41b251a3
8 changed files with 19 additions and 342 deletions

@@ -18,35 +18,7 @@ template: "{installed_path}/atdd-checklist-template.md"
# Variables and inputs
variables:
  # Story context
  story_file: "" # Path to story markdown with acceptance criteria
  test_dir: "{project-root}/tests"
  test_framework: "" # Detected from framework workflow (playwright, cypress)
  # Test level selection
  test_levels: "e2e,api,component" # Which levels to generate
  primary_level: "e2e" # Primary test level for acceptance criteria
  include_component_tests: true # Generate component tests for UI logic
  # ATDD approach
  start_failing: true # Tests must fail initially (red phase)
  use_given_when_then: true # BDD-style test structure
  network_first: true # Route interception before navigation
  one_assertion_per_test: true # Atomic test design
  # Data and fixtures
  generate_factories: true # Create data factory stubs
  generate_fixtures: true # Create fixture architecture
  auto_cleanup: true # Fixtures clean up their data
  # Output configuration
  output_checklist: "{output_folder}/atdd-checklist-{story_id}.md"
  include_data_testids: true # List required data-testid attributes
  include_mock_requirements: true # Document mock/stub needs
  # Advanced options
  auto_load_knowledge: true # Load fixture-architecture, data-factories, component-tdd fragments
  share_with_dev: true # Provide implementation checklist to DEV agent
  test_dir: "{project-root}/tests" # Root test directory
  # Output configuration
  default_output_file: "{output_folder}/atdd-checklist-{story_id}.md"
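
For reference, a minimal sketch of a test honoring these defaults: network_first route interception before navigation, given/when/then structure, and one explicit assertion. The route, test ID, and data-testid are hypothetical, not part of this commit; in the red phase (start_failing), the assertion fails until the UI is implemented.

```ts
import { test, expect } from '@playwright/test';

test('1.3-E2E-001: shows saved profile name', async ({ page }) => {
  // Given: the profile API is intercepted BEFORE navigation (network_first)
  await page.route('**/api/profile', (route) =>
    route.fulfill({ status: 200, json: { name: 'Ada' } }),
  );

  // When: the user opens the profile page
  await page.goto('/profile');

  // Then: a single explicit assertion (one_assertion_per_test)
  await expect(page.getByTestId('profile-name')).toHaveText('Ada');
});
```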

@@ -18,61 +18,13 @@ template: false
# Variables and inputs
variables:
  # Execution mode
  # Execution mode and targeting
  standalone_mode: true # Can work without BMad artifacts (true) or integrate with BMad (false)
  # Target specification (flexible - can be story, feature, or directory)
  story_file: "" # Path to story markdown (optional - only if BMad workflow)
  target_feature: "" # Feature name or directory to analyze (e.g., "user-authentication" or "src/auth/")
  target_files: "" # Specific files to analyze (comma-separated paths)
  # Discovery and analysis
  test_dir: "{project-root}/tests"
  source_dir: "{project-root}/src"
  auto_discover_features: true # Automatically find features needing tests
  analyze_coverage: true # Check existing test coverage gaps
  # Coverage strategy
  coverage_target: "critical-paths" # critical-paths, comprehensive, selective
  test_levels: "e2e,api,component,unit" # Which levels to generate (comma-separated)
  avoid_duplicate_coverage: true # Don't test same behavior at multiple levels
  # Test priorities (from test-priorities.md knowledge fragment)
  include_p0: true # Critical paths (every commit)
  include_p1: true # High priority (PR to main)
  include_p2: true # Medium priority (nightly)
  include_p3: false # Low priority (on-demand)
  # Test design principles
  use_given_when_then: true # BDD-style test structure
  one_assertion_per_test: true # Atomic test design
  network_first: true # Route interception before navigation
  deterministic_waits: true # No hard waits or sleeps
  # Infrastructure generation
  generate_fixtures: true # Create/enhance fixture architecture
  generate_factories: true # Create/enhance data factories
  update_helpers: true # Add utility functions
  # Integration with BMad artifacts (when available)
  use_test_design: true # Load test-design.md if exists
  use_tech_spec: true # Load tech-spec.md if exists
  use_prd: true # Load PRD.md if exists
  # Output configuration
  update_readme: true # Update test README with new specs
  update_package_scripts: true # Add test execution scripts
  output_summary: "{output_folder}/automation-summary.md"
  # Quality gates
  max_test_duration: 90 # seconds (1.5 minutes per test)
  max_file_lines: 300 # lines (keep tests lean)
  require_self_cleaning: true # All tests must clean up data
  # Advanced options
  auto_load_knowledge: true # Load test-levels, test-priorities, fixture-architecture, selective-testing, ci-burn-in
  run_tests_after_generation: true # Verify tests pass/fail as expected
  auto_validate: true # Always validate generated tests
  # Directory paths
  test_dir: "{project-root}/tests" # Root test directory
  source_dir: "{project-root}/src" # Source code directory
  # Output configuration
  default_output_file: "{output_folder}/automation-summary.md"
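
A minimal sketch of the factory-plus-fixture pattern that generate_factories, generate_fixtures, and require_self_cleaning describe. The User shape, API endpoints, and helper names are hypothetical; the fixture assumes a configured baseURL.

```ts
import { test as base } from '@playwright/test';
import { randomUUID } from 'node:crypto';

type User = { id: string; email: string };

// Pure factory: deterministic data with overridable fields
export const buildUser = (overrides: Partial<User> = {}): User => ({
  id: randomUUID(),
  email: `user-${Date.now()}@example.com`,
  ...overrides,
});

// Fixture wraps the factory and guarantees cleanup (require_self_cleaning)
export const test = base.extend<{ user: User }>({
  user: async ({ request }, use) => {
    const user = buildUser();
    await request.post('/api/users', { data: user }); // create test data
    await use(user); // hand the user to the test body
    await request.delete(`/api/users/${user.id}`); // clean up afterwards
  },
});
```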

@@ -17,43 +17,8 @@ validation: "{installed_path}/checklist.md"
# Variables and inputs
variables:
  ci_platform: "auto" # auto, github-actions, gitlab-ci, circle-ci, jenkins
  test_framework: "" # Detected from framework workflow (playwright, cypress)
  test_dir: "{project-root}/tests"
  config_file: "" # Framework config file path
  node_version_source: "{project-root}/.nvmrc" # Node version for CI
  # Execution configuration
  parallel_jobs: 4 # Number of parallel test shards
  burn_in_enabled: true # Enable burn-in loop for flaky test detection
  burn_in_iterations: 10 # Number of burn-in iterations
  selective_testing_enabled: true # Enable changed test detection
  # Artifact configuration
  artifact_retention_days: 30
  upload_artifacts_on: "failure" # failure, always, never
  artifact_types: "traces,screenshots,videos,html-report" # Comma-separated
  # Performance tuning
  cache_enabled: true # Enable dependency caching
  browser_cache_enabled: true # Cache browser binaries
  timeout_minutes: 60 # Overall job timeout
  test_timeout_minutes: 30 # Individual test run timeout
  # Notification configuration
  notify_on_failure: false # Enable notifications (requires setup)
  notification_channels: "" # slack, email, discord
  # Output artifacts
  generate_ci_readme: true
  generate_local_mirror_script: true
  generate_secrets_checklist: true
  # CI-specific optimizations
  use_matrix_strategy: true # Parallel execution across OS/browsers
  use_sharding: true # Split tests into shards
  retry_failed_tests: true
  retry_count: 2
  ci_platform: "auto" # auto, github-actions, gitlab-ci, circle-ci, jenkins - user can override
  test_dir: "{project-root}/tests" # Root test directory
  # Output configuration
  default_output_file: "{project-root}/.github/workflows/test.yml" # GitHub Actions default
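
A sketch of how a Playwright config could mirror these CI variables; the specific values are examples, not what the workflow emits.

```ts
import { defineConfig } from '@playwright/test';

export default defineConfig({
  retries: process.env.CI ? 2 : 0, // retry_failed_tests with retry_count: 2
  globalTimeout: 30 * 60 * 1000, // run-level ceiling, mirrors test_timeout_minutes: 30
  use: { trace: 'on-first-retry' }, // capture traces only on failure/retry
  // use_sharding: splitting happens at the CLI, one shard per CI job
  // (parallel_jobs: 4):  npx playwright test --shard=1/4 ... --shard=4/4
});
```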

@@ -17,23 +17,10 @@ validation: "{installed_path}/checklist.md"
# Variables and inputs
variables:
  test_framework: "" # playwright or cypress - auto-detect from package.json or ask
  project_type: "" # react, vue, angular, next, node - detected from package.json
  bundler: "" # vite, webpack, rollup, esbuild - detected from package.json
  test_dir: "{project-root}/tests" # Root test directory
  config_file: "" # Will be set to {project-root}/{framework}.config.{ts|js}
  use_typescript: true # Prefer TypeScript configuration
  standalone_mode: true # Can run without story context
  # Framework selection criteria
  framework_preference: "auto" # auto, playwright, cypress
  project_size: "auto" # auto, small, large - influences framework choice
  # Output artifacts
  generate_env_example: true
  generate_nvmrc: true
  generate_readme: true
  generate_sample_tests: true
  framework_preference: "auto" # auto, playwright, cypress - user can override auto-detection
  project_size: "auto" # auto, small, large - influences framework recommendation
  # Output configuration
  default_output_file: "{test_dir}/README.md" # Main deliverable is test setup README
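
A hypothetical sketch of the "auto-detect from package.json" step these variables imply; the function name and fallback behavior are illustrative, not the workflow's actual implementation.

```ts
import { readFileSync } from 'node:fs';

type Framework = 'playwright' | 'cypress' | 'unknown';

export function detectFramework(pkgPath = 'package.json'): Framework {
  const pkg = JSON.parse(readFileSync(pkgPath, 'utf8'));
  // Merge both dependency maps; either may declare the test framework
  const deps: Record<string, string> = {
    ...pkg.dependencies,
    ...pkg.devDependencies,
  };
  if ('@playwright/test' in deps) return 'playwright';
  if ('cypress' in deps) return 'cypress';
  return 'unknown'; // caller falls back to asking the user
}
```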

@@ -18,58 +18,8 @@ template: "{installed_path}/nfr-report-template.md"
# Variables and inputs
variables:
  # Target specification
  story_file: "" # Path to story markdown (optional)
  feature_name: "" # Feature to assess (if no story file)
  # NFR categories to assess
  assess_performance: true # Response time, throughput, resource usage
  assess_security: true # Authentication, authorization, data protection
  assess_reliability: true # Error handling, recovery, availability
  assess_maintainability: true # Code quality, test coverage, documentation
  # Custom NFR categories (comma-separated)
  custom_nfr_categories: "" # e.g., "accessibility,internationalization,compliance"
  # Evidence sources
  test_results_dir: "{project-root}/test-results"
  metrics_dir: "{project-root}/metrics"
  logs_dir: "{project-root}/logs"
  include_ci_results: true # Analyze CI/CD pipeline results
  # Thresholds (can be overridden)
  performance_response_time_ms: 500 # Target response time
  performance_throughput_rps: 100 # Target requests per second
  security_score_min: 85 # Minimum security score (0-100)
  reliability_uptime_pct: 99.9 # Target uptime percentage
  maintainability_coverage_pct: 80 # Minimum test coverage
  # Assessment configuration
  use_deterministic_rules: true # PASS/CONCERNS/FAIL based on evidence
  never_guess_thresholds: true # Mark as CONCERNS if threshold unknown
  require_evidence: true # Every NFR must have evidence or be called out
  suggest_monitoring: true # Recommend monitoring hooks for gaps
  # Integration with BMad artifacts
  use_tech_spec: true # Load tech-spec.md for NFR requirements
  use_prd: true # Load PRD.md for NFR context
  use_test_design: true # Load test-design.md for NFR test plan
  # Output configuration
  output_file: "{output_folder}/nfr-assessment.md"
  generate_gate_yaml: true # Create gate YAML snippet with NFR status
  generate_evidence_checklist: true # Create checklist of evidence gaps
  update_story_file: false # Add NFR section to story (optional)
  # Quality gates
  fail_on_critical_nfr: true # Fail if critical NFR has FAIL status
  warn_on_concerns: true # Warn if any NFR has CONCERNS status
  block_release_on_fail: true # Block release if NFR assessment fails
  # Advanced options
  auto_load_knowledge: true # Load nfr-criteria, ci-burn-in fragments
  include_quick_wins: true # Suggest quick wins for concerns/failures
  include_recommended_actions: true # Provide actionable remediation steps
  # NFR category assessment (defaults to all categories)
  custom_nfr_categories: "" # Optional additional categories beyond standard (security, performance, reliability, maintainability)
  # Output configuration
  default_output_file: "{output_folder}/nfr-assessment.md"
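
A minimal sketch of the deterministic rule these variables describe: evidence plus a known threshold yields PASS or FAIL, while a missing measurement or unknown threshold degrades to CONCERNS (never_guess_thresholds). The function shape is illustrative.

```ts
type NfrStatus = 'PASS' | 'CONCERNS' | 'FAIL';

export function assessThreshold(
  measured: number | undefined, // evidence value, e.g. p95 response time in ms
  threshold: number | undefined, // e.g. performance_response_time_ms: 500
  lowerIsBetter = true,
): NfrStatus {
  if (measured === undefined) return 'CONCERNS'; // require_evidence: call out the gap
  if (threshold === undefined) return 'CONCERNS'; // never_guess_thresholds
  const ok = lowerIsBetter ? measured <= threshold : measured >= threshold;
  return ok ? 'PASS' : 'FAIL';
}
```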

@@ -18,33 +18,7 @@ template: "{installed_path}/test-design-template.md"
# Variables and inputs
variables:
  # Target scope
  epic_num: "" # Epic number for scoped design
  story_path: "" # Specific story for design (optional)
  design_level: "full" # full, targeted, minimal
  # Risk assessment configuration
  risk_assessment_enabled: true
  risk_threshold: 6 # Scores >= 6 are high-priority (probability × impact)
  risk_categories: "TECH,SEC,PERF,DATA,BUS,OPS" # Comma-separated
  # Coverage planning
  priority_levels: "P0,P1,P2,P3" # Test priorities
  test_levels: "e2e,api,integration,unit,component" # Test levels to consider
  selective_testing_strategy: "risk-based" # risk-based, coverage-based, hybrid
  # Output configuration
  output_file: "{output_folder}/test-design-epic-{epic_num}.md"
  include_risk_matrix: true
  include_coverage_matrix: true
  include_execution_order: true
  include_resource_estimates: true
  # Advanced options
  auto_load_knowledge: true # Load relevant knowledge fragments
  include_mitigation_plan: true
  include_gate_criteria: true
  standalone_mode: false # Can run without epic context
  design_level: "full" # full, targeted, minimal - scope of design effort
  # Output configuration
  default_output_file: "{output_folder}/test-design-epic-{epic_num}.md"
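
A sketch of the risk_threshold arithmetic, assuming the common 1-3 scales for probability and impact (the scales are an assumption; only the product and the >= 6 cutoff come from the config above).

```ts
type Risk = {
  id: string;
  category: string; // TECH, SEC, PERF, DATA, BUS, OPS
  probability: 1 | 2 | 3; // assumed 1-3 scale
  impact: 1 | 2 | 3; // assumed 1-3 scale
};

export const riskScore = (r: Risk) => r.probability * r.impact;

// risk_threshold: 6 means only 2x3, 3x2, and 3x3 risks are high-priority
export const isHighPriority = (r: Risk, threshold = 6) =>
  riskScore(r) >= threshold;
```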

@@ -18,54 +18,9 @@ template: "{installed_path}/test-review-template.md"
# Variables and inputs
variables:
  # Review target
  test_file_path: "" # Explicit test file to review (if not provided, auto-discover)
  test_dir: "{project-root}/tests"
  test_dir: "{project-root}/tests" # Root test directory
  review_scope: "single" # single (one file), directory (folder), suite (all tests)
  # Review configuration
  quality_score_enabled: true # Calculate 0-100 quality score
  append_to_file: false # true = inline comments, false = separate report
  check_against_knowledge: true # Use tea-index.csv fragments for validation
  strict_mode: false # Strict = fail on any violation, Relaxed = advisory only
  # Quality criteria to check
  check_given_when_then: true # BDD format validation
  check_test_ids: true # Test ID conventions (e.g., 1.3-E2E-001)
  check_priority_markers: true # P0/P1/P2/P3 classification
  check_hard_waits: true # Detect sleep(), wait(X), hardcoded delays
  check_determinism: true # No conditionals (if/else), no try/catch abuse
  check_isolation: true # Tests clean up, no shared state
  check_fixture_patterns: true # Pure function → Fixture → mergeTests
  check_data_factories: true # Factory usage vs hardcoded data
  check_network_first: true # Route intercept before navigate
  check_assertions: true # Explicit assertions, not implicit waits
  check_test_length: true # Warn if >300 lines per file
  check_test_duration: true # Warn if individual test >1.5 min
  check_flakiness_patterns: true # Common flaky patterns (race conditions, timing)
  # Integration with BMad artifacts
  use_story_file: true # Load story for context (acceptance criteria)
  use_test_design: true # Load test-design for priority context
  auto_discover_story: true # Find related story by test ID
  # Output configuration
  output_file: "{output_folder}/test-review-{filename}.md"
  generate_inline_comments: false # Add TODO comments in test files
  generate_quality_badge: true # Create quality badge/score
  append_to_story: false # Add review section to story file
  # Knowledge base fragments to load
  knowledge_fragments:
    - test-quality.md # Definition of Done for tests
    - fixture-architecture.md # Pure function → Fixture patterns
    - network-first.md # Route interception before navigation
    - data-factories.md # Factory patterns and best practices
    - test-levels-framework.md # E2E vs API vs Component vs Unit
    - playwright-config.md # Configuration patterns (if Playwright)
    - tdd-cycles.md # Red-Green-Refactor patterns
    - selective-testing.md # Duplicate coverage detection
  # Output configuration
  default_output_file: "{output_folder}/test-review.md"
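
A toy version of one review rule (check_hard_waits) to show the shape such checks can take; the regexes and return format are illustrative, not the workflow's actual implementation.

```ts
const HARD_WAIT_PATTERNS = [
  /page\.waitForTimeout\(\s*\d+/, // Playwright hard wait
  /cy\.wait\(\s*\d+/, // Cypress numeric wait (aliased waits are fine)
  /setTimeout\(/, // raw timers inside tests
];

// Returns 1-based line numbers where a hardcoded delay appears
export function findHardWaits(source: string): number[] {
  return source
    .split('\n')
    .flatMap((line, i) =>
      HARD_WAIT_PATTERNS.some((p) => p.test(line)) ? [i + 1] : [],
    );
}
```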

@@ -18,92 +18,14 @@ template: "{installed_path}/trace-template.md"
# Variables and inputs
variables:
  # Target specification
  story_file: "" # Path to story markdown (e.g., bmad/output/story-1.3.md)
  acceptance_criteria: "" # Optional - inline criteria if no story file
  # Directory paths
  test_dir: "{project-root}/tests" # Root test directory
  source_dir: "{project-root}/src" # Source code directory
  # Test discovery
  test_dir: "{project-root}/tests"
  source_dir: "{project-root}/src"
  auto_discover_tests: true # Automatically find tests related to story
  # Traceability configuration
  coverage_levels: "e2e,api,component,unit" # Which levels to trace (comma-separated)
  map_by_test_id: true # Use test IDs (e.g., 1.3-E2E-001) for mapping
  map_by_describe: true # Use describe blocks for mapping
  map_by_filename: true # Use file paths for mapping
  # Coverage classification
  require_explicit_mapping: true # Require tests to explicitly reference criteria
  flag_unit_only: true # Flag criteria covered only by unit tests
  flag_integration_only: true # Flag criteria covered only by integration tests
  flag_partial_coverage: true # Flag criteria with incomplete coverage
  # Gap analysis
  prioritize_by_risk: true # Use test-priorities (P0/P1/P2/P3) for gap severity
  suggest_missing_tests: true # Recommend specific tests to add
  check_duplicate_coverage: true # Warn about same behavior tested at multiple levels
  # Integration with BMad artifacts
  use_test_design: true # Load test-design.md if exists (risk assessment)
  use_tech_spec: true # Load tech-spec.md if exists (technical context)
  use_prd: true # Load PRD.md if exists (requirements context)
  # Output configuration
  output_file: "{output_folder}/traceability-matrix.md"
  generate_gate_yaml: true # Create gate YAML snippet with coverage summary
  generate_coverage_badge: true # Create coverage badge/metric
  update_story_file: true # Add traceability section to story file
  # Quality gates
  min_p0_coverage: 100 # Percentage (P0 must be 100% covered)
  min_p1_coverage: 90 # Percentage
  min_overall_coverage: 80 # Percentage
  # Advanced options
  auto_load_knowledge: true # Load traceability, risk-governance, test-quality fragments
  include_code_coverage: false # Integrate with code coverage reports (Istanbul, NYC)
  check_assertions: true # Verify explicit assertions in tests
  # PHASE 2: Gate Decision Variables (runs after traceability)
  enable_gate_decision: true # Run gate decision after traceability (Phase 2)
  # Gate target specification
  gate_type: "story" # story | epic | release | hotfix
  # story_id, epic_num, release_version inherited from trace context
  # Gate decision configuration
  # Workflow behavior
  coverage_levels: "e2e,api,component,unit" # Which test levels to trace
  gate_type: "story" # story | epic | release | hotfix - determines gate scope
  decision_mode: "deterministic" # deterministic (rule-based) | manual (team decision)
  allow_waivers: true # Allow business-approved waivers for FAIL → WAIVED
  require_evidence: true # Require links to test results, reports, etc.
  # Input sources for gate (auto-discovered from Phase 1 + external)
  # story_file, test_design_file inherited from trace
  nfr_file: "" # Path to nfr-assessment.md (optional, recommended for release gates)
  test_results: "" # Path to test execution results (CI artifacts, reports)
  # Decision criteria thresholds
  min_p0_pass_rate: 100 # P0 tests must have 100% pass rate
  min_p1_pass_rate: 95 # P1 tests threshold
  min_overall_pass_rate: 90 # Overall test pass rate
  # min_coverage already defined above (min_overall_coverage: 80)
  max_critical_nfrs_fail: 0 # No critical NFRs can fail
  max_security_issues: 0 # No unresolved security issues
  # Risk tolerance
  allow_p2_failures: true # P2 failures don't block release
  allow_p3_failures: true # P3 failures don't block release
  escalate_p1_failures: true # P1 failures require escalation approval
  # Gate output configuration
  gate_output_file: "{output_folder}/gate-decision-{gate_type}-{story_id}{epic_num}{release_version}.md"
  append_to_history: true # Append to bmm-workflow-status.md gate history
  notify_stakeholders: true # Generate notification message for team
  # Advanced gate options
  check_all_workflows_complete: true # Verify test-design, trace, nfr-assess complete
  validate_evidence_freshness: true # Warn if assessments are >7 days old
  require_sign_off: false # Require named approver for gate decision
  # Output configuration
  default_output_file: "{output_folder}/traceability-matrix.md"
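
A sketch of the deterministic Phase 2 gate decision under the thresholds above. The evidence shape, the CONCERNS middle state, and the waiver handling are assumptions layered on the config (allow_waivers covers FAIL → WAIVED; escalate_p1_failures is simplified to a non-blocking CONCERNS here).

```ts
type Gate = 'PASS' | 'CONCERNS' | 'FAIL' | 'WAIVED';

interface GateEvidence {
  p0PassRate: number; // min_p0_pass_rate: 100
  p1PassRate: number; // min_p1_pass_rate: 95
  overallPassRate: number; // min_overall_pass_rate: 90
  criticalNfrFailures: number; // max_critical_nfrs_fail: 0
  securityIssues: number; // max_security_issues: 0
  waiverApproved?: boolean; // allow_waivers: business-approved FAIL -> WAIVED
}

export function decideGate(e: GateEvidence): Gate {
  const blocking =
    e.p0PassRate < 100 || e.criticalNfrFailures > 0 || e.securityIssues > 0;
  if (blocking) return e.waiverApproved ? 'WAIVED' : 'FAIL';
  // P1/overall shortfalls need escalation rather than blocking (assumption)
  if (e.p1PassRate < 95 || e.overallPassRate < 90) return 'CONCERNS';
  return 'PASS';
}
```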