diff --git a/src/modules/bmm/agents/tea.agent.yaml b/src/modules/bmm/agents/tea.agent.yaml
index 6dc17502..3fa3fca6 100644
--- a/src/modules/bmm/agents/tea.agent.yaml
+++ b/src/modules/bmm/agents/tea.agent.yaml
@@ -11,7 +11,7 @@ agent:
persona:
role: Master Test Architect
identity: Test architect specializing in CI/CD, automated frameworks, and scalable quality gates.
- communication_style: Data-driven advisor. Strong opinions, weakly held. Pragmatic. Makes random bird noises.
+ communication_style: Data-driven advisor. Strong opinions, weakly held. Pragmatic.
principles:
- Risk-based testing: depth scales with impact. Quality gates backed by data. Tests mirror usage. Cost = creation + execution + maintenance.
- Testing is feature work. Prioritize unit/integration over E2E. Flakiness is critical debt. ATDD: tests first, AI implements, suite validates.
@@ -44,7 +44,7 @@ agent:
- trigger: trace
workflow: "{project-root}/bmad/bmm/workflows/testarch/trace/workflow.yaml"
- description: Map requirements to tests Given-When-Then BDD format
+ description: Map requirements to tests (Phase 1) and make quality gate decision (Phase 2)
- trigger: nfr-assess
workflow: "{project-root}/bmad/bmm/workflows/testarch/nfr-assess/workflow.yaml"
@@ -54,10 +54,6 @@ agent:
workflow: "{project-root}/bmad/bmm/workflows/testarch/ci/workflow.yaml"
description: Scaffold CI/CD quality pipeline
- - trigger: gate
- workflow: "{project-root}/bmad/bmm/workflows/testarch/gate/workflow.yaml"
- description: Write/update quality gate decision assessment
-
- trigger: test-review
workflow: "{project-root}/bmad/bmm/workflows/testarch/test-review/workflow.yaml"
description: Review test quality using comprehensive knowledge base and best practices
diff --git a/src/modules/bmm/config.yaml b/src/modules/bmm/config.yaml
new file mode 100644
index 00000000..d2310766
--- /dev/null
+++ b/src/modules/bmm/config.yaml
@@ -0,0 +1,7 @@
+# Powered by BMAD™ Core
+name: bmm
+short-title: BMad Method Module
+author: Brian (BMad) Madison
+
+# TEA Agent Configuration
+tea_use_mcp_enhancements: true # Enable Playwright MCP capabilities (healing, exploratory, verification)
diff --git a/src/modules/bmm/testarch/README.md b/src/modules/bmm/testarch/README.md
index 1bc64897..a0356d01 100644
--- a/src/modules/bmm/testarch/README.md
+++ b/src/modules/bmm/testarch/README.md
@@ -49,7 +49,7 @@ TEA integrates across the entire BMad development lifecycle, providing quality a
│ ↓ │
│ TEA: *test-review (final audit, optional) │
│ ↓ │
-│ TEA: *gate ──→ PASS | CONCERNS | FAIL | WAIVED │
+│ TEA: *trace (Phase 2: Gate) ──→ PASS | CONCERNS | FAIL | WAIVED │
│ │
└──────────────────────────────────────────────────────────┘
```
@@ -81,19 +81,21 @@ Phase 3 (Solutioning) → [TEA validates architecture testability]
↓
Phase 4 (Implementation) → TEA: *atdd, *automate, *test-review, *trace (per story)
↓
-Epic/Release Gate → TEA: *nfr-assess, *gate (release decision)
+Epic/Release Gate → TEA: *nfr-assess, *trace Phase 2 (release decision)
```
-### Why TEA Needs 9 Workflows
+### Why TEA Needs 8 Workflows
**Standard agents**: 1-3 workflows per phase
-**TEA**: 9 workflows across 3+ phases
+**TEA**: 8 workflows across 3+ phases
| Phase | TEA Workflows | Frequency | Purpose |
| ----------- | -------------------------------------- | ---------------- | -------------------------------- |
| **Phase 2** | *framework, *ci, \*test-design | Once per project | Establish quality infrastructure |
| **Phase 4** | *atdd, *automate, *test-review, *trace | Per story/sprint | Continuous quality validation |
-| **Release** | *nfr-assess, *gate | Per epic/release | Go/no-go decision |
+| **Release** | *nfr-assess, *trace (Phase 2: gate) | Per epic/release | Go/no-go decision |
+
+**Note**: `*trace` is a two-phase workflow: Phase 1 (traceability) + Phase 2 (gate decision). Consolidating the gate into `*trace` reduces cognitive load while preserving the natural flow from coverage analysis to release decision.
This complexity **requires specialized documentation** (this guide), **extensive knowledge base** (19+ fragments), and **unique architecture** (`testarch/` directory).
@@ -121,7 +123,7 @@ This complexity **requires specialized documentation** (this guide), **extensive
| Story Prep | - | Scrum Master `*create-story`, `*story-context` | Story markdown + context XML |
| Implementation | (Optional) Trigger `*atdd` before dev to supply failing tests + checklist | Implement story guided by ATDD checklist | Failing acceptance tests + implementation checklist |
| Post-Dev | Execute `*automate`, (Optional) `*test-review`, re-run `*trace` | Address recommendations, update code/tests | Regression specs, quality report, refreshed coverage matrix |
-| Release | (Optional) `*test-review` for final audit, Run `*gate` | Confirm Definition of Done, share release notes | Quality audit, Gate YAML + release summary (owners, waivers) |
+| Release | (Optional) `*test-review` for final audit, Run `*trace` (Phase 2) | Confirm Definition of Done, share release notes | Quality audit, Gate YAML + release summary (owners, waivers) |
Execution Notes
@@ -129,8 +131,8 @@ This complexity **requires specialized documentation** (this guide), **extensive
- Run `*framework` only once per repo or when modern harness support is missing.
- `*framework` followed by `*ci` establishes install + pipeline; `*test-design` then handles risk scoring, mitigations, and scenario planning in one pass.
- Use `*atdd` before coding when the team can adopt ATDD; share its checklist with the dev agent.
-- Post-implementation, keep `*trace` current, expand coverage with `*automate`, optionally review test quality with `*test-review`, and finish with `*gate`.
-- Use `*test-review` after `*atdd` to validate generated tests, after `*automate` to ensure regression quality, or before `*gate` for final audit.
+- Post-implementation, keep `*trace` current, expand coverage with `*automate`, and optionally review test quality with `*test-review`. For the release gate, run `*trace` with Phase 2 enabled to get the deployment decision.
+- Use `*test-review` after `*atdd` to validate generated tests, after `*automate` to ensure regression quality, or before gate for final audit.
@@ -141,7 +143,7 @@ This complexity **requires specialized documentation** (this guide), **extensive
2. **Setup:** TEA checks harness via `*framework`, configures `*ci`, and runs `*test-design` to capture risk/coverage plans.
3. **Story Prep:** Scrum Master generates the story via `*create-story`; PO validates using `*assess-project-ready`.
4. **Implementation:** TEA optionally runs `*atdd`; Dev implements with guidance from failing tests and the plan.
-5. **Post-Dev and Release:** TEA runs `*automate`, optionally `*test-review` to audit test quality, re-runs `*trace`, and finishes with `*gate` to document the decision.
+5. **Post-Dev and Release:** TEA runs `*automate`, optionally `*test-review` to audit test quality, then re-runs `*trace` with Phase 2 enabled to produce both the traceability matrix and the gate decision.
@@ -155,7 +157,7 @@ This complexity **requires specialized documentation** (this guide), **extensive
| Story Prep | - | Scrum Master `*create-story` | Updated story markdown |
| Implementation | (Optional) Run `*atdd` before dev | Implement story, referencing checklist/tests | Failing acceptance tests + implementation checklist |
| Post-Dev | Apply `*automate`, (Optional) `*test-review`, re-run `*trace`, `*nfr-assess` if needed | Resolve gaps, update docs/tests | Regression specs, quality report, refreshed coverage matrix, NFR report |
-| Release | (Optional) `*test-review` for final audit, Run `*gate` | Product Owner `*assess-project-ready`, share release notes | Quality audit, Gate YAML + release summary |
+| Release | (Optional) `*test-review` for final audit, Run `*trace` (Phase 2) | Product Owner `*assess-project-ready`, share release notes | Quality audit, Gate YAML + release summary |
Execution Notes
@@ -163,7 +165,7 @@ This complexity **requires specialized documentation** (this guide), **extensive
- Lead with `*trace` so remediation plans target true coverage gaps. Ensure `*framework` and `*ci` are in place early in the engagement; if the brownfield lacks them, run those setup steps immediately after refreshing context.
- `*test-design` should highlight regression hotspots, mitigations, and P0 scenarios.
- Use `*atdd` when stories benefit from ATDD; otherwise proceed to implementation and rely on post-dev automation.
-- After development, expand coverage with `*automate`, optionally review test quality with `*test-review`, re-run `*trace`, and close with `*gate`. Run `*nfr-assess` now if non-functional risks weren't addressed earlier.
+- After development, expand coverage with `*automate`, optionally review test quality with `*test-review`, re-run `*trace` (Phase 2 for gate decision). Run `*nfr-assess` now if non-functional risks weren't addressed earlier.
- Use `*test-review` to validate existing brownfield tests or audit new tests before gate.
- Product Owner `*assess-project-ready` confirms the team has artifacts before handoff or release.
@@ -178,19 +180,19 @@ This complexity **requires specialized documentation** (this guide), **extensive
4. **Story Prep:** Scrum Master generates `stories/story-1.1.md` via `*create-story`, automatically pulling updated context.
5. **ATDD First:** TEA runs `*atdd`, producing failing Playwright specs under `tests/e2e/payments/` plus an implementation checklist.
6. **Implementation:** Dev pairs with the checklist/tests to deliver the story.
-7. **Post-Implementation:** TEA applies `*automate`, optionally `*test-review` to audit test quality, re-runs `*trace`, performs `*nfr-assess` to validate SLAs, and closes with `*gate` marking PASS with follow-ups.
+7. **Post-Implementation:** TEA applies `*automate`, optionally `*test-review` to audit test quality, re-runs `*trace` with Phase 2 enabled, and performs `*nfr-assess` to validate SLAs; the `*trace` Phase 2 output records the gate decision (PASS with follow-ups).
### Enterprise / Compliance Program (Level 4)
-| Phase | Test Architect | Dev / Team | Outputs |
-| ------------------- | ---------------------------------------------------------------- | ---------------------------------------------- | ---------------------------------------------------------- |
-| Strategic Planning | - | Analyst/PM/Architect standard workflows | Enterprise-grade PRD, epics, architecture |
-| Quality Planning | Run `*framework`, `*test-design`, `*nfr-assess` | Review guidance, align compliance requirements | Harness scaffold, risk + coverage plan, NFR documentation |
-| Pipeline Enablement | Configure `*ci` | Coordinate secrets, pipeline approvals | `.github/workflows/test.yml`, helper scripts |
-| Execution | Enforce `*atdd`, `*automate`, `*test-review`, `*trace` per story | Implement stories, resolve TEA findings | Tests, fixtures, quality reports, coverage matrices |
-| Release | (Optional) `*test-review` for final audit, Run `*gate` | Capture sign-offs, archive artifacts | Quality audit, updated assessments, gate YAML, audit trail |
+| Phase | Test Architect | Dev / Team | Outputs |
+| ------------------- | ----------------------------------------------------------------- | ---------------------------------------------- | ---------------------------------------------------------- |
+| Strategic Planning | - | Analyst/PM/Architect standard workflows | Enterprise-grade PRD, epics, architecture |
+| Quality Planning | Run `*framework`, `*test-design`, `*nfr-assess` | Review guidance, align compliance requirements | Harness scaffold, risk + coverage plan, NFR documentation |
+| Pipeline Enablement | Configure `*ci` | Coordinate secrets, pipeline approvals | `.github/workflows/test.yml`, helper scripts |
+| Execution | Enforce `*atdd`, `*automate`, `*test-review`, `*trace` per story | Implement stories, resolve TEA findings | Tests, fixtures, quality reports, coverage matrices |
+| Release | (Optional) `*test-review` for final audit, Run `*trace` (Phase 2) | Capture sign-offs, archive artifacts | Quality audit, updated assessments, gate YAML, audit trail |
Execution Notes
@@ -198,7 +200,7 @@ This complexity **requires specialized documentation** (this guide), **extensive
- Use `*atdd` for every story when feasible so acceptance tests lead implementation in regulated environments.
- `*ci` scaffolds selective testing scripts, burn-in jobs, caching, and notifications for long-running suites.
- Enforce `*test-review` per story or sprint to maintain quality standards and ensure compliance with testing best practices.
-- Prior to release, rerun coverage (`*trace`, `*automate`), perform final quality audit with `*test-review`, and formalize the decision in `*gate`; store everything for audits. Call `*nfr-assess` here if compliance/performance requirements weren't captured during planning.
+- Prior to release, rerun coverage (`*trace`, `*automate`), perform final quality audit with `*test-review`, and formalize the decision with `*trace` Phase 2 (gate decision); store everything for audits. Call `*nfr-assess` here if compliance/performance requirements weren't captured during planning.
@@ -209,36 +211,78 @@ This complexity **requires specialized documentation** (this guide), **extensive
2. **Quality Planning:** TEA runs `*framework`, `*test-design`, and `*nfr-assess` to establish mitigations, coverage, and NFR targets.
3. **Pipeline Setup:** TEA configures CI via `*ci` with selective execution scripts.
4. **Execution:** For each story, TEA enforces `*atdd`, `*automate`, `*test-review`, and `*trace`; Dev teams iterate on the findings.
-5. **Release:** TEA re-checks coverage, performs final quality audit with `*test-review`, and logs the final gate decision via `*gate`, archiving artifacts for compliance.
+5. **Release:** TEA re-checks coverage, performs final quality audit with `*test-review`, and logs the final gate decision via `*trace` Phase 2, archiving artifacts for compliance.
## Command Catalog
-| Command | Workflow README | Primary Outputs | Notes |
-| -------------- | ------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------ |
-| `*framework` | [📖](../workflows/testarch/framework/README.md) | Playwright/Cypress scaffold, `.env.example`, `.nvmrc`, sample specs | Use when no production-ready harness exists |
-| `*ci` | [📖](../workflows/testarch/ci/README.md) | CI workflow, selective test scripts, secrets checklist | Platform-aware (GitHub Actions default) |
-| `*test-design` | [📖](../workflows/testarch/test-design/README.md) | Combined risk assessment, mitigation plan, and coverage strategy | Handles risk scoring and test design in one pass |
-| `*atdd` | [📖](../workflows/testarch/atdd/README.md) | Failing acceptance tests + implementation checklist | Requires approved story + harness |
-| `*automate` | [📖](../workflows/testarch/automate/README.md) | Prioritized specs, fixtures, README/script updates, DoD summary | Avoid duplicate coverage (see priority matrix) |
-| `*trace` | [📖](../workflows/testarch/trace/README.md) | Coverage matrix, recommendations, gate snippet | Requires access to story/tests repositories |
-| `*nfr-assess` | [📖](../workflows/testarch/nfr-assess/README.md) | NFR assessment report with actions | Focus on security/performance/reliability |
-| `*gate` | [📖](../workflows/testarch/gate/README.md) | Gate YAML + summary (PASS/CONCERNS/FAIL/WAIVED) | Deterministic decision rules + rationale |
-| `*test-review` | [📖](../workflows/testarch/test-review/README.md) | Test quality review report with 0-100 score, violations, fixes | Reviews tests against knowledge base patterns |
-
-**📖** = Click to view detailed workflow documentation
-
-Command Guidance and Context Loading
+Optional Playwright MCP Enhancements
-- Each task now carries its own preflight/flow/deliverable guidance inline.
-- `tea-index.csv` maps workflow needs to knowledge fragments; keep tags accurate as you add guidance.
-- Consider future modularization into orchestrated workflows if additional automation is needed.
-- Update the fragment markdown files alongside workflow edits so guidance and outputs stay in sync.
+**Two Playwright MCP servers** (actively maintained, continuously updated):
+
+- `playwright` - Browser automation (`npx @playwright/mcp@latest`)
+- `playwright-test` - Test runner with failure analysis (`npx playwright run-test-mcp-server`)
+
+**How MCP Enhances TEA Workflows**:
+
+MCP provides additional capabilities on top of TEA's default AI-based approach:
+
+1. `*test-design`:
+ - Default: Analysis + documentation
+ - **+ MCP**: Interactive UI discovery with `browser_navigate`, `browser_click`, `browser_snapshot`, behavior observation
+
+ Benefit: Discover actual functionality, edge cases, undocumented features
+
+2. `*atdd`, `*automate`:
+ - Default: Infers selectors and interactions from requirements and knowledge fragments
+ - **+ MCP**: Generates tests **then** verifies with `generator_setup_page`, `browser_*` tools, validates against live app
+
+ Benefit: Accurate selectors from real DOM, verified behavior, refined test code
+
+3. `*automate`:
+ - Default: Pattern-based fixes from error messages + knowledge fragments
+ - **+ MCP**: Pattern fixes **enhanced with** `browser_snapshot`, `browser_console_messages`, `browser_network_requests`, `browser_generate_locator`
+
+ Benefit: Visual failure context, live DOM inspection, root cause discovery
+
+**Config example**:
+
+```json
+{
+ "mcpServers": {
+ "playwright": {
+ "command": "npx",
+ "args": ["@playwright/mcp@latest"]
+ },
+ "playwright-test": {
+ "command": "npx",
+ "args": ["playwright", "run-test-mcp-server"]
+ }
+ }
+}
+```
+
+**To disable**: Set `tea_use_mcp_enhancements: false` in `bmad/bmm/config.yaml`, or remove the MCP servers from your IDE config.
+
+
+| Command | Workflow README | Primary Outputs | Notes | With Playwright MCP Enhancements |
+| -------------- | ------------------------------------------------- | --------------------------------------------------------------------------------------------- | ---------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ |
+| `*framework` | [📖](../workflows/testarch/framework/README.md) | Playwright/Cypress scaffold, `.env.example`, `.nvmrc`, sample specs | Use when no production-ready harness exists | - |
+| `*ci` | [📖](../workflows/testarch/ci/README.md) | CI workflow, selective test scripts, secrets checklist | Platform-aware (GitHub Actions default) | - |
+| `*test-design` | [📖](../workflows/testarch/test-design/README.md) | Combined risk assessment, mitigation plan, and coverage strategy | Risk scoring + optional exploratory mode | **+ Exploratory**: Interactive UI discovery with browser automation (uncover actual functionality) |
+| `*atdd` | [📖](../workflows/testarch/atdd/README.md) | Failing acceptance tests + implementation checklist | TDD red phase + optional recording mode | **+ Recording**: AI generation verified with live browser (accurate selectors from real DOM) |
+| `*automate` | [📖](../workflows/testarch/automate/README.md) | Prioritized specs, fixtures, README/script updates, DoD summary | Optional healing/recording, avoid duplicate coverage | **+ Healing**: Pattern fixes enhanced with visual debugging + **+ Recording**: AI verified with live browser |
+| `*test-review` | [📖](../workflows/testarch/test-review/README.md) | Test quality review report with 0-100 score, violations, fixes | Reviews tests against knowledge base patterns | - |
+| `*nfr-assess` | [📖](../workflows/testarch/nfr-assess/README.md) | NFR assessment report with actions | Focus on security/performance/reliability | - |
+| `*trace` | [📖](../workflows/testarch/trace/README.md) | Phase 1: Coverage matrix, recommendations. Phase 2: Gate decision (PASS/CONCERNS/FAIL/WAIVED) | Two-phase workflow: traceability + gate decision | - |
+
+**📖** = Click to view detailed workflow documentation
+
## Why TEA is Architecturally Different
TEA is the only BMM agent with its own top-level module directory (`bmm/testarch/`). This intentional design pattern reflects TEA's unique requirements:
@@ -255,13 +299,13 @@ src/modules/bmm/
├── workflows/
│ └── testarch/ # TEA workflows (standard location)
└── testarch/ # Knowledge base (UNIQUE!)
- ├── knowledge/ # 19+ reusable test pattern fragments
- ├── tea-index.csv # Centralized knowledge lookup
+ ├── knowledge/ # 21 production-ready test pattern fragments
+ ├── tea-index.csv # Centralized knowledge lookup (21 fragments indexed)
└── README.md # This guide
```
### Why TEA Gets Special Treatment
-TEA uniquely requires **extensive domain knowledge** (19+ fragments: test patterns, CI/CD, fixtures, quality practices), a **centralized reference system** (`tea-index.csv` for on-demand fragment loading), and **cross-cutting concerns** (domain-specific patterns vs project-specific artifacts like PRDs/stories). Other BMM agents don't require this architecture.
+TEA uniquely requires **extensive domain knowledge** (21 fragments, 12,821 lines: test patterns, CI/CD, fixtures, quality practices, healing strategies), a **centralized reference system** (`tea-index.csv` for on-demand fragment loading), **cross-cutting concerns** (domain-specific patterns vs project-specific artifacts like PRDs/stories), and **optional MCP integration** (healing, exploratory, verification modes). Other BMM agents don't require this architecture.
diff --git a/src/modules/bmm/testarch/knowledge/ci-burn-in.md b/src/modules/bmm/testarch/knowledge/ci-burn-in.md
index cfb8cadc..65d40695 100644
--- a/src/modules/bmm/testarch/knowledge/ci-burn-in.md
+++ b/src/modules/bmm/testarch/knowledge/ci-burn-in.md
@@ -1,9 +1,675 @@
# CI Pipeline and Burn-In Strategy
-- Stage jobs: install/caching once, run `test-changed` for quick feedback, then shard full suites with `fail-fast: false` so evidence isn’t lost.
-- Re-run changed specs 5–10x (burn-in) before merging to flush flakes; fail the pipeline on the first inconsistent run.
-- Upload artifacts on failure (videos, traces, HAR) and keep retry counts explicit—hidden retries hide instability.
-- Use `wait-on` for app startup, enforce time budgets (<10 min per job), and document required secrets alongside workflows.
-- Mirror CI scripts locally (`npm run test:ci`, `scripts/burn-in-changed.sh`) so devs reproduce pipeline behaviour exactly.
+## Principle
-_Source: Murat CI/CD strategy blog, Playwright/Cypress workflow examples._
+CI pipelines must execute tests reliably, quickly, and provide clear feedback. Burn-in testing (running changed tests multiple times) flushes out flakiness before merge. Stage jobs strategically: install/cache once, run changed specs first for fast feedback, then shard full suites with fail-fast disabled to preserve evidence.
+
+## Rationale
+
+CI is the quality gate for production. A poorly configured pipeline either wastes developer time (slow feedback, false positives) or ships broken code (false negatives, insufficient coverage). Burn-in testing ensures reliability by stress-testing changed code, while parallel execution and intelligent test selection optimize speed without sacrificing thoroughness.
+
+## Pattern Examples
+
+### Example 1: GitHub Actions Workflow with Parallel Execution
+
+**Context**: Production-ready CI/CD pipeline for E2E tests with caching, parallelization, and burn-in testing.
+
+**Implementation**:
+
+```yaml
+# .github/workflows/e2e-tests.yml
+name: E2E Tests
+on:
+ pull_request:
+ push:
+ branches: [main, develop]
+
+env:
+ NODE_VERSION_FILE: '.nvmrc'
+ CACHE_KEY: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
+
+jobs:
+ install-dependencies:
+ name: Install & Cache Dependencies
+ runs-on: ubuntu-latest
+ timeout-minutes: 10
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version-file: ${{ env.NODE_VERSION_FILE }}
+ cache: 'npm'
+
+ - name: Cache node modules
+ uses: actions/cache@v4
+ id: npm-cache
+ with:
+ path: |
+ ~/.npm
+ node_modules
+ ~/.cache/Cypress
+ ~/.cache/ms-playwright
+ key: ${{ env.CACHE_KEY }}
+ restore-keys: |
+ ${{ runner.os }}-node-
+
+ - name: Install dependencies
+ if: steps.npm-cache.outputs.cache-hit != 'true'
+ run: npm ci --prefer-offline --no-audit
+
+ - name: Install Playwright browsers
+ if: steps.npm-cache.outputs.cache-hit != 'true'
+ run: npx playwright install --with-deps chromium
+
+ test-changed-specs:
+ name: Test Changed Specs First (Burn-In)
+ needs: install-dependencies
+ runs-on: ubuntu-latest
+ timeout-minutes: 15
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0 # Full history for accurate diff
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version-file: ${{ env.NODE_VERSION_FILE }}
+ cache: 'npm'
+
+ - name: Restore dependencies
+ uses: actions/cache@v4
+ with:
+ path: |
+ ~/.npm
+ node_modules
+ ~/.cache/ms-playwright
+ key: ${{ env.CACHE_KEY }}
+
+ - name: Detect changed test files
+ id: changed-tests
+ run: |
+ CHANGED_SPECS=$(git diff --name-only origin/main...HEAD | grep -E '\.(spec|test)\.(ts|js|tsx|jsx)$' | tr '\n' ' ' || echo "")
+ echo "changed_specs=${CHANGED_SPECS}" >> $GITHUB_OUTPUT
+ echo "Changed specs: ${CHANGED_SPECS}"
+
+ - name: Run burn-in on changed specs (10 iterations)
+ if: steps.changed-tests.outputs.changed_specs != ''
+ run: |
+ SPECS="${{ steps.changed-tests.outputs.changed_specs }}"
+ echo "Running burn-in: 10 iterations on changed specs"
+ for i in {1..10}; do
+ echo "Burn-in iteration $i/10"
+ npm run test -- $SPECS || {
+ echo "❌ Burn-in failed on iteration $i"
+ exit 1
+ }
+ done
+ echo "✅ Burn-in passed - 10/10 successful runs"
+
+ - name: Upload artifacts on failure
+ if: failure()
+ uses: actions/upload-artifact@v4
+ with:
+ name: burn-in-failure-artifacts
+ path: |
+ test-results/
+ playwright-report/
+ screenshots/
+ retention-days: 7
+
+ test-e2e-sharded:
+ name: E2E Tests (Shard ${{ matrix.shard }}/${{ strategy.job-total }})
+ needs: [install-dependencies, test-changed-specs]
+ runs-on: ubuntu-latest
+ timeout-minutes: 30
+ strategy:
+ fail-fast: false # Run all shards even if one fails
+ matrix:
+ shard: [1, 2, 3, 4]
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version-file: ${{ env.NODE_VERSION_FILE }}
+ cache: 'npm'
+
+ - name: Restore dependencies
+ uses: actions/cache@v4
+ with:
+ path: |
+ ~/.npm
+ node_modules
+ ~/.cache/ms-playwright
+ key: ${{ env.CACHE_KEY }}
+
+ - name: Run E2E tests (shard ${{ matrix.shard }})
+ run: npm run test:e2e -- --shard=${{ matrix.shard }}/4
+ env:
+ TEST_ENV: staging
+ CI: true
+
+ - name: Upload test results
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: test-results-shard-${{ matrix.shard }}
+ path: |
+ test-results/
+ playwright-report/
+ retention-days: 30
+
+ - name: Upload JUnit report
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: junit-results-shard-${{ matrix.shard }}
+ path: test-results/junit.xml
+ retention-days: 30
+
+ merge-test-results:
+ name: Merge Test Results & Generate Report
+ needs: test-e2e-sharded
+ runs-on: ubuntu-latest
+ if: always()
+ steps:
+ - name: Download all shard results
+ uses: actions/download-artifact@v4
+ with:
+ pattern: test-results-shard-*
+ path: all-results/
+
+ - name: Merge HTML reports
+ run: |
+ npx playwright merge-reports --reporter=html all-results/
+ echo "Merged report available in playwright-report/"
+
+ - name: Upload merged report
+ uses: actions/upload-artifact@v4
+ with:
+ name: merged-playwright-report
+ path: playwright-report/
+ retention-days: 30
+
+ - name: Comment PR with results
+ if: github.event_name == 'pull_request'
+ uses: daun/playwright-report-comment@v3
+ with:
+ report-path: playwright-report/
+```
+
+**Key Points**:
+
+- **Install once, reuse everywhere**: Dependencies cached across all jobs
+- **Burn-in first**: Changed specs run 10x before full suite
+- **Fail-fast disabled**: All shards run to completion for full evidence
+- **Parallel execution**: 4 shards cut execution time by ~75%
+- **Artifact retention**: 30 days for reports, 7 days for failure debugging
+
+---
+
+### Example 2: Burn-In Loop Pattern (Standalone Script)
+
+**Context**: Reusable bash script for burn-in testing changed specs locally or in CI.
+
+**Implementation**:
+
+```bash
+#!/bin/bash
+# scripts/burn-in-changed.sh
+# Usage: ./scripts/burn-in-changed.sh [iterations] [base-branch]
+
+set -e # Exit on error
+
+# Configuration
+ITERATIONS=${1:-10}
+BASE_BRANCH=${2:-main}
+SPEC_PATTERN='\.(spec|test)\.(ts|js|tsx|jsx)$'
+
+echo "🔥 Burn-In Test Runner"
+echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
+echo "Iterations: $ITERATIONS"
+echo "Base branch: $BASE_BRANCH"
+echo ""
+
+# Detect changed test files
+echo "📋 Detecting changed test files..."
+CHANGED_SPECS=$(git diff --name-only $BASE_BRANCH...HEAD | grep -E "$SPEC_PATTERN" || echo "")
+
+if [ -z "$CHANGED_SPECS" ]; then
+ echo "✅ No test files changed. Skipping burn-in."
+ exit 0
+fi
+
+echo "Changed test files:"
+echo "$CHANGED_SPECS" | sed 's/^/ - /'
+echo ""
+
+# Count specs
+SPEC_COUNT=$(echo "$CHANGED_SPECS" | wc -l | xargs)
+echo "Running burn-in on $SPEC_COUNT test file(s)..."
+echo ""
+
+# Burn-in loop
+FAILURES=()
+for i in $(seq 1 $ITERATIONS); do
+ echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
+ echo "🔄 Iteration $i/$ITERATIONS"
+ echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
+
+ # Run tests with explicit file list
+ if npm run test -- $CHANGED_SPECS 2>&1 | tee "burn-in-log-$i.txt"; then
+ echo "✅ Iteration $i passed"
+ else
+ echo "❌ Iteration $i failed"
+ FAILURES+=($i)
+
+ # Save failure artifacts
+ mkdir -p burn-in-failures/iteration-$i
+ cp -r test-results/ burn-in-failures/iteration-$i/ 2>/dev/null || true
+ cp -r screenshots/ burn-in-failures/iteration-$i/ 2>/dev/null || true
+
+ echo ""
+ echo "🛑 BURN-IN FAILED on iteration $i"
+ echo "Failure artifacts saved to: burn-in-failures/iteration-$i/"
+ echo "Logs saved to: burn-in-log-$i.txt"
+ echo ""
+ exit 1
+ fi
+
+ echo ""
+done
+
+# Success summary
+echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
+echo "🎉 BURN-IN PASSED"
+echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
+echo "All $ITERATIONS iterations passed for $SPEC_COUNT test file(s)"
+echo "Changed specs are stable and ready to merge."
+echo ""
+
+# Cleanup logs
+rm -f burn-in-log-*.txt
+
+exit 0
+```
+
+**Usage**:
+
+```bash
+# Run locally with default settings (10 iterations, compare to main)
+./scripts/burn-in-changed.sh
+
+# Custom iterations and base branch
+./scripts/burn-in-changed.sh 20 develop
+
+# Add to package.json
+{
+ "scripts": {
+ "test:burn-in": "bash scripts/burn-in-changed.sh",
+ "test:burn-in:strict": "bash scripts/burn-in-changed.sh 20"
+ }
+}
+```
+
+**Key Points**:
+
+- **Exit on first failure**: Flaky tests caught immediately
+- **Failure artifacts**: Saved per-iteration for debugging
+- **Flexible configuration**: Iterations and base branch customizable
+- **CI/local parity**: Same script runs in both environments
+- **Clear output**: Visual feedback on progress and results
+
+---
+
+### Example 3: Shard Orchestration with Result Aggregation
+
+**Context**: Advanced sharding strategy for large test suites with intelligent result merging.
+
+**Implementation**:
+
+```javascript
+// scripts/run-sharded-tests.js
+const { spawn } = require('child_process');
+const fs = require('fs');
+const path = require('path');
+
+/**
+ * Run tests across multiple shards and aggregate results
+ * Usage: SHARD_COUNT=4 TEST_ENV=staging node scripts/run-sharded-tests.js
+ */
+
+const SHARD_COUNT = parseInt(process.env.SHARD_COUNT || '4');
+const TEST_ENV = process.env.TEST_ENV || 'local';
+const RESULTS_DIR = path.join(__dirname, '../test-results');
+
+console.log(`🚀 Running tests across ${SHARD_COUNT} shards`);
+console.log(`Environment: ${TEST_ENV}`);
+console.log('━'.repeat(50));
+
+// Ensure results directory exists
+if (!fs.existsSync(RESULTS_DIR)) {
+ fs.mkdirSync(RESULTS_DIR, { recursive: true });
+}
+
+/**
+ * Run a single shard
+ */
+function runShard(shardIndex) {
+ return new Promise((resolve, reject) => {
+ const shardId = `${shardIndex}/${SHARD_COUNT}`;
+ console.log(`\n📦 Starting shard ${shardId}...`);
+
+ const child = spawn('npx', ['playwright', 'test', `--shard=${shardId}`, '--reporter=json'], {
+ env: { ...process.env, TEST_ENV, SHARD_INDEX: shardIndex },
+ stdio: 'pipe',
+ });
+
+ let stdout = '';
+ let stderr = '';
+
+ child.stdout.on('data', (data) => {
+ stdout += data.toString();
+ process.stdout.write(data);
+ });
+
+ child.stderr.on('data', (data) => {
+ stderr += data.toString();
+ process.stderr.write(data);
+ });
+
+ child.on('close', (code) => {
+ // Save shard results
+ const resultFile = path.join(RESULTS_DIR, `shard-${shardIndex}.json`);
+ try {
+ const result = JSON.parse(stdout);
+ fs.writeFileSync(resultFile, JSON.stringify(result, null, 2));
+ console.log(`✅ Shard ${shardId} completed (exit code: ${code})`);
+ resolve({ shardIndex, code, result });
+ } catch (error) {
+ console.error(`❌ Shard ${shardId} failed to parse results:`, error.message);
+ reject({ shardIndex, code, error });
+ }
+ });
+
+ child.on('error', (error) => {
+ console.error(`❌ Shard ${shardId} process error:`, error.message);
+ reject({ shardIndex, error });
+ });
+ });
+}
+
+/**
+ * Aggregate results from all shards
+ */
+function aggregateResults() {
+ console.log('\n📊 Aggregating results from all shards...');
+
+ const shardResults = [];
+ let totalTests = 0;
+ let totalPassed = 0;
+ let totalFailed = 0;
+ let totalSkipped = 0;
+ let totalFlaky = 0;
+
+ for (let i = 1; i <= SHARD_COUNT; i++) {
+ const resultFile = path.join(RESULTS_DIR, `shard-${i}.json`);
+ if (fs.existsSync(resultFile)) {
+ const result = JSON.parse(fs.readFileSync(resultFile, 'utf8'));
+ shardResults.push(result);
+
+ // Aggregate stats
+ totalTests += (result.stats?.expected || 0) + (result.stats?.unexpected || 0) + (result.stats?.skipped || 0) + (result.stats?.flaky || 0);
+ totalPassed += result.stats?.expected || 0;
+ totalFailed += result.stats?.unexpected || 0;
+ totalSkipped += result.stats?.skipped || 0;
+ totalFlaky += result.stats?.flaky || 0;
+ }
+ }
+
+ const summary = {
+ totalShards: SHARD_COUNT,
+ environment: TEST_ENV,
+ totalTests,
+ passed: totalPassed,
+ failed: totalFailed,
+ skipped: totalSkipped,
+ flaky: totalFlaky,
+ duration: shardResults.reduce((acc, r) => acc + (r.duration || 0), 0),
+ timestamp: new Date().toISOString(),
+ };
+
+ // Save aggregated summary
+ fs.writeFileSync(path.join(RESULTS_DIR, 'summary.json'), JSON.stringify(summary, null, 2));
+
+ console.log('\n━'.repeat(50));
+ console.log('📈 Test Results Summary');
+ console.log('━'.repeat(50));
+ console.log(`Total tests: ${totalTests}`);
+ console.log(`✅ Passed: ${totalPassed}`);
+ console.log(`❌ Failed: ${totalFailed}`);
+ console.log(`⏭️ Skipped: ${totalSkipped}`);
+ console.log(`⚠️ Flaky: ${totalFlaky}`);
+ console.log(`⏱️ Duration: ${(summary.duration / 1000).toFixed(2)}s`);
+ console.log('━'.repeat(50));
+
+ return summary;
+}
+
+/**
+ * Main execution
+ */
+async function main() {
+ const startTime = Date.now();
+ const shardPromises = [];
+
+ // Run all shards in parallel
+ for (let i = 1; i <= SHARD_COUNT; i++) {
+ shardPromises.push(runShard(i));
+ }
+
+ try {
+ await Promise.allSettled(shardPromises);
+ } catch (error) {
+ console.error('❌ One or more shards failed:', error);
+ }
+
+ // Aggregate results
+ const summary = aggregateResults();
+
+ const totalTime = ((Date.now() - startTime) / 1000).toFixed(2);
+ console.log(`\n⏱️ Total execution time: ${totalTime}s`);
+
+ // Exit with failure if any tests failed
+ if (summary.failed > 0) {
+ console.error('\n❌ Test suite failed');
+ process.exit(1);
+ }
+
+ console.log('\n✅ All tests passed');
+ process.exit(0);
+}
+
+main().catch((error) => {
+ console.error('Fatal error:', error);
+ process.exit(1);
+});
+```
+
+**package.json integration**:
+
+```json
+{
+ "scripts": {
+ "test:sharded": "node scripts/run-sharded-tests.js",
+ "test:sharded:ci": "SHARD_COUNT=8 TEST_ENV=staging node scripts/run-sharded-tests.js"
+ }
+}
+```
+
+**Key Points**:
+
+- **Parallel shard execution**: All shards run simultaneously
+- **Result aggregation**: Unified summary across shards
+- **Failure detection**: Exit code reflects overall test status
+- **Artifact preservation**: Individual shard results saved for debugging
+- **CI/local compatibility**: Same script works in both environments
+
+---
+
+### Example 4: Selective Test Execution (Changed Files + Tags)
+
+**Context**: Optimize CI by running only relevant tests based on file changes and tags.
+
+**Implementation**:
+
+```bash
+#!/bin/bash
+# scripts/selective-test-runner.sh
+# Intelligent test selection based on changed files and test tags
+
+set -e
+
+BASE_BRANCH=${BASE_BRANCH:-main}
+TEST_ENV=${TEST_ENV:-local}
+
+echo "🎯 Selective Test Runner"
+echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
+echo "Base branch: $BASE_BRANCH"
+echo "Environment: $TEST_ENV"
+echo ""
+
+# Detect changed files (all types, not just tests)
+CHANGED_FILES=$(git diff --name-only $BASE_BRANCH...HEAD)
+
+if [ -z "$CHANGED_FILES" ]; then
+ echo "✅ No files changed. Skipping tests."
+ exit 0
+fi
+
+echo "Changed files:"
+echo "$CHANGED_FILES" | sed 's/^/ - /'
+echo ""
+
+# Determine test strategy based on changes
+run_smoke_only=false
+run_all_tests=false
+affected_specs=""
+
+# Critical files = run all tests
+if echo "$CHANGED_FILES" | grep -qE '(package\.json|package-lock\.json|playwright\.config|cypress\.config|\.github/workflows)'; then
+ echo "⚠️ Critical configuration files changed. Running ALL tests."
+ run_all_tests=true
+
+# Auth/security changes = run all auth + smoke tests
+elif echo "$CHANGED_FILES" | grep -qE '(auth|login|signup|security)'; then
+ echo "🔒 Auth/security files changed. Running auth + smoke tests."
+ npm run test -- --grep "@auth|@smoke"
+ exit $?
+
+# API changes = run integration + smoke tests
+elif echo "$CHANGED_FILES" | grep -qE '(api|service|controller)'; then
+ echo "🔌 API files changed. Running integration + smoke tests."
+ npm run test -- --grep "@integration|@smoke"
+ exit $?
+
+# UI component changes = run related component tests
+elif echo "$CHANGED_FILES" | grep -qE '\.(tsx|jsx|vue)$'; then
+ echo "🎨 UI components changed. Running component + smoke tests."
+
+ # Extract component names and find related tests
+ components=$(echo "$CHANGED_FILES" | grep -E '\.(tsx|jsx|vue)$' | xargs -I {} basename {} | sed 's/\.[^.]*$//')
+ for component in $components; do
+ # Find tests matching component name
+ affected_specs+=" $(find tests -name "*${component}*" -type f)" || true
+ done
+
+ if [ -n "$affected_specs" ]; then
+ echo "Running tests for: $affected_specs"
+ npm run test -- $affected_specs --grep "@smoke"
+ else
+ echo "No specific tests found. Running smoke tests only."
+ npm run test -- --grep "@smoke"
+ fi
+ exit $?
+
+# Documentation/config only = run smoke tests
+elif echo "$CHANGED_FILES" | grep -qE '\.(md|txt|json|yml|yaml)$'; then
+ echo "📝 Documentation/config files changed. Running smoke tests only."
+ run_smoke_only=true
+else
+ echo "⚙️ Other files changed. Running smoke tests."
+ run_smoke_only=true
+fi
+
+# Execute selected strategy
+if [ "$run_all_tests" = true ]; then
+ echo ""
+ echo "Running full test suite..."
+ npm run test
+elif [ "$run_smoke_only" = true ]; then
+ echo ""
+ echo "Running smoke tests..."
+ npm run test -- --grep "@smoke"
+fi
+```
+
+**Usage in GitHub Actions**:
+
+```yaml
+# .github/workflows/selective-tests.yml
+name: Selective Tests
+on: pull_request
+
+jobs:
+ selective-tests:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Run selective tests
+ run: bash scripts/selective-test-runner.sh
+ env:
+ BASE_BRANCH: ${{ github.base_ref }}
+ TEST_ENV: staging
+```
+
+**Key Points**:
+
+- **Intelligent routing**: Tests selected based on changed file types
+- **Tag-based filtering**: Use @smoke, @auth, @integration tags (see the sketch after this list)
+- **Fast feedback**: Only relevant tests run on most PRs
+- **Safety net**: Critical changes trigger full suite
+- **Component mapping**: UI changes run related component tests
+
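+A sketch of how these tags can live in the specs themselves, assuming Playwright-style title tags (routes, labels, and messages are illustrative; Cypress users would pair the same idea with a grep plugin):
+
+```typescript
+import { test, expect } from '@playwright/test';
+
+// Tags are plain text in the title; CI filters with: npx playwright test --grep "@smoke"
+test('login page renders @smoke', async ({ page }) => {
+  await page.goto('/login');
+  await expect(page.getByRole('button', { name: 'Sign in' })).toBeVisible();
+});
+
+test('rejects invalid credentials @auth', async ({ page }) => {
+  await page.goto('/login');
+  await page.getByLabel('Email').fill('user@example.com');
+  await page.getByLabel('Password').fill('wrong-password');
+  await page.getByRole('button', { name: 'Sign in' }).click();
+  await expect(page.getByText('Invalid credentials')).toBeVisible();
+});
+```
+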
+---
+
+## CI Configuration Checklist
+
+Before deploying your CI pipeline, verify:
+
+- [ ] **Caching strategy**: node_modules, npm cache, browser binaries cached
+- [ ] **Timeout budgets**: Each job has reasonable timeout (10-30 min)
+- [ ] **Artifact retention**: 30 days for reports, 7 days for failure artifacts
+- [ ] **Parallelization**: Matrix strategy uses fail-fast: false
+- [ ] **Burn-in enabled**: Changed specs run 5-10x before merge
+- [ ] **wait-on app startup**: CI waits for app (wait-on: 'http://localhost:3000'; see the sketch below)
+- [ ] **Secrets documented**: README lists required secrets (API keys, tokens)
+- [ ] **Local parity**: CI scripts runnable locally (npm run test:ci)
+
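+The wait-on and local-parity items above can be wired as in this minimal sketch; the `npm run start` / `npm run test:ci` script names and port 3000 are assumptions to adapt to your project:
+
+```yaml
+# Sketch: boot the app in CI and block until it responds before running tests
+- name: Start app and wait until it responds
+  run: |
+    npm run start &
+    npx wait-on http://localhost:3000 --timeout 120000
+
+- name: Run tests (same entry point developers use locally)
+  run: npm run test:ci
+```
+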
+## Integration Points
+
+- Used in workflows: `*ci` (CI/CD pipeline setup)
+- Related fragments: `selective-testing.md`, `playwright-config.md`, `test-quality.md`
+- CI tools: GitHub Actions, GitLab CI, CircleCI, Jenkins
+
+_Source: Murat CI/CD strategy blog, Playwright/Cypress workflow examples, SEON production pipelines_
diff --git a/src/modules/bmm/testarch/knowledge/component-tdd.md b/src/modules/bmm/testarch/knowledge/component-tdd.md
index d73af37b..d14ba8f3 100644
--- a/src/modules/bmm/testarch/knowledge/component-tdd.md
+++ b/src/modules/bmm/testarch/knowledge/component-tdd.md
@@ -1,9 +1,486 @@
# Component Test-Driven Development Loop
-- Start every UI change with a failing component spec (`cy.mount` or RTL `render`); ship only after red → green → refactor passes.
-- Recreate providers/stores per spec to prevent state bleed and keep parallel runs deterministic.
-- Use factories to exercise prop/state permutations; cover accessibility by asserting against roles, labels, and keyboard flows.
-- Keep component specs under ~100 lines: split by intent (rendering, state transitions, error messaging) to preserve clarity.
-- Pair component tests with visual debugging (Cypress runner, Storybook, Playwright trace viewer) to accelerate diagnosis.
+## Principle
-_Source: CCTDD repository, Murat component testing talks._
+Start every UI change with a failing component test (`cy.mount`, Playwright component test, or RTL `render`). Follow the Red-Green-Refactor cycle: write a failing test (red), make it pass with minimal code (green), then improve the implementation (refactor). Ship only after the cycle completes. Keep component tests under 100 lines, isolated with fresh providers per test, and validate accessibility alongside functionality.
+
+## Rationale
+
+Component TDD provides immediate feedback during development. Failing tests (red) clarify requirements before writing code. Minimal implementations (green) prevent over-engineering. Refactoring with passing tests ensures changes don't break functionality. Isolated tests with fresh providers prevent state bleed in parallel runs. Accessibility assertions catch usability issues early. Visual debugging (Cypress runner, Storybook, Playwright trace viewer) accelerates diagnosis when tests fail.
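+
+A minimal sketch of the "fresh providers per test" point, assuming React Testing Library with jest-dom, React Query, and Jest-style globals (the `UserCard` component and query options are illustrative):
+
+```typescript
+import { render, screen } from '@testing-library/react';
+import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
+import type { ReactElement } from 'react';
+import { UserCard } from './UserCard'; // illustrative component under test
+
+const renderWithProviders = (ui: ReactElement) => {
+  // A brand-new client per test: no cached queries or mutations leak between specs,
+  // which keeps parallel runs deterministic
+  const queryClient = new QueryClient({ defaultOptions: { queries: { retry: false } } });
+  return render(<QueryClientProvider client={queryClient}>{ui}</QueryClientProvider>);
+};
+
+it('renders the user name', async () => {
+  renderWithProviders(<UserCard name="Ada" />);
+  expect(await screen.findByText('Ada')).toBeInTheDocument(); // jest-dom matcher
+});
+```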
+
+## Pattern Examples
+
+### Example 1: Red-Green-Refactor Loop
+
+**Context**: When building a new component, start with a failing test that describes the desired behavior. Implement just enough to pass, then refactor for quality.
+
+**Implementation**:
+
+```typescript
+// Step 1: RED - Write failing test
+// Button.cy.tsx (Cypress Component Test)
+import { Button } from './Button';
+
+describe('Button Component', () => {
+ it('should render with label', () => {
+ cy.mount(<Button label="Click Me" />);
+ cy.contains('Click Me').should('be.visible');
+ });
+
+ it('should call onClick when clicked', () => {
+ const onClickSpy = cy.stub().as('onClick');
+ cy.mount(<Button label="Click Me" onClick={onClickSpy} />);
+
+ cy.get('button').click();
+ cy.get('@onClick').should('have.been.calledOnce');
+ });
+});
+
+// Run test: FAILS - Button component doesn't exist yet
+// Error: "Cannot find module './Button'"
+
+// Step 2: GREEN - Minimal implementation
+// Button.tsx
+type ButtonProps = {
+ label: string;
+ onClick?: () => void;
+};
+
+export const Button = ({ label, onClick }: ButtonProps) => {
+ return <button onClick={onClick}>{label}</button>;
+};
+
+// Run test: PASSES - Component renders and handles clicks
+
+// Step 3: REFACTOR - Improve implementation
+// Add disabled state, loading state, variants
+type ButtonProps = {
+ label: string;
+ onClick?: () => void;
+ disabled?: boolean;
+ loading?: boolean;
+ variant?: 'primary' | 'secondary' | 'danger';
+};
+
+export const Button = ({
+ label,
+ onClick,
+ disabled = false,
+ loading = false,
+ variant = 'primary'
+}: ButtonProps) => {
+ return (
+ <button className={`btn btn-${variant}`} onClick={onClick} disabled={disabled || loading}>
+ {loading ? <span data-testid="spinner" className="spinner" /> : label}
+ </button>
+ );
+};
+
+// Step 4: Expand tests for new features
+describe('Button Component', () => {
+ it('should render with label', () => {
+ cy.mount(<Button label="Click Me" />);
+ cy.contains('Click Me').should('be.visible');
+ });
+
+ it('should call onClick when clicked', () => {
+ const onClickSpy = cy.stub().as('onClick');
+ cy.mount(<Button label="Click Me" onClick={onClickSpy} />);
+
+ cy.get('button').click();
+ cy.get('@onClick').should('have.been.calledOnce');
+ });
+
+ it('should be disabled when disabled prop is true', () => {
+ cy.mount(<Button label="Click Me" disabled />);
+ cy.get('button').should('be.disabled');
+ });
+
+ it('should show spinner when loading', () => {
+ cy.mount(<Button label="Click Me" loading />);
+ cy.get('[data-testid="spinner"]').should('be.visible');
+ cy.get('button').should('be.disabled');
+ });
+
+ it('should apply variant styles', () => {
+ cy.mount(<Button label="Click Me" variant="danger" />);
+ cy.get('button').should('have.class', 'btn-danger');
+ });
+});
+
+// Run tests: ALL PASS - Refactored component still works
+
+// Playwright Component Test equivalent
+import { test, expect } from '@playwright/experimental-ct-react';
+import { Button } from './Button';
+
+test.describe('Button Component', () => {
+ test('should call onClick when clicked', async ({ mount }) => {
+ let clicked = false;
+ const component = await mount(
+