feat: v6.0.0-alpha.0 - the future is now

40 src/modules/bmm/testarch/atdd.md Normal file
@@ -0,0 +1,40 @@
<!-- Powered by BMAD-CORE™ -->

# Acceptance TDD v2.0 (Slim)

```xml
<task id="bmad/bmm/testarch/tdd" name="Acceptance Test Driven Development">
  <llm critical="true">
    <i>Set command_key="*tdd"</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-commands.csv and parse the row where command equals command_key</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-knowledge.md into context</i>
    <i>Use CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags to guide execution</i>
    <i>Split pipe-delimited fields into individual checklist items</i>
    <i>Map knowledge_tags to sections in the knowledge brief and apply them while writing tests</i>
    <i>Keep responses concise and focused on generating the failing acceptance tests plus the implementation checklist</i>
  </llm>
  <flow>
    <step n="1" title="Preflight">
      <action>Verify each preflight requirement; gather missing info from user when needed</action>
      <action>Abort if halt_rules are triggered</action>
    </step>
    <step n="2" title="Execute TDD Flow">
      <action>Walk through flow_cues sequentially, adapting to story context</action>
      <action>Use knowledge brief heuristics to enforce Murat's patterns (one test = one concern, explicit assertions, etc.)</action>
    </step>
    <step n="3" title="Deliverables">
      <action>Produce artifacts described in deliverables</action>
      <action>Summarize failing tests and checklist items for the developer</action>
    </step>
  </flow>
  <halt>
    <i>Apply halt_rules from the CSV row exactly</i>
  </halt>
  <notes>
    <i>Use the notes column for additional constraints or reminders</i>
  </notes>
  <output>
    <i>Failing acceptance test files + implementation checklist summary</i>
  </output>
</task>
```

38 src/modules/bmm/testarch/automate.md Normal file
@@ -0,0 +1,38 @@
<!-- Powered by BMAD-CORE™ -->

# Automation Expansion v2.0 (Slim)

```xml
<task id="bmad/bmm/testarch/automate" name="Automation Expansion">
  <llm critical="true">
    <i>Set command_key="*automate"</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-commands.csv and read the row where command equals command_key</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-knowledge.md for heuristics</i>
    <i>Follow CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags</i>
    <i>Convert pipe-delimited values into actionable checklists</i>
    <i>Apply Murat's opinions from the knowledge brief when filling gaps or refactoring tests</i>
  </llm>
  <flow>
    <step n="1" title="Preflight">
      <action>Confirm prerequisites; stop if halt_rules are triggered</action>
    </step>
    <step n="2" title="Execute Automation Flow">
      <action>Walk through flow_cues to analyze existing coverage and add only necessary specs</action>
      <action>Use knowledge heuristics (composable helpers, deterministic waits, network boundary) while generating code</action>
    </step>
    <step n="3" title="Deliverables">
      <action>Create or update artifacts listed in deliverables</action>
      <action>Summarize coverage deltas and remaining recommendations</action>
    </step>
  </flow>
  <halt>
    <i>Apply halt_rules from the CSV row as written</i>
  </halt>
  <notes>
    <i>Reference the notes column for additional guardrails</i>
  </notes>
  <output>
    <i>Updated spec files and a concise summary of automation changes</i>
  </output>
</task>
```

39 src/modules/bmm/testarch/ci.md Normal file
@@ -0,0 +1,39 @@
<!-- Powered by BMAD-CORE™ -->

# CI/CD Enablement v2.0 (Slim)

```xml
<task id="bmad/bmm/testarch/ci" name="CI/CD Enablement">
  <llm critical="true">
    <i>Set command_key="*ci"</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-commands.csv and read the row where command equals command_key</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-knowledge.md to recall CI heuristics</i>
    <i>Follow CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags</i>
    <i>Split pipe-delimited values into actionable lists</i>
    <i>Keep output focused on workflow YAML, scripts, and guidance explicitly requested in deliverables</i>
  </llm>
  <flow>
    <step n="1" title="Preflight">
      <action>Confirm prerequisites and required permissions</action>
      <action>Stop if halt_rules trigger</action>
    </step>
    <step n="2" title="Execute CI Flow">
      <action>Apply flow_cues to design the pipeline stages</action>
      <action>Leverage knowledge brief guidance (cost vs confidence, sharding, artifacts) when making trade-offs</action>
    </step>
    <step n="3" title="Deliverables">
      <action>Create artifacts listed in deliverables (workflow files, scripts, documentation)</action>
      <action>Summarize the pipeline, selective testing strategy, and required secrets</action>
    </step>
  </flow>
  <halt>
    <i>Use halt_rules from the CSV row verbatim</i>
  </halt>
  <notes>
    <i>Reference notes column for optimization reminders</i>
  </notes>
  <output>
    <i>CI workflow + concise explanation ready for team adoption</i>
  </output>
</task>
```

41 src/modules/bmm/testarch/framework.md Normal file
@@ -0,0 +1,41 @@
<!-- Powered by BMAD-CORE™ -->

# Test Framework Setup v2.0 (Slim)

```xml
<task id="bmad/bmm/testarch/framework" name="Test Framework Setup">
  <llm critical="true">
    <i>Set command_key="*framework"</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-commands.csv and parse the row where command equals command_key</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-knowledge.md into memory</i>
    <i>Use the CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags to guide behavior</i>
    <i>Split pipe-delimited values (|) into individual checklist items</i>
    <i>Map knowledge_tags to matching sections in the knowledge brief and apply those heuristics throughout execution</i>
    <i>DO NOT expand beyond the guidance unless the user supplies extra context; keep instructions lean and adaptive</i>
  </llm>
  <flow>
    <step n="1" title="Run Preflight Checks">
      <action>Evaluate each item in preflight; confirm or collect missing information</action>
      <action>If any preflight requirement fails, follow halt_rules and stop</action>
    </step>
    <step n="2" title="Execute Framework Flow">
      <action>Follow the flow_cues sequence, adapting to the project's stack</action>
      <action>When deciding frameworks or patterns, apply relevant heuristics from tea-knowledge.md via knowledge_tags</action>
      <action>Keep generated assets minimal—only what the CSV specifies</action>
    </step>
    <step n="3" title="Finalize Deliverables">
      <action>Create artifacts listed in deliverables</action>
      <action>Capture a concise summary for the user explaining what was scaffolded</action>
    </step>
  </flow>
  <halt>
    <i>Follow halt_rules from the CSV row verbatim</i>
  </halt>
  <notes>
    <i>Use the notes column for additional guardrails while executing</i>
  </notes>
  <output>
    <i>Deliverables and summary specified in the CSV row</i>
  </output>
</task>
```

38 src/modules/bmm/testarch/nfr-assess.md Normal file
@@ -0,0 +1,38 @@
<!-- Powered by BMAD-CORE™ -->

# NFR Assessment v2.0 (Slim)

```xml
<task id="bmad/bmm/testarch/nfr-assess" name="NFR Assessment">
  <llm critical="true">
    <i>Set command_key="*nfr-assess"</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-commands.csv and parse the matching row</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-knowledge.md focusing on NFR guidance</i>
    <i>Use CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags</i>
    <i>Split pipe-delimited values into actionable lists</i>
    <i>Demand evidence for each non-functional claim (tests, telemetry, logs)</i>
  </llm>
  <flow>
    <step n="1" title="Preflight">
      <action>Confirm prerequisites; halt per halt_rules if unmet</action>
    </step>
    <step n="2" title="Assess NFRs">
      <action>Follow flow_cues to evaluate Security, Performance, Reliability, Maintainability</action>
      <action>Use knowledge heuristics to suggest monitoring and fail-fast patterns</action>
    </step>
    <step n="3" title="Deliverables">
      <action>Produce assessment document and recommendations defined in deliverables</action>
      <action>Summarize status, gaps, and actions</action>
    </step>
  </flow>
  <halt>
    <i>Apply halt_rules from the CSV row</i>
  </halt>
  <notes>
    <i>Reference notes column for negotiation framing (cost vs confidence)</i>
  </notes>
  <output>
    <i>NFR assessment markdown with clear next steps</i>
  </output>
</task>
```

38 src/modules/bmm/testarch/risk-profile.md Normal file
@@ -0,0 +1,38 @@
<!-- Powered by BMAD-CORE™ -->

# Risk Profile v2.0 (Slim)

```xml
<task id="bmad/bmm/testarch/risk-profile" name="Risk Profile">
  <llm critical="true">
    <i>Set command_key="*risk-profile"</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-commands.csv and parse the row where command equals command_key</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-knowledge.md focusing on risk-model guidance</i>
    <i>Use CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags as the full instruction set</i>
    <i>Split pipe-delimited values into actionable items</i>
    <i>Keep assessment grounded in evidence from PRD/architecture/story files—do not restate requirements as risks</i>
  </llm>
  <flow>
    <step n="1" title="Preflight">
      <action>Verify prerequisites; stop if halt_rules trigger</action>
    </step>
    <step n="2" title="Execute Risk Analysis">
      <action>Follow flow_cues to distinguish requirements from genuine risks and score probability × impact</action>
      <action>Use knowledge heuristics to calibrate scoring (score 9 rare, ≥6 notable) and recommend mitigations</action>
    </step>
    <step n="3" title="Deliverables">
      <action>Produce artifacts described in deliverables (assessment markdown, gate snippet, mitigation plan)</action>
      <action>Summarize key findings with clear recommendations</action>
    </step>
  </flow>
  <halt>
    <i>Apply halt_rules from the CSV row without modification</i>
  </halt>
  <notes>
    <i>Use notes column for calibration reminders</i>
  </notes>
  <output>
    <i>Risk assessment report + gate summary</i>
  </output>
</task>
```

11 src/modules/bmm/testarch/tea-commands.csv Normal file
@@ -0,0 +1,11 @@
command,title,when_to_use,preflight,flow_cues,deliverables,halt_rules,notes,knowledge_tags
*framework,Initialize test architecture,Run once per repo or when no production-ready harness exists,package.json present|no existing E2E framework detected|architectural context available,"Identify stack from package.json (React/Vue/Angular/Next.js); detect bundler (Vite/Webpack/Rollup/esbuild); match test language to source (JS/TS frontend -> JS/TS tests); choose Playwright for large or performance-critical repos, Cypress for small DX-first teams; create {framework}/tests/ and {framework}/support/fixtures/ and {framework}/support/helpers/; configure config files with timeouts (action 15s, navigation 30s, test 60s) and reporters (HTML + JUnit); create .env.example with TEST_ENV, BASE_URL, API_URL; implement pure function->fixture->mergeTests pattern and faker-based data factories; enable failure-only screenshots/videos and ensure .nvmrc recorded",playwright/ or cypress/ folder with config + support tree; .env.example; .nvmrc; example tests; README with setup instructions,"If package.json missing OR framework already configured, halt and instruct manual review","Playwright: worker parallelism, trace viewer, multi-language support; Cypress: avoid if many dependent API calls; Component testing: Vitest (large) or Cypress CT (small); Contract testing: Pact for microservices; always use data-cy/data-testid selectors",philosophy/core|patterns/fixtures|patterns/selectors
*tdd,Acceptance Test Driven Development,Before implementation when team commits to TDD,story approved with acceptance criteria|dev sandbox ready|framework scaffolding in place,Clarify acceptance criteria and affected systems; pick appropriate test level (E2E/API/Component); write failing acceptance tests using Given-When-Then with network interception first then navigation; create data factories and fixture stubs for required entities; outline mocks/fixtures infrastructure the dev team must supply; generate component tests for critical UI logic; compile implementation checklist mapping each test to source work; share failing tests with dev agent and maintain red -> green -> refactor loop,Failing acceptance test files; component test stubs; fixture/mocks skeleton; implementation checklist with test-to-code mapping; documented data-testid requirements,"If criteria ambiguous or framework missing, halt for clarification",Start red; one assertion per test; use beforeEach for visible setup (no shared state); remind devs to run tests before writing production code; update checklist as each test goes green,philosophy/core|patterns/test-structure
*automate,Automation expansion,After implementation or when reforging coverage,all acceptance criteria satisfied|code builds locally|framework configured,"Review story source/diff to confirm automation target; ensure fixture architecture exists (mergeTests for Playwright, commands for Cypress) and implement apiRequest/network/auth/log fixtures if missing; map acceptance criteria with test-levels-framework.md guidance and avoid duplicate coverage; assign priorities using test-priorities-matrix.md; generate unit/integration/E2E specs with naming convention feature-name.spec.ts, covering happy, negative, and edge paths; enforce deterministic waits, self-cleaning factories, and <=1.5 minute execution per test; run suite and capture Definition of Done results; update package.json scripts and README instructions",New or enhanced spec files grouped by level; fixture modules under support/; data factory utilities; updated package.json scripts and README notes; DoD summary with remaining gaps; gate-ready coverage summary,"If automation target unclear or framework missing, halt and request clarification",Never create page objects; keep tests <300 lines and stateless; forbid hard waits and conditional flow in tests; co-locate tests near source; flag flaky patterns immediately,philosophy/core|patterns/helpers|patterns/waits|patterns/dod
*ci,CI/CD quality pipeline,Once automation suite exists or needs optimization,git repository initialized|tests pass locally|team agrees on target environments|access to CI platform settings,"Detect CI platform (default GitHub Actions, ask if GitLab/CircleCI/etc); scaffold workflow (.github/workflows/test.yml or platform equivalent) with triggers; set Node.js version from .nvmrc and cache node_modules + browsers; stage jobs: lint -> unit -> component -> e2e with matrix parallelization (shard by file not test); add selective execution script for affected tests; create burn-in job that reruns changed specs 3x to catch flakiness; attach artifacts on failure (traces/videos/HAR); configure retries/backoff and concurrency controls; document required secrets and environment variables; add Slack/email notifications and local script mirroring CI",.github/workflows/test.yml (or platform equivalent); scripts/test-changed.sh; scripts/burn-in-changed.sh; updated README/ci.md instructions; secrets checklist; dashboard or badge configuration,"If git repo absent, test framework missing, or CI platform unspecified, halt and request setup",Target 20x speedups via parallel shards + caching; shard by file; keep jobs under 10 minutes; wait-on-timeout 120s for app startup; ensure npm test locally matches CI run; mention alternative platform paths when not on GitHub,philosophy/core|ci-strategy
*risk-profile,Risk profile analysis,"After story approval, before development",story markdown present|acceptance criteria clear|architecture/PRD accessible,"Filter requirements so only genuine risks remain; review PRD/architecture/story for unresolved gaps; classify risks across TECH, SEC, PERF, DATA, BUS, OPS with category definitions; request clarification when evidence missing; score probability (1 unlikely, 2 possible, 3 likely) and impact (1 minor, 2 degraded, 3 critical) then compute totals; highlight risks >=6 and plan mitigations with owners and timelines; prepare gate summary with residual risk",Risk assessment markdown in docs/qa/assessments; table of category/probability/impact/score; gate YAML snippet summarizing totals; mitigation matrix with owners and due dates,"If story missing or criteria unclear, halt for clarification","Category definitions: TECH=unmitigated architecture flaws, SEC=missing controls/vulnerabilities, PERF=SLA-breaking performance, DATA=loss/corruption scenarios, BUS=user/business harm, OPS=deployment/run failures; rely on evidence, not speculation; score 9 -> FAIL, 6-8 -> CONCERNS; most stories should have 0-1 high risks",philosophy/core|risk-model
*test-design,Test design playbook,"After risk profile, before coding",risk assessment completed|story acceptance criteria available,"Break acceptance criteria into atomic scenarios; reference test-levels-framework.md to pick unit/integration/E2E/component levels; avoid duplicate coverage and prefer lower levels when possible; assign priorities using test-priorities-matrix.md (P0 revenue/security, P1 core journeys, P2 secondary, P3 nice-to-have); map scenarios to risk mitigations and required data/tooling; follow naming {epic}.{story}-LEVEL-SEQ and plan execution order",Test-design markdown saved to docs/qa/assessments; scenario table with requirement/level/priority/mitigation; gate YAML block summarizing scenario counts and coverage; recommended execution order,"If risk profile missing or acceptance criteria unclear, request it and halt","Shift left: unit first, escalate only when needed; tie scenarios back to risk mitigations; keep scenarios independent and maintainable",philosophy/core|patterns/test-structure
*trace,Requirements traceability,Mid-development checkpoint or before review,tests exist for story|access to source + specs,"Gather acceptance criteria and implemented tests; map each criterion to concrete tests (file + describe/it) using Given-When-Then narrative; classify coverage status as FULL, PARTIAL, NONE, UNIT-ONLY, INTEGRATION-ONLY; flag severity based on priority (P0 gaps critical); recommend additional tests or refactors; generate gate YAML coverage summary",Traceability report saved under docs/qa/assessments; coverage matrix with status per criterion; gate YAML snippet for coverage totals and gaps,"If story lacks implemented tests, pause and advise running *tdd or writing tests","Definitions: FULL=all scenarios validated, PARTIAL=some coverage exists, NONE=no validation, UNIT-ONLY=missing higher level, INTEGRATION-ONLY=lacks lower confidence; ensure assertions explicit and avoid duplicate coverage",philosophy/core|patterns/assertions
*nfr-assess,NFR validation,Late development or pre-review for critical stories,implementation deployed locally|non-functional goals defined or discoverable,"Ask which NFRs to assess; default to core four (security, performance, reliability, maintainability); gather thresholds from story/architecture/technical-preferences and mark unknown targets; inspect evidence (tests, telemetry, logs) for each NFR; classify status using deterministic pass/concerns/fail rules and list quick wins; produce gate block and assessment doc with recommended actions",NFR assessment markdown with findings; gate YAML block capturing statuses and notes; checklist of evidence gaps and follow-up owners,"If NFR targets undefined and no guidance available, request definition and halt","Unknown thresholds -> CONCERNS, never guess; ensure each NFR has evidence or call it out; suggest monitoring hooks and fail-fast mechanisms when gaps exist",philosophy/core|nfr
*review,Comprehensive TEA review,Story marked ready; tests passing locally,traceability complete|risk + design docs available|tests executed locally,"Determine review depth (deep if security/auth touched, no new tests, diff >500, prior gate FAIL/CONCERNS, >5 acceptance criteria); follow flow cues to inspect code quality, selectors, waits, and architecture alignment; map requirements to tests and ensure coverage matches trace report; perform safe refactors when low risk and record others as recommendations; prepare TEA Results summary and gate recommendation",Updated story markdown with TEA Results and recommendations; gate recommendation summary; list of refactors performed and outstanding issues,"If prerequisites missing (tests failing, docs absent), halt with checklist","Evidence-focused: reference concrete files/lines; escalate security/performance issues immediately; distinguish must-fix vs optional improvements; reuse Murat patterns for helpers, waits, selectors",philosophy/core|patterns/review
*gate,Quality gate decision,After review or mitigation updates,latest assessments gathered|team consensus on fixes,"Assemble story metadata (id, title); choose gate status using deterministic rules (PASS all critical issues resolved, CONCERNS minor residual risk, FAIL critical blockers, WAIVED approved by business); update YAML schema with sections: metadata, waiver status, top_issues, risk_summary totals, recommendations (must_fix, monitor), nfr_validation statuses, history; capture rationale, owners, due dates, and summary comment back to story","docs/qa/gates/{story}.yml updated with schema fields (schema, story, story_title, gate, status_reason, reviewer, updated, waiver, top_issues, risk_summary, recommendations, nfr_validation, history); summary message for team","If review incomplete or risk data outdated, halt and request rerun","FAIL whenever unresolved P0 risks/tests or security holes remain; CONCERNS when mitigations planned but residual risk exists; WAIVED requires reason, approver, and expiry; maintain audit trail in history",philosophy/core|risk-model
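
For illustration only (not part of the committed CSV), here is a minimal TypeScript sketch of how a task agent might turn one of these pipe-delimited cells into checklist items; the `TeaRow` shape and `toChecklist` helper are hypothetical:

```typescript
// Hypothetical row shape for tea-commands.csv (columns abridged).
type TeaRow = {
  command: string;
  preflight: string;
  flow_cues: string;
  deliverables: string;
  halt_rules: string;
};

// Pipe-delimited cell -> individual checklist items.
const toChecklist = (cell: string): string[] =>
  cell.split('|').map((item) => item.trim()).filter(Boolean);

// Example:
// toChecklist('package.json present|no existing E2E framework detected')
//   -> ['package.json present', 'no existing E2E framework detected']
```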

38 src/modules/bmm/testarch/tea-gate.md Normal file
@@ -0,0 +1,38 @@
<!-- Powered by BMAD-CORE™ -->

# Quality Gate v2.0 (Slim)

```xml
<task id="bmad/bmm/testarch/tea-gate" name="Quality Gate">
  <llm critical="true">
    <i>Set command_key="*gate"</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-commands.csv and read the matching row</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-knowledge.md to reinforce risk-model heuristics</i>
    <i>Use CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags</i>
    <i>Split pipe-delimited values into actionable items</i>
    <i>Apply deterministic rules for PASS/CONCERNS/FAIL/WAIVED; capture rationale and approvals</i>
  </llm>
  <flow>
    <step n="1" title="Preflight">
      <action>Gather latest assessments and confirm prerequisites; halt per halt_rules if missing</action>
    </step>
    <step n="2" title="Set Gate Decision">
      <action>Follow flow_cues to determine status, residual risk, follow-ups</action>
      <action>Use knowledge heuristics to balance cost vs confidence when negotiating waivers</action>
    </step>
    <step n="3" title="Deliverables">
      <action>Update gate YAML specified in deliverables</action>
      <action>Summarize decision, rationale, owners, and deadlines</action>
    </step>
  </flow>
  <halt>
    <i>Apply halt_rules from the CSV row</i>
  </halt>
  <notes>
    <i>Use notes column for quality bar reminders</i>
  </notes>
  <output>
    <i>Updated gate file with documented decision</i>
  </output>
</task>
```

275 src/modules/bmm/testarch/tea-knowledge.md Normal file
@@ -0,0 +1,275 @@
<!-- Powered by BMAD-CORE™ -->

# Murat Test Architecture Foundations (Slim Brief)

This brief distills Murat Ozcan's testing philosophy used by the Test Architect agent. Use it as the north star after loading `tea-commands.csv`.

## Core Principles

- Cost vs confidence: cost = creation + execution + maintenance. Push confidence where impact is highest and skip redundant checks.
- Engineering assumes failure: predict what breaks, defend with tests, learn from every failure. A single failing test means the software is not ready.
- Quality is teamwork. Story estimates include the testing, documentation, and deployment work required to ship safely.
- Missing test coverage is feature debt (it hurts customers), not mere tech debt—treat it with the same urgency as functionality gaps.
- Shared mutable state is the source of all evil: design fixtures and helpers so each test owns its data.
- Composition over inheritance: prefer functional helpers and fixtures that compose behavior; page objects and deep class trees hide duplication.
- Setup via API, assert via UI. Keep tests user-centric while priming state through fast interfaces.
- One test = one concern. Explicit assertions live in the test body, not buried in helpers.

## Patterns & Heuristics

- Selector order: `data-cy` / `data-testid` -> ARIA -> text. Avoid brittle CSS, IDs, or index-based locators.
- Network boundary is the mock boundary. Stub at the edge, never mid-service unless risk demands it.
- **Network-first pattern**: ALWAYS intercept before navigation: `const call = interceptNetwork(); await page.goto(); await call;`
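
A minimal Playwright sketch of the network-first rule, using the standard `waitForResponse` API; the `/api/users` route and `user-list` test id are illustrative:

```typescript
import { test, expect } from '@playwright/test';

test('user list loads (network-first)', async ({ page }) => {
  // Register the wait BEFORE navigating so the response cannot slip past.
  const usersCall = page.waitForResponse(
    (res) => res.url().includes('/api/users') && res.ok(),
  );
  await page.goto('/users');
  await usersCall; // deterministic wait on the real response, never a sleep
  await expect(page.getByTestId('user-list')).toBeVisible();
});
```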
- Deterministic waits only: await specific network responses, elements disappearing, or event hooks. Ban fixed sleeps.
- **Fixture architecture (The Murat Way)**:

```typescript
import { test as base, mergeTests } from '@playwright/test';

// 1. Pure function first (testable independently)
export async function apiRequest({ request, method, url, data }) {
  /* implementation */
}

// 2. Fixture wrapper
export const apiRequestFixture = base.extend({
  apiRequest: async ({ request }, use) => {
    await use((params) => apiRequest({ request, ...params }));
  },
});

// 3. Compose via mergeTests (authFixture and networkFixture are sibling fixture modules)
export const test = mergeTests(base, apiRequestFixture, authFixture, networkFixture);
```

- **Data factories pattern**:

```typescript
import { faker } from '@faker-js/faker';

export const createUser = (overrides = {}) => ({
  id: faker.string.uuid(),
  email: faker.internet.email(),
  ...overrides,
});
```

- Visual debugging: keep component/test runner UIs available (Playwright trace viewer, Cypress runner) to accelerate feedback.

## Risk & Coverage

- Risk score = probability (1-3) × impact (1-3). Score 9 => gate FAIL, ≥6 => CONCERNS. Most stories have 0-1 high risks.
- Test level ratio: heavy unit/component coverage, but always include E2E for critical journeys and integration seams.
- Traceability maps reality: tie each acceptance criterion to concrete tests and flag missing coverage or duplicated value.
- NFR focus areas: Security, Performance, Reliability, Maintainability. Demand evidence (tests, telemetry, alerts) before approving.

## Test Configuration

- **Timeouts**: actionTimeout 15s, navigationTimeout 30s, testTimeout 60s, expectTimeout 10s
- **Reporters**: HTML (never auto-open) + JUnit XML for CI integration
- **Media**: screenshot only-on-failure, video retain-on-failure
- **Language matching**: tests should match the source code language (JS/TS frontend -> JS/TS tests)
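
A sketch of these defaults in a `playwright.config.ts`, assuming Playwright's standard configuration keys:

```typescript
import { defineConfig } from '@playwright/test';

export default defineConfig({
  timeout: 60_000, // per-test budget
  expect: { timeout: 10_000 },
  use: {
    actionTimeout: 15_000,
    navigationTimeout: 30_000,
    screenshot: 'only-on-failure',
    video: 'retain-on-failure',
  },
  // HTML for humans (never auto-open), JUnit XML for CI.
  reporter: [['html', { open: 'never' }], ['junit', { outputFile: 'results/junit.xml' }]],
});
```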

## Automation & CI

- Prefer Playwright for multi-language teams, worker parallelism, rich debugging; Cypress suits smaller DX-first repos or component-heavy spikes.
- **Framework selection**: large repo + performance = Playwright; small repo + DX = Cypress
- **Component testing**: large repos = Vitest (has UI, easy RTL conversion); small repos = Cypress CT
- CI pipelines run lint -> unit -> component -> e2e, with selective reruns for flakes and artifacts (videos, traces) on failure.
- Shard suites to keep feedback tight; treat CI as a shared safety net, not a bottleneck.
- Test selection ideas (32+ strategies): filter by tags/grep (`npm run test -- --grep "@smoke"`), file patterns (`--spec "**/*checkout*"`), changed files (`npm run test:changed`), or test level (`npm run test:unit` / `npm run test:e2e`).
- Burn-in testing: run new or changed specs multiple times (e.g., 3-10x) to flush flakes before they land in main; see the sketch after this list.
- Keep helper scripts handy (`scripts/test-changed.sh`, `scripts/burn-in-changed.sh`) so CI and local workflows stay in sync.
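
A hedged sketch of the burn-in idea as a small Node/TypeScript script; `--repeat-each` is Playwright's standard flag and the 3x count follows the brief, while the git range and file filter are illustrative:

```typescript
import { execSync } from 'node:child_process';

// Specs changed relative to main; adjust the range to your branching model.
const changedSpecs = execSync('git diff --name-only origin/main...HEAD', { encoding: 'utf8' })
  .split('\n')
  .filter((file) => file.endsWith('.spec.ts'));

if (changedSpecs.length === 0) {
  console.log('No changed specs; skipping burn-in.');
} else {
  // Rerun each changed spec 3x to flush flakes before they land in main.
  execSync(`npx playwright test ${changedSpecs.join(' ')} --repeat-each=3`, {
    stdio: 'inherit',
  });
}
```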

## Project Structure & Config

- **Directory structure**:

```
project/
├── playwright.config.ts   # Environment-based config loading
├── playwright/
│   ├── tests/             # All specs (group by domain: auth/, network/, feature-flags/…)
│   ├── support/           # Frequently touched helpers (global-setup, merged-fixtures, ui helpers, factories)
│   ├── config/            # Environment configs (base, local, staging, production)
│   └── scripts/           # Expert utilities (burn-in, record/playback, maintenance)
```

- **Environment config pattern**:

```javascript
// Pick the environment config via TEST_ENV (defaults to local).
const configs = {
  local: require('./config/local.config'),
  staging: require('./config/staging.config'),
  prod: require('./config/prod.config'),
};

module.exports = configs[process.env.TEST_ENV || 'local'];
```

## Test Hygiene & Independence

- Tests must be independent and stateless; never rely on execution order.
- Clean up all data created during tests (afterEach or API cleanup); see the sketch after this list.
- Ensure idempotency: same results every run.
- No shared mutable state; prefer factory functions per test.
- Tests must run in parallel safely; never commit `.only`.
- Prefer co-location: component tests next to components, integration in `tests/integration`, etc.
- Feature flags: centralize enum definitions (e.g., `export const FLAGS = Object.freeze({ NEW_FEATURE: 'new-feature' })`), provide helpers to set/clear targeting, and write dedicated flag tests that clean up targeting after each run.
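
A minimal sketch pulling these hygiene rules together, assuming a Playwright fixture that seeds data through the API and removes it afterwards; the endpoints, the `createUser` factory import, and the `user-email` test id are illustrative:

```typescript
import { test as base, expect } from '@playwright/test';
import { createUser } from './support/factories'; // hypothetical faker-based factory

// The fixture owns its data: created fresh per test, deleted afterwards.
const test = base.extend<{ seededUser: { id: string; email: string } }>({
  seededUser: async ({ request }, use) => {
    const user = createUser();
    await request.post('/api/users', { data: user }); // illustrative endpoint
    await use(user);
    // Self-cleaning even when the test fails, so runs stay idempotent and parallel-safe.
    await request.delete(`/api/users/${user.id}`);
  },
});

test('profile shows the seeded email', async ({ page, seededUser }) => {
  await page.goto(`/users/${seededUser.id}`);
  await expect(page.getByTestId('user-email')).toHaveText(seededUser.email);
});
```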

## CCTDD (Component Test-Driven Development)

- Start with failing component test -> implement minimal component -> refactor.
- Component tests catch ~70% of bugs before integration.
- Use `cy.mount()` or `render()` to test components in isolation; focus on user interactions.
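
A hedged sketch of the red step, assuming Vitest with React Testing Library and jest-dom matchers; the `Counter` component is the thing about to be built:

```typescript
import { render, screen } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { expect, test } from 'vitest';
import { Counter } from './Counter'; // does not exist yet: this test starts red

test('increments the count on click', async () => {
  render(<Counter />);
  await userEvent.click(screen.getByRole('button', { name: /increment/i }));
  // '@testing-library/jest-dom' matcher, assumed registered in test setup
  expect(screen.getByTestId('count')).toHaveTextContent('1');
});
```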

## CI Optimization Strategies

- **Parallel execution**: split by test file, not test case.
- **Smart selection**: run only tests affected by changes (dependency graphs, git diff).
- **Burn-in testing**: run new/modified tests 3x to catch flakiness early.
- **HAR recording**: capture network traffic (HAR files) during stable runs so CI can replay it offline when external systems are flaky.
- **Selective reruns**: only rerun failed specs, not the entire suite.

## Package Scripts

- **Essential npm scripts**:

```json
"test:e2e": "playwright test",
"test:unit": "vitest run",
"test:component": "cypress run --component",
"test:contract": "jest --testMatch='**/pact/*.spec.ts'",
"test:debug": "playwright test --headed",
"test:ci": "npm run test:unit && npm run test:e2e",
"contract:publish": "pact-broker publish"
```

## Contract Testing (Pact)

- Use for microservices with integration points.
- Consumer generates contracts, provider verifies.
- Structure: `pact/` directory at root, `pact/config.ts` for broker settings.
- Reference repos: pact-js-example-consumer, pact-js-example-provider, pact-js-example-react-consumer.

## Online Resources & Examples

- Fixture architecture: https://github.com/muratkeremozcan/cy-vs-pw-murats-version
- Playwright patterns: https://github.com/muratkeremozcan/pw-book
- Component testing (CCTDD): https://github.com/muratkeremozcan/cctdd
- Contract testing: https://github.com/muratkeremozcan/pact-js-example-consumer
- Full app example: https://github.com/muratkeremozcan/tour-of-heroes-react-vite-cypress-ts
- Blog posts: https://dev.to/muratkeremozcan

## Risk Model Details

- TECH: Unmitigated architecture flaws, experimental patterns without fallbacks.
- SEC: Missing security controls, potential vulnerabilities, unsafe data handling.
- PERF: SLA-breaking slowdowns, resource exhaustion, lack of caching.
- DATA: Loss or corruption scenarios, migrations without rollback, inconsistent schemas.
- BUS: Business or user harm, revenue-impacting failures, compliance gaps.
- OPS: Deployment, infrastructure, or observability gaps that block releases.

## Probability & Impact Scale

- Probability 1 = Unlikely (standard implementation, low risk).
- Probability 2 = Possible (edge cases, needs attention).
- Probability 3 = Likely (known issues, high uncertainty).
- Impact 1 = Minor (cosmetic, easy workaround).
- Impact 2 = Degraded (partial feature loss, manual workaround needed).
- Impact 3 = Critical (blocker, data/security/regulatory impact).
- Scores: 9 => FAIL, 6-8 => CONCERNS, 4 => monitor, 1-3 => note only.
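
These thresholds are deterministic, so a tiny helper can mirror them exactly; a sketch (the function name and return shape are illustrative):

```typescript
type Band = 'FAIL' | 'CONCERNS' | 'MONITOR' | 'NOTE';

// Mirrors the scale above: 9 => FAIL, 6-8 => CONCERNS, 4 => monitor, 1-3 => note.
export function riskBand(probability: 1 | 2 | 3, impact: 1 | 2 | 3): { score: number; band: Band } {
  const score = probability * impact; // possible scores: 1, 2, 3, 4, 6, 9
  if (score === 9) return { score, band: 'FAIL' };
  if (score >= 6) return { score, band: 'CONCERNS' };
  if (score === 4) return { score, band: 'MONITOR' };
  return { score, band: 'NOTE' };
}
```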

## Test Design Frameworks

- Use `docs/docs-v6/v6-bmm/test-levels-framework.md` for level selection and anti-patterns.
- Use `docs/docs-v6/v6-bmm/test-priorities-matrix.md` for P0-P3 priority criteria.
- Naming convention: `{epic}.{story}-{LEVEL}-{sequence}` (e.g., `2.4-E2E-01`).
- Tie each scenario to risk mitigations or acceptance criteria.

## Test Quality Definition of Done

- No hard waits (`page.waitForTimeout`, `cy.wait(ms)`)—use deterministic waits.
- Each test < 300 lines and executes in <= 1.5 minutes.
- Tests are stateless, parallel-safe, and self-cleaning.
- No conditional logic in tests (`if/else`, `try/catch` controlling flow).
- Explicit assertions live in tests, not hidden in helpers.
- Tests must run green locally and in CI with identical commands.
- A test delivers value only when it has failed at least once—design suites so they regularly catch regressions during development.

## NFR Status Criteria

- **Security**: PASS (auth, authz, secrets handled), CONCERNS (minor gaps), FAIL (critical exposure).
- **Performance**: PASS (meets targets, profiling evidence), CONCERNS (approaching limits), FAIL (breaches limits, leaks).
- **Reliability**: PASS (error handling, retries, health checks), CONCERNS (partial coverage), FAIL (no recovery, crashes).
- **Maintainability**: PASS (tests + docs + clean code), CONCERNS (duplication, low coverage), FAIL (no tests, tangled code).
- Unknown targets => CONCERNS until defined.

## Quality Gate Schema

```yaml
schema: 1
story: '{epic}.{story}'
story_title: '{title}'
gate: PASS|CONCERNS|FAIL|WAIVED
status_reason: 'Single sentence summary'
reviewer: 'Murat (Master Test Architect)'
updated: '2024-09-20T12:34:56Z'
waiver:
  active: false
  reason: ''
  approved_by: ''
  expires: ''
top_issues:
  - id: SEC-001
    severity: high
    finding: 'Issue description'
    suggested_action: 'Action to resolve'
risk_summary:
  totals:
    critical: 0
    high: 0
    medium: 0
    low: 0
recommendations:
  must_fix: []
  monitor: []
nfr_validation:
  security: { status: PASS, notes: '' }
  performance: { status: CONCERNS, notes: 'Add caching' }
  reliability: { status: PASS, notes: '' }
  maintainability: { status: PASS, notes: '' }
history:
  - at: '2024-09-20T12:34:56Z'
    gate: CONCERNS
    note: 'Initial review'
```

- Optional sections: `quality_score` block for extended metrics, and `evidence` block (tests_reviewed, risks_identified, trace.ac_covered/ac_gaps) when teams track them.

## Collaborative TDD Loop

- Share failing acceptance tests with the developer or AI agent.
- Track red -> green -> refactor progress alongside the implementation checklist.
- Update checklist items as each test passes; add new tests for discovered edge cases.
- Keep the conversation focused on observable behavior, not implementation detail.

## Traceability Coverage Definitions

- FULL: All scenarios for the criterion validated across appropriate levels.
- PARTIAL: Some coverage exists but gaps remain.
- NONE: No tests currently validate the criterion.
- UNIT-ONLY: Only low-level tests exist; add integration/E2E.
- INTEGRATION-ONLY: Missing unit/component coverage for fast feedback.
- Avoid naive UI E2E until service-level confidence exists; use API or contract tests to harden backends first, then add minimal UI coverage to fill the gaps.

## CI Platform Guidance

- Default to GitHub Actions if no preference is given; otherwise ask for GitLab, CircleCI, etc.
- Ensure the local script mirrors the CI pipeline (npm test vs CI workflow).
- Use concurrency controls to prevent duplicate runs (`concurrency` block in GitHub Actions).
- Keep job runtime under 10 minutes; split further if necessary.

## Testing Tool Preferences

- Component testing: large repositories prioritize Vitest with UI (fast, component-native); smaller DX-first teams with existing Cypress stacks can keep Cypress Component Testing for consistency.
- E2E testing: favor Playwright for large or performance-sensitive repos; reserve Cypress for smaller DX-first teams where developer experience outweighs scale.
- API testing: prefer Playwright's API testing or contract suites over ad-hoc REST clients.
- Contract testing: Pact.js for consumer-driven contracts; keep `pact/` config in the repo.
- Visual testing: Percy, Chromatic, or Playwright snapshots when UX must be audited.

## Naming Conventions

- File names: `ComponentName.cy.tsx` for Cypress component tests, `component-name.spec.ts` for Playwright, `ComponentName.test.tsx` for unit/RTL.
- Describe blocks: `describe('Feature/Component Name', () => { context('when condition', ...) })`.
- Data attributes: always kebab-case (`data-cy="submit-button"`, `data-testid="user-email"`).

## Reference Materials

If deeper context is needed, consult Murat's testing philosophy notes, blog posts, and sample repositories indexed at https://github.com/muratkeremozcan/test-resources-for-ai/blob/main/gitingest-full-repo-text-version.txt.

39 src/modules/bmm/testarch/test-design.md Normal file
@@ -0,0 +1,39 @@
<!-- Powered by BMAD-CORE™ -->

# Test Design v2.0 (Slim)

```xml
<task id="bmad/bmm/testarch/test-design" name="Test Design">
  <llm critical="true">
    <i>Set command_key="*test-design"</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-commands.csv and parse the matching row</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-knowledge.md to reinforce Murat's coverage heuristics</i>
    <i>Use CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags as guidance</i>
    <i>Split pipe-delimited values into actionable lists</i>
    <i>Keep documents actionable—no verbose restatement of requirements</i>
  </llm>
  <flow>
    <step n="1" title="Preflight">
      <action>Confirm required inputs (risk profile, acceptance criteria)</action>
      <action>Abort using halt_rules if prerequisites missing</action>
    </step>
    <step n="2" title="Design Strategy">
      <action>Follow flow_cues to map criteria to scenarios, assign test levels, set priorities</action>
      <action>Use knowledge heuristics for ratios, data factories, and cost vs confidence trade-offs</action>
    </step>
    <step n="3" title="Deliverables">
      <action>Create artifacts defined in deliverables (strategy markdown, tables)</action>
      <action>Summarize guidance for developers/testers</action>
    </step>
  </flow>
  <halt>
    <i>Follow halt_rules from the CSV row</i>
  </halt>
  <notes>
    <i>Apply notes column for extra context</i>
  </notes>
  <output>
    <i>Lean test design document aligned with risk profile</i>
  </output>
</task>
```

148 src/modules/bmm/testarch/test-levels-framework.md Normal file
@@ -0,0 +1,148 @@
<!-- Powered by BMAD-CORE™ -->

# Test Levels Framework

Comprehensive guide for determining appropriate test levels (unit, integration, E2E) for different scenarios.

## Test Level Decision Matrix

### Unit Tests

**When to use:**

- Testing pure functions and business logic
- Algorithm correctness
- Input validation and data transformation
- Error handling in isolated components
- Complex calculations or state machines

**Characteristics:**

- Fast execution (immediate feedback)
- No external dependencies (DB, API, file system)
- Highly maintainable and stable
- Easy to debug failures

**Example scenarios:**

```yaml
unit_test:
  component: 'PriceCalculator'
  scenario: 'Calculate discount with multiple rules'
  justification: 'Complex business logic with multiple branches'
  mock_requirements: 'None - pure function'
```

### Integration Tests

**When to use:**

- Component interaction verification
- Database operations and transactions
- API endpoint contracts
- Service-to-service communication
- Middleware and interceptor behavior

**Characteristics:**

- Moderate execution time
- Tests component boundaries
- May use test databases or containers
- Validates system integration points

**Example scenarios:**

```yaml
integration_test:
  components: ['UserService', 'AuthRepository']
  scenario: 'Create user with role assignment'
  justification: 'Critical data flow between service and persistence'
  test_environment: 'In-memory database'
```

### End-to-End Tests

**When to use:**

- Critical user journeys
- Cross-system workflows
- Visual regression testing
- Compliance and regulatory requirements
- Final validation before release

**Characteristics:**

- Slower execution
- Tests complete workflows
- Requires full environment setup
- Most realistic but most brittle

**Example scenarios:**

```yaml
e2e_test:
  journey: 'Complete checkout process'
  scenario: 'User purchases with saved payment method'
  justification: 'Revenue-critical path requiring full validation'
  environment: 'Staging with test payment gateway'
```

## Test Level Selection Rules

### Favor Unit Tests When:

- Logic can be isolated
- No side effects involved
- Fast feedback needed
- High cyclomatic complexity

### Favor Integration Tests When:

- Testing persistence layer
- Validating service contracts
- Testing middleware/interceptors
- Component boundaries critical

### Favor E2E Tests When:

- User-facing critical paths
- Multi-system interactions
- Regulatory compliance scenarios
- Visual regression important

## Anti-patterns to Avoid

- E2E testing for business logic validation
- Unit testing framework behavior
- Integration testing third-party libraries
- Duplicate coverage across levels

## Duplicate Coverage Guard

**Before adding any test, check:**

1. Is this already tested at a lower level?
2. Can a unit test cover this instead of integration?
3. Can an integration test cover this instead of E2E?

**Coverage overlap is only acceptable when:**

- Testing different aspects (unit: logic, integration: interaction, e2e: user experience)
- Critical paths requiring defense in depth
- Regression prevention for previously broken functionality

## Test Naming Conventions

- Unit: `test_{component}_{scenario}`
- Integration: `test_{flow}_{interaction}`
- E2E: `test_{journey}_{outcome}`

## Test ID Format

`{EPIC}.{STORY}-{LEVEL}-{SEQ}`

Examples:

- `1.3-UNIT-001`
- `1.3-INT-002`
- `1.3-E2E-001`

174 src/modules/bmm/testarch/test-priorities-matrix.md Normal file
@@ -0,0 +1,174 @@
<!-- Powered by BMAD-CORE™ -->

# Test Priorities Matrix

Guide for prioritizing test scenarios based on risk, criticality, and business impact.

## Priority Levels

### P0 - Critical (Must Test)

**Criteria:**

- Revenue-impacting functionality
- Security-critical paths
- Data integrity operations
- Regulatory compliance requirements
- Previously broken functionality (regression prevention)

**Examples:**

- Payment processing
- Authentication/authorization
- User data creation/deletion
- Financial calculations
- GDPR/privacy compliance

**Testing Requirements:**

- Comprehensive coverage at all levels
- Both happy and unhappy paths
- Edge cases and error scenarios
- Performance under load

### P1 - High (Should Test)

**Criteria:**

- Core user journeys
- Frequently used features
- Features with complex logic
- Integration points between systems
- Features affecting user experience

**Examples:**

- User registration flow
- Search functionality
- Data import/export
- Notification systems
- Dashboard displays

**Testing Requirements:**

- Primary happy paths required
- Key error scenarios
- Critical edge cases
- Basic performance validation

### P2 - Medium (Nice to Test)

**Criteria:**

- Secondary features
- Admin functionality
- Reporting features
- Configuration options
- UI polish and aesthetics

**Examples:**

- Admin settings panels
- Report generation
- Theme customization
- Help documentation
- Analytics tracking

**Testing Requirements:**

- Happy path coverage
- Basic error handling
- Can defer edge cases

### P3 - Low (Test if Time Permits)

**Criteria:**

- Rarely used features
- Nice-to-have functionality
- Cosmetic issues
- Non-critical optimizations

**Examples:**

- Advanced preferences
- Legacy feature support
- Experimental features
- Debug utilities

**Testing Requirements:**

- Smoke tests only
- Can rely on manual testing
- Document known limitations

## Risk-Based Priority Adjustments

### Increase Priority When:

- High user impact (affects >50% of users)
- High financial impact (>$10K potential loss)
- Security vulnerability potential
- Compliance/legal requirements
- Customer-reported issues
- Complex implementation (>500 LOC)
- Multiple system dependencies

### Decrease Priority When:

- Feature flag protected
- Gradual rollout planned
- Strong monitoring in place
- Easy rollback capability
- Low usage metrics
- Simple implementation
- Well-isolated component

## Test Coverage by Priority

| Priority | Unit Coverage | Integration Coverage | E2E Coverage       |
| -------- | ------------- | -------------------- | ------------------ |
| P0       | >90%          | >80%                 | All critical paths |
| P1       | >80%          | >60%                 | Main happy paths   |
| P2       | >60%          | >40%                 | Smoke tests        |
| P3       | Best effort   | Best effort          | Manual only        |

## Priority Assignment Rules

1. **Start with business impact** - What happens if this fails?
2. **Consider probability** - How likely is failure?
3. **Factor in detectability** - Would we know if it failed?
4. **Account for recoverability** - Can we fix it quickly?

## Priority Decision Tree

```
Is it revenue-critical?
├─ YES → P0
└─ NO → Does it affect core user journey?
    ├─ YES → Is it high-risk?
    │   ├─ YES → P0
    │   └─ NO → P1
    └─ NO → Is it frequently used?
        ├─ YES → P1
        └─ NO → Is it customer-facing?
            ├─ YES → P2
            └─ NO → P3
```

## Test Execution Order

1. Execute P0 tests first (fail fast on critical issues)
2. Execute P1 tests second (core functionality)
3. Execute P2 tests if time permits
4. P3 tests only in full regression cycles

## Continuous Adjustment

Review and adjust priorities based on:

- Production incident patterns
- User feedback and complaints
- Usage analytics
- Test failure history
- Business priority changes

38 src/modules/bmm/testarch/trace-requirements.md Normal file
@@ -0,0 +1,38 @@
<!-- Powered by BMAD-CORE™ -->

# Requirements Traceability v2.0 (Slim)

```xml
<task id="bmad/bmm/testarch/trace" name="Requirements Traceability">
  <llm critical="true">
    <i>Set command_key="*trace"</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-commands.csv and read the matching row</i>
    <i>Load {project-root}/bmad/bmm/testarch/tea-knowledge.md emphasizing assertions guidance</i>
    <i>Use CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags</i>
    <i>Split pipe-delimited values into actionable lists</i>
    <i>Focus on mapping reality: reference actual files, describe coverage gaps, recommend next steps</i>
  </llm>
  <flow>
    <step n="1" title="Preflight">
      <action>Validate prerequisites; halt per halt_rules if unmet</action>
    </step>
    <step n="2" title="Traceability Analysis">
      <action>Follow flow_cues to map acceptance criteria to implemented tests</action>
      <action>Leverage knowledge heuristics to highlight assertion quality and duplication risks</action>
    </step>
    <step n="3" title="Deliverables">
      <action>Create traceability report described in deliverables</action>
      <action>Summarize critical gaps and recommendations</action>
    </step>
  </flow>
  <halt>
    <i>Apply halt_rules from the CSV row</i>
  </halt>
  <notes>
    <i>Reference notes column for additional emphasis</i>
  </notes>
  <output>
    <i>Coverage matrix and narrative summary</i>
  </output>
</task>
```