From bc92bf04c34e283a62d3ae962e390e6193e3d9bc Mon Sep 17 00:00:00 2001 From: Murat Ozcan Date: Tue, 30 Sep 2025 07:25:58 -0500 Subject: [PATCH] Port TEA commands into workflows and preload Murat knowledge --- src/modules/bmm/agents/tea.md | 18 +- src/modules/bmm/testarch/README.md | 34 +- src/modules/bmm/testarch/atdd.md | 40 - src/modules/bmm/testarch/automate.md | 38 - src/modules/bmm/testarch/ci.md | 39 - src/modules/bmm/testarch/framework.md | 41 - src/modules/bmm/testarch/gate.md | 38 - src/modules/bmm/testarch/nfr-assess.md | 38 - src/modules/bmm/testarch/tea-commands.csv | 9 - src/modules/bmm/testarch/tea-knowledge.md | 132 +- src/modules/bmm/testarch/test-design.md | 43 - .../testarch/test-resources-for-ai-flat.txt | 7607 +++++++++++++++++ .../bmm/testarch/trace-requirements.md | 38 - src/modules/bmm/workflows/testarch/README.md | 21 + .../workflows/testarch/atdd/instructions.md | 43 + .../bmm/workflows/testarch/atdd/workflow.yaml | 25 + .../testarch/automate/instructions.md | 43 + .../workflows/testarch/automate/workflow.yaml | 25 + .../bmm/workflows/testarch/ci/instructions.md | 43 + .../bmm/workflows/testarch/ci/workflow.yaml | 25 + .../testarch/framework/instructions.md | 43 + .../testarch/framework/workflow.yaml | 25 + .../workflows/testarch/gate/instructions.md | 39 + .../bmm/workflows/testarch/gate/workflow.yaml | 25 + .../testarch/nfr-assess/instructions.md | 39 + .../testarch/nfr-assess/workflow.yaml | 25 + .../testarch/test-design/instructions.md | 43 + .../testarch/test-design/workflow.yaml | 25 + .../workflows/testarch/trace/instructions.md | 39 + .../workflows/testarch/trace/workflow.yaml | 25 + 30 files changed, 8299 insertions(+), 369 deletions(-) delete mode 100644 src/modules/bmm/testarch/atdd.md delete mode 100644 src/modules/bmm/testarch/automate.md delete mode 100644 src/modules/bmm/testarch/ci.md delete mode 100644 src/modules/bmm/testarch/framework.md delete mode 100644 src/modules/bmm/testarch/gate.md delete mode 100644 src/modules/bmm/testarch/nfr-assess.md delete mode 100644 src/modules/bmm/testarch/tea-commands.csv delete mode 100644 src/modules/bmm/testarch/test-design.md create mode 100644 src/modules/bmm/testarch/test-resources-for-ai-flat.txt delete mode 100644 src/modules/bmm/testarch/trace-requirements.md create mode 100644 src/modules/bmm/workflows/testarch/README.md create mode 100644 src/modules/bmm/workflows/testarch/atdd/instructions.md create mode 100644 src/modules/bmm/workflows/testarch/atdd/workflow.yaml create mode 100644 src/modules/bmm/workflows/testarch/automate/instructions.md create mode 100644 src/modules/bmm/workflows/testarch/automate/workflow.yaml create mode 100644 src/modules/bmm/workflows/testarch/ci/instructions.md create mode 100644 src/modules/bmm/workflows/testarch/ci/workflow.yaml create mode 100644 src/modules/bmm/workflows/testarch/framework/instructions.md create mode 100644 src/modules/bmm/workflows/testarch/framework/workflow.yaml create mode 100644 src/modules/bmm/workflows/testarch/gate/instructions.md create mode 100644 src/modules/bmm/workflows/testarch/gate/workflow.yaml create mode 100644 src/modules/bmm/workflows/testarch/nfr-assess/instructions.md create mode 100644 src/modules/bmm/workflows/testarch/nfr-assess/workflow.yaml create mode 100644 src/modules/bmm/workflows/testarch/test-design/instructions.md create mode 100644 src/modules/bmm/workflows/testarch/test-design/workflow.yaml create mode 100644 src/modules/bmm/workflows/testarch/trace/instructions.md create mode 100644 
src/modules/bmm/workflows/testarch/trace/workflow.yaml diff --git a/src/modules/bmm/agents/tea.md b/src/modules/bmm/agents/tea.md index e26e0213..f8bba936 100644 --- a/src/modules/bmm/agents/tea.md +++ b/src/modules/bmm/agents/tea.md @@ -12,19 +12,21 @@ Load into memory {project-root}/bmad/bmm/config.yaml and set variable project_name, output_folder, user_name, communication_language + Load into memory {project-root}/bmad/bmm/testarch/tea-knowledge.md and {project-root}/bmad/bmm/testarch/test-resources-for-ai-flat.txt for Murat’s latest guidance and examples + Cross-check recommendations with the current official Playwright, Cypress, Pact, and CI platform documentation when repo guidance appears outdated Remember the user's name is {user_name} ALWAYS communicate in {communication_language} Show numbered cmd list - Initialize production-ready test framework architecture - Generate E2E tests first, before starting implementation - Generate comprehensive test automation - Create comprehensive test scenarios - Map requirements to tests Given-When-Then BDD format - Validate non-functional requirements - Scaffold CI/CD quality pipeline - Write/update quality gate decision assessment + Initialize production-ready test framework architecture + Generate E2E tests first, before starting implementation + Generate comprehensive test automation + Create comprehensive test scenarios + Map requirements to tests Given-When-Then BDD format + Validate non-functional requirements + Scaffold CI/CD quality pipeline + Write/update quality gate decision assessment Goodbye+exit persona diff --git a/src/modules/bmm/testarch/README.md b/src/modules/bmm/testarch/README.md index ab0bf433..402c2499 100644 --- a/src/modules/bmm/testarch/README.md +++ b/src/modules/bmm/testarch/README.md @@ -18,7 +18,7 @@ last-redoc-date: 2025-09-30 - Architect `*solution-architecture` 2. Confirm `bmad/bmm/config.yaml` defines `project_name`, `output_folder`, `dev_story_location`, and language settings. 3. Ensure a test framework setup exists; if not, use the `*framework` command to create one prior to development. -4. Skim supporting references under `./testarch/`: +4. Skim supporting references (knowledge under `testarch/`, command workflows under `workflows/testarch/`).
- `tea-knowledge.md` - `test-levels-framework.md` - `test-priorities-matrix.md` @@ -125,31 +125,35 @@ last-redoc-date: 2025-09-30 ## Command Catalog -| Command | Task File | Primary Outputs | Notes | -| -------------- | -------------------------------- | -------------------------------------------------------------------- | ------------------------------------------------ | -| `*framework` | `testarch/framework.md` | Playwright/Cypress scaffold, `.env.example`, `.nvmrc`, sample specs | Use when no production-ready harness exists | -| `*atdd` | `testarch/atdd.md` | Failing Acceptance-Test Driven Development, implementation checklist | Requires approved story + harness | -| `*automate` | `testarch/automate.md` | Prioritized specs, fixtures, README/script updates, DoD summary | Avoid duplicate coverage (see priority matrix) | -| `*ci` | `testarch/ci.md` | CI workflow, selective test scripts, secrets checklist | Platform-aware (GitHub Actions default) | -| `*test-design` | `testarch/test-design.md` | Combined risk assessment, mitigation plan, and coverage strategy | Handles risk scoring and test design in one pass | -| `*trace` | `testarch/trace-requirements.md` | Coverage matrix, recommendations, gate snippet | Requires access to story/tests repositories | -| `*nfr-assess` | `testarch/nfr-assess.md` | NFR assessment report with actions | Focus on security/performance/reliability | -| `*gate` | `testarch/gate.md` | Gate YAML + summary (PASS/CONCERNS/FAIL/WAIVED) | Deterministic decision rules + rationale | +| Command | Task File | Primary Outputs | Notes | +| -------------- | ------------------------------------------------ | ------------------------------------------------------------------- | ------------------------------------------------ | +| `*framework` | `workflows/testarch/framework/instructions.md` | Playwright/Cypress scaffold, `.env.example`, `.nvmrc`, sample specs | Use when no production-ready harness exists | +| `*atdd` | `workflows/testarch/atdd/instructions.md` | Failing acceptance tests + implementation checklist | Requires approved story + harness | +| `*automate` | `workflows/testarch/automate/instructions.md` | Prioritized specs, fixtures, README/script updates, DoD summary | Avoid duplicate coverage (see priority matrix) | +| `*ci` | `workflows/testarch/ci/instructions.md` | CI workflow, selective test scripts, secrets checklist | Platform-aware (GitHub Actions default) | +| `*test-design` | `workflows/testarch/test-design/instructions.md` | Combined risk assessment, mitigation plan, and coverage strategy | Handles risk scoring and test design in one pass | +| `*trace` | `workflows/testarch/trace/instructions.md` | Coverage matrix, recommendations, gate snippet | Requires access to story/tests repositories | +| `*nfr-assess` | `workflows/testarch/nfr-assess/instructions.md` | NFR assessment report with actions | Focus on security/performance/reliability | +| `*gate` | `workflows/testarch/gate/instructions.md` | Gate YAML + summary (PASS/CONCERNS/FAIL/WAIVED) | Deterministic decision rules + rationale |
## Command Guidance and Context Loading -- Each task reads one row from `tea-commands.csv` via `command_key`, expanding pipe-delimited (`|`) values into checklists. -- Keep CSV rows lightweight; place in-depth heuristics in `tea-knowledge.md` and reference via `knowledge_tags`. -- If the CSV grows substantially, consider splitting into scoped registries (e.g., planning vs execution) or upgrading to Markdown tables for humans. +- Each task now carries its own preflight/flow/deliverable guidance inline. +- `tea-knowledge.md` still stores heuristics; update the brief alongside task edits. +- Consider future modularization into orchestrated workflows if additional automation is needed. -- `tea-knowledge.md` encapsulates Murat’s philosophy—update both CSV and knowledge file together to avoid drift.
+## Workflow Placement + +We keep every Test Architect workflow under `workflows/testarch/` instead of scattering them across the phase folders. TEA steps show up during planning (`*framework`), implementation (`*atdd`, `*automate`, `*trace`), and release (`*gate`), so a single directory keeps the command catalog and examples coherent while still letting the orchestrator treat each command as a first-class workflow. When phase-specific navigation improves, we can add lightweight entrypoints without losing this central reference. + ## Appendix - **Supporting Knowledge:** - `tea-knowledge.md` – Murat’s testing philosophy, heuristics, and risk scales. - `test-levels-framework.md` – Decision matrix for unit/integration/E2E selection. - `test-priorities-matrix.md` – Priority (P0–P3) criteria and target coverage percentages. - s + - `test-resources-for-ai-flat.txt` – Flattened 347 KB bundle of Murat’s blogs, philosophy notes, and training material. Each `FILE:` section can be loaded on demand when the agent needs deeper examples or rationale. diff --git a/src/modules/bmm/testarch/atdd.md b/src/modules/bmm/testarch/atdd.md deleted file mode 100644 index b02699a8..00000000 --- a/src/modules/bmm/testarch/atdd.md +++ /dev/null @@ -1,40 +0,0 @@ - - -# Acceptance TDD v2.0 (Slim) - -```xml - - - Set command_key="*tdd" - Load {project-root}/bmad/bmm/testarch/tea-commands.csv and parse the row where command equals command_key - Load {project-root}/bmad/bmm/testarch/tea-knowledge.md into context - Use CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags to guide execution - Split pipe-delimited fields into individual checklist items - Map knowledge_tags to sections in the knowledge brief and apply them while writing tests - Keep responses concise and focused on generating the failing acceptance tests plus the implementation checklist - - - - Verify each preflight requirement; gather missing info from user when needed - Abort if halt_rules are triggered - - - Walk through flow_cues sequentially, adapting to story context - Use knowledge brief heuristics to enforce Murat's patterns (one test = one concern, explicit assertions, etc.) 
- - - Produce artifacts described in deliverables - Summarize failing tests and checklist items for the developer - - - - Apply halt_rules from the CSV row exactly - - - Use the notes column for additional constraints or reminders - - - Failing acceptance test files + implementation checklist summary - - -``` diff --git a/src/modules/bmm/testarch/automate.md b/src/modules/bmm/testarch/automate.md deleted file mode 100644 index f91f860c..00000000 --- a/src/modules/bmm/testarch/automate.md +++ /dev/null @@ -1,38 +0,0 @@ - - -# Automation Expansion v2.0 (Slim) - -```xml - - - Set command_key="*automate" - Load {project-root}/bmad/bmm/testarch/tea-commands.csv and read the row where command equals command_key - Load {project-root}/bmad/bmm/testarch/tea-knowledge.md for heuristics - Follow CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags - Convert pipe-delimited values into actionable checklists - Apply Murat's opinions from the knowledge brief when filling gaps or refactoring tests - - - - Confirm prerequisites; stop if halt_rules are triggered - - - Walk through flow_cues to analyse existing coverage and add only necessary specs - Use knowledge heuristics (composable helpers, deterministic waits, network boundary) while generating code - - - Create or update artifacts listed in deliverables - Summarize coverage deltas and remaining recommendations - - - - Apply halt_rules from the CSV row as written - - - Reference notes column for additional guardrails - - - Updated spec files and concise summary of automation changes - - -``` diff --git a/src/modules/bmm/testarch/ci.md b/src/modules/bmm/testarch/ci.md deleted file mode 100644 index 3db84e64..00000000 --- a/src/modules/bmm/testarch/ci.md +++ /dev/null @@ -1,39 +0,0 @@ - - -# CI/CD Enablement v2.0 (Slim) - -```xml - - - Set command_key="*ci" - Load {project-root}/bmad/bmm/testarch/tea-commands.csv and read the row where command equals command_key - Load {project-root}/bmad/bmm/testarch/tea-knowledge.md to recall CI heuristics - Follow CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags - Split pipe-delimited values into actionable lists - Keep output focused on workflow YAML, scripts, and guidance explicitly requested in deliverables - - - - Confirm prerequisites and required permissions - Stop if halt_rules trigger - - - Apply flow_cues to design the pipeline stages - Leverage knowledge brief guidance (cost vs confidence, sharding, artifacts) when making trade-offs - - - Create artifacts listed in deliverables (workflow files, scripts, documentation) - Summarize the pipeline, selective testing strategy, and required secrets - - - - Use halt_rules from the CSV row verbatim - - - Reference notes column for optimization reminders - - - CI workflow + concise explanation ready for team adoption - - -``` diff --git a/src/modules/bmm/testarch/framework.md b/src/modules/bmm/testarch/framework.md deleted file mode 100644 index d754f0ae..00000000 --- a/src/modules/bmm/testarch/framework.md +++ /dev/null @@ -1,41 +0,0 @@ - - -# Test Framework Setup v2.0 (Slim) - -```xml - - - Set command_key="*framework" - Load {project-root}/bmad/bmm/testarch/tea-commands.csv and parse the row where command equals command_key - Load {project-root}/bmad/bmm/testarch/tea-knowledge.md to internal memory - Use the CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags to guide behaviour - Split pipe-delimited values (|) into individual checklist items - Map knowledge_tags to matching 
sections in the knowledge brief and apply those heuristics throughout execution - DO NOT expand beyond the guidance unless the user supplies extra context; keep instructions lean and adaptive - - - - Evaluate each item in preflight; confirm or collect missing information - If any preflight requirement fails, follow halt_rules and stop - - - Follow flow_cues sequence, adapting to the project's stack - When deciding frameworks or patterns, apply relevant heuristics from tea-knowledge.md via knowledge_tags - Keep generated assets minimal—only what the CSV specifies - - - Create artifacts listed in deliverables - Capture a concise summary for the user explaining what was scaffolded - - - - Follow halt_rules from the CSV row verbatim - - - Use notes column for additional guardrails while executing - - - Deliverables and summary specified in the CSV row - - -``` diff --git a/src/modules/bmm/testarch/gate.md b/src/modules/bmm/testarch/gate.md deleted file mode 100644 index 1bcc805e..00000000 --- a/src/modules/bmm/testarch/gate.md +++ /dev/null @@ -1,38 +0,0 @@ - - -# Quality Gate v2.0 (Slim) - -```xml - - - Set command_key="*gate" - Load {project-root}/bmad/bmm/testarch/tea-commands.csv and read the matching row - Load {project-root}/bmad/bmm/testarch/tea-knowledge.md to reinforce risk-model heuristics - Use CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags - Split pipe-delimited values into actionable items - Apply deterministic rules for PASS/CONCERNS/FAIL/WAIVED; capture rationale and approvals - - - - Gather latest assessments and confirm prerequisites; halt per halt_rules if missing - - - Follow flow_cues to determine status, residual risk, follow-ups - Use knowledge heuristics to balance cost vs confidence when negotiating waivers - - - Update gate YAML specified in deliverables - Summarize decision, rationale, owners, and deadlines - - - - Apply halt_rules from the CSV row - - - Use notes column for quality bar reminders - - - Updated gate file with documented decision - - -``` diff --git a/src/modules/bmm/testarch/nfr-assess.md b/src/modules/bmm/testarch/nfr-assess.md deleted file mode 100644 index 9985f6d8..00000000 --- a/src/modules/bmm/testarch/nfr-assess.md +++ /dev/null @@ -1,38 +0,0 @@ - - -# NFR Assessment v2.0 (Slim) - -```xml - - - Set command_key="*nfr-assess" - Load {project-root}/bmad/bmm/testarch/tea-commands.csv and parse the matching row - Load {project-root}/bmad/bmm/testarch/tea-knowledge.md focusing on NFR guidance - Use CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags - Split pipe-delimited values into actionable lists - Demand evidence for each non-functional claim (tests, telemetry, logs) - - - - Confirm prerequisites; halt per halt_rules if unmet - - - Follow flow_cues to evaluate Security, Performance, Reliability, Maintainability - Use knowledge heuristics to suggest monitoring and fail-fast patterns - - - Produce assessment document and recommendations defined in deliverables - Summarize status, gaps, and actions - - - - Apply halt_rules from the CSV row - - - Reference notes column for negotiation framing (cost vs confidence) - - - NFR assessment markdown with clear next steps - - -``` diff --git a/src/modules/bmm/testarch/tea-commands.csv b/src/modules/bmm/testarch/tea-commands.csv deleted file mode 100644 index 4451a457..00000000 --- a/src/modules/bmm/testarch/tea-commands.csv +++ /dev/null @@ -1,9 +0,0 @@ -command,title,when_to_use,preflight,flow_cues,deliverables,halt_rules,notes,knowledge_tags 
-*automate,Automation expansion,After implementation or when reforging coverage,all acceptance criteria satisfied|code builds locally|framework configured,"Review story source/diff to confirm automation target; ensure fixture architecture exists (mergeTests for Playwright, commands for Cypress) and implement apiRequest/network/auth/log fixtures if missing; map acceptance criteria with test-levels-framework.md guidance and avoid duplicate coverage; assign priorities using test-priorities-matrix.md; generate unit/integration/E2E specs with naming convention feature-name.spec.ts, covering happy, negative, and edge paths; enforce deterministic waits, self-cleaning factories, and <=1.5 minute execution per test; run suite and capture Definition of Done results; update package.json scripts and README instructions",New or enhanced spec files grouped by level; fixture modules under support/; data factory utilities; updated package.json scripts and README notes; DoD summary with remaining gaps; gate-ready coverage summary,"If automation target unclear or framework missing, halt and request clarification",Never create page objects; keep tests <300 lines and stateless; forbid hard waits and conditional flow in tests; co-locate tests near source; flag flaky patterns immediately,philosophy/core|patterns/helpers|patterns/waits|patterns/dod -*ci,CI/CD quality pipeline,Once automation suite exists or needs optimization,git repository initialized|tests pass locally|team agrees on target environments|access to CI platform settings,"Detect CI platform (default GitHub Actions, ask if GitLab/CircleCI/etc); scaffold workflow (.github/workflows/test.yml or platform equivalent) with triggers; set Node.js version from .nvmrc and cache node_modules + browsers; stage jobs: lint -> unit -> component -> e2e with matrix parallelization (shard by file not test); add selective execution script for affected tests; create burn-in job that reruns changed specs 3x to catch flakiness; attach artifacts on failure (traces/videos/HAR); configure retries/backoff and concurrency controls; document required secrets and environment variables; add Slack/email notifications and local script mirroring CI",.github/workflows/test.yml (or platform equivalent); scripts/test-changed.sh; scripts/burn-in-changed.sh; updated README/ci.md instructions; secrets checklist; dashboard or badge configuration,"If git repo absent, test framework missing, or CI platform unspecified, halt and request setup",Target 20x speedups via parallel shards + caching; shard by file; keep jobs under 10 minutes; wait-on-timeout 120s for app startup; ensure npm test locally matches CI run; mention alternative platform paths when not on GitHub,philosophy/core|ci-strategy -*framework,Initialize test architecture,Run once per repo or when no production-ready harness exists,package.json present|no existing E2E framework detected|architectural context available,"Identify stack from package.json (React/Vue/Angular/Next.js); detect bundler (Vite/Webpack/Rollup/esbuild); match test language to source (JS/TS frontend -> JS/TS tests); choose Playwright for large or performance-critical repos, Cypress for small DX-first teams; create {framework}/tests/ and {framework}/support/fixtures/ and {framework}/support/helpers/; configure config files with timeouts (action 15s, navigation 30s, test 60s) and reporters (HTML + JUnit); create .env.example with TEST_ENV, BASE_URL, API_URL; implement pure function->fixture->mergeTests pattern and faker-based data factories; enable 
failure-only screenshots/videos and ensure .nvmrc recorded",playwright/ or cypress/ folder with config + support tree; .env.example; .nvmrc; example tests; README with setup instructions,"If package.json missing OR framework already configured, halt and instruct manual review","Playwright: worker parallelism, trace viewer, multi-language support; Cypress: avoid if many dependent API calls; Component testing: Vitest (large) or Cypress CT (small); Contract testing: Pact for microservices; always use data-cy/data-testid selectors",philosophy/core|patterns/fixtures|patterns/selectors -*gate,Quality gate decision,After review or mitigation updates,latest assessments gathered|team consensus on fixes,"Assemble story metadata (id, title); choose gate status using deterministic rules (PASS all critical issues resolved, CONCERNS minor residual risk, FAIL critical blockers, WAIVED approved by business); update YAML schema with sections: metadata, waiver status, top_issues, risk_summary totals, recommendations (must_fix, monitor), nfr_validation statuses, history; capture rationale, owners, due dates, and summary comment back to story","docs/qa/gates/{story}.yml updated with schema fields (schema, story, story_title, gate, status_reason, reviewer, updated, waiver, top_issues, risk_summary, recommendations, nfr_validation, history); summary message for team","If review incomplete or risk data outdated, halt and request rerun","FAIL whenever unresolved P0 risks/tests or security holes remain; CONCERNS when mitigations planned but residual risk exists; WAIVED requires reason, approver, and expiry; maintain audit trail in history",philosophy/core|risk-model -*nfr-assess,NFR validation,Late development or pre-review for critical stories,implementation deployed locally|non-functional goals defined or discoverable,"Ask which NFRs to assess; default to core four (security, performance, reliability, maintainability); gather thresholds from story/architecture/technical-preferences and mark unknown targets; inspect evidence (tests, telemetry, logs) for each NFR; classify status using deterministic pass/concerns/fail rules and list quick wins; produce gate block and assessment doc with recommended actions",NFR assessment markdown with findings; gate YAML block capturing statuses and notes; checklist of evidence gaps and follow-up owners,"If NFR targets undefined and no guidance available, request definition and halt","Unknown thresholds -> CONCERNS, never guess; ensure each NFR has evidence or call it out; suggest monitoring hooks and fail-fast mechanisms when gaps exist",philosophy/core|nfr -*tdd,Acceptance Test Driven Development,Before implementation when team commits to TDD,story approved with acceptance criteria|dev sandbox ready|framework scaffolding in place,Clarify acceptance criteria and affected systems; pick appropriate test level (E2E/API/Component); write failing acceptance tests using Given-When-Then with network interception first then navigation; create data factories and fixture stubs for required entities; outline mocks/fixtures infrastructure the dev team must supply; generate component tests for critical UI logic; compile implementation checklist mapping each test to source work; share failing tests with dev agent and maintain red -> green -> refactor loop,Failing acceptance test files; component test stubs; fixture/mocks skeleton; implementation checklist with test-to-code mapping; documented data-testid requirements,"If criteria ambiguous or framework missing, halt for clarification",Start 
red; one assertion per test; use beforeEach for visible setup (no shared state); remind devs to run tests before writing production code; update checklist as each test goes green,philosophy/core|patterns/test-structure -*test-design,Risk and test design planning,"After story approval, before development",story markdown present|acceptance criteria clear|architecture/PRD accessible,"Filter requirements so only genuine risks remain; review PRD/architecture/story for unresolved gaps; classify risks across TECH, SEC, PERF, DATA, BUS, OPS using category definitions; request clarification when evidence missing; score probability (1 unlikely, 2 possible, 3 likely) and impact (1 minor, 2 degraded, 3 critical) then compute totals; highlight risks >=6 and plan mitigations with owners and timelines; break acceptance criteria into atomic scenarios mapped to mitigations; reference test-levels-framework.md to pick unit/integration/E2E/component levels; avoid duplicate coverage, prefer lower levels when possible; assign priorities using test-priorities-matrix.md; outline data/tooling prerequisites and execution order",Risk assessment markdown in docs/qa/assessments; table of category/probability/impact/score; mitigation matrix with owners and due dates; coverage matrix with requirement/level/priority/mitigation; gate YAML snippet summarizing risk totals and scenario counts; recommended execution order,"If story missing or criteria unclear, halt for clarification","Category definitions: TECH=architecture flaws; SEC=missing controls/vulnerabilities; PERF=SLA risk; DATA=loss/corruption; BUS=user/business harm; OPS=deployment/run failures; rely on evidence, not speculation; tie scenarios back to risk mitigations; keep scenarios independent and maintainable",philosophy/core|risk-model|patterns/test-structure -*trace,Requirements traceability,Mid-development checkpoint or before review,tests exist for story|access to source + specs,"Gather acceptance criteria and implemented tests; map each criterion to concrete tests (file + describe/it) using Given-When-Then narrative; classify coverage status as FULL, PARTIAL, NONE, UNIT-ONLY, INTEGRATION-ONLY; flag severity based on priority (P0 gaps critical); recommend additional tests or refactors; generate gate YAML coverage summary",Traceability report saved under docs/qa/assessments; coverage matrix with status per criterion; gate YAML snippet for coverage totals and gaps,"If story lacks implemented tests, pause and advise running *tdd or writing tests","Definitions: FULL=all scenarios validated, PARTIAL=some coverage exists, NONE=no validation, UNIT-ONLY=missing higher level, INTEGRATION-ONLY=lacks lower confidence; ensure assertions explicit and avoid duplicate coverage",philosophy/core|patterns/assertions diff --git a/src/modules/bmm/testarch/tea-knowledge.md b/src/modules/bmm/testarch/tea-knowledge.md index aeb6c900..21e7ae5c 100644 --- a/src/modules/bmm/testarch/tea-knowledge.md +++ b/src/modules/bmm/testarch/tea-knowledge.md @@ -2,7 +2,7 @@ # Murat Test Architecture Foundations (Slim Brief) -This brief distills Murat Ozcan's testing philosophy used by the Test Architect agent. Use it as the north star after loading `tea-commands.csv`. +This brief distills Murat Ozcan's testing philosophy used by the Test Architect agent. Use it as the north star while executing the TEA workflows. 
## Core Principles @@ -14,8 +14,10 @@ This brief distills Murat Ozcan's testing philosophy used by the Test Architect - Composition over inheritance: prefer functional helpers and fixtures that compose behaviour; page objects and deep class trees hide duplication. - Setup via API, assert via UI. Keep tests user-centric while priming state through fast interfaces. - One test = one concern. Explicit assertions live in the test body, not buried in helpers. +- Test at the lowest level possible first: favour component/unit coverage before integration/E2E (target ~1:3–1:5 ratio of high-level to low-level tests). +- Zero tolerance for flakiness: if a test flakes, fix the cause immediately or delete the test—shipping with flakes is not acceptable evidence. -## Patterns and Heuristics +## Patterns & Heuristics - Selector order: `data-cy` / `data-testid` -> ARIA -> text. Avoid brittle CSS, IDs, or index based locators. - Network boundary is the mock boundary. Stub at the edge, never mid-service unless risk demands. @@ -44,9 +46,37 @@ This brief distills Murat Ozcan's testing philosophy used by the Test Architect ...overrides, }); ``` +- Standard test skeleton keeps intent clear—`describe` the feature, `context` specific scenarios, make setup visible, and follow Arrange → Act → Assert explicitly: + + ```javascript + describe('Checkout', () => { + context('when inventory is available', () => { + beforeEach(async () => { + await seedInventory(); + await interceptOrders(); // intercept BEFORE navigation + await test.step('navigate', () => page.goto('/checkout')); + }); + + it('completes purchase', async () => { + await cart.fillDetails(validUser); + await expect(page.getByTestId('order-confirmed')).toBeVisible(); + }); + }); + }); + ``` + +- Helper/fixture thresholds: 3+ call sites → promote to fixture with subpath export, 2-3 → shared utility module, 1-off → keep inline to avoid premature abstraction. +- Deterministic waits only: prefer `page.waitForResponse`, `cy.wait('@alias')`, or element disappearance (e.g., `cy.get('[data-cy="spinner"]').should('not.exist')`). Ban `waitForTimeout`/`cy.wait(ms)` unless quarantined in TODO and slated for removal. +- Data is created via APIs or tasks, not UI flows: + ```javascript + beforeEach(() => { + cy.task('db:seed', { users: [createUser({ role: 'admin' })] }); + }); + ``` +- Assertions stay in tests; when shared state varies, assert on ranges (`expect(count).toBeGreaterThanOrEqual(3)`) rather than brittle exact values. - Visual debugging: keep component/test runner UIs available (Playwright trace viewer, Cypress runner) to accelerate feedback. -## Risk and Coverage +## Risk & Coverage - Risk score = probability (1-3) × impact (1-3). Score 9 => gate FAIL, ≥6 => CONCERNS. Most stories have 0-1 high risks. - Test level ratio: heavy unit/component coverage, but always include E2E for critical journeys and integration seams. @@ -60,7 +90,7 @@ This brief distills Murat Ozcan's testing philosophy used by the Test Architect - **Media**: screenshot only-on-failure, video retain-on-failure - **Language Matching**: Tests should match source code language (JS/TS frontend -> JS/TS tests) -## Automation and CI +## Automation & CI - Prefer Playwright for multi-language teams, worker parallelism, rich debugging; Cypress suits smaller DX-first repos or component-heavy spikes. 
- **Framework Selection**: Large repo + performance = Playwright, Small repo + DX = Cypress @@ -71,7 +101,7 @@ This brief distills Murat Ozcan's testing philosophy used by the Test Architect - Burn-in testing: run new or changed specs multiple times (e.g., 3-10x) to flush flakes before they land in main. - Keep helper scripts handy (`scripts/test-changed.sh`, `scripts/burn-in-changed.sh`) so CI and local workflows stay in sync. -## Project Structure and Config +## Project Structure & Config - **Directory structure**: ``` @@ -92,8 +122,10 @@ This brief distills Murat Ozcan's testing philosophy used by the Test Architect }; export default configs[process.env.TEST_ENV || 'local']; ``` +- Validate environment input up-front (fail fast when `TEST_ENV` is missing) and keep Playwright/Cypress configs small by delegating per-env overrides to files under `config/`. +- Keep `.env.example`, `.nvmrc`, and scripts (burn-in, test-changed) in source control so CI and local machines share tooling defaults. -## Test Hygiene and Independence +## Test Hygiene & Independence - Tests must be independent and stateless; never rely on execution order. - Cleanup all data created during tests (afterEach or API cleanup). @@ -101,7 +133,7 @@ This brief distills Murat Ozcan's testing philosophy used by the Test Architect - No shared mutable state; prefer factory functions per test. - Tests must run in parallel safely; never commit `.only`. - Prefer co-location: component tests next to components, integration in `tests/integration`, etc. -- Feature flags: centralise enum definitions (e.g., `export const FLAGS = Object.freeze({ NEW_FEATURE: 'new-feature' })`), provide helpers to set/clear targeting, and write dedicated flag tests that clean up targeting after each run. +- Feature flags: centralise enum definitions (e.g., `export const FLAGS = Object.freeze({ NEW_FEATURE: 'new-feature' })`), provide helpers to set/clear targeting, write dedicated flag suites that clean up targeting after each run, and exercise both enabled/disabled paths in CI. ## CCTDD (Component Test-Driven Development) @@ -117,6 +149,8 @@ This brief distills Murat Ozcan's testing philosophy used by the Test Architect - **HAR recording**: Record network traffic for offline playback in CI. - **Selective reruns**: Only rerun failed specs, not entire suite. - **Network recording**: capture HAR files during stable runs so CI can replay network traffic when external systems are flaky. +- Stage jobs: cache dependencies once, run `test-changed` before full suite, then execute sharded E2E jobs with `fail-fast: false` so one failure doesn’t cancel other evidence. +- Ship burn-in scripts (e.g., `scripts/burn-in-changed.sh`) that loop 5–10x over changed specs and stop on first failure; wire them into CI for flaky detection before merge. ## Package Scripts @@ -127,25 +161,20 @@ This brief distills Murat Ozcan's testing philosophy used by the Test Architect "test:component": "cypress run --component", "test:contract": "jest --testMatch='**/pact/*.spec.ts'", "test:debug": "playwright test --headed", - "test:ci": "npm run test:unit andand npm run test:e2e", + "test:ci": "npm run test:unit && npm run test:e2e", "contract:publish": "pact-broker publish" ``` -## Contract Testing (Pact) +## Online Resources & Examples -- Use for microservices with integration points. -- Consumer generates contracts, provider verifies. -- Structure: `pact/` directory at root, `pact/config.ts` for broker settings. 
-- Reference repos: pact-js-example-consumer, pact-js-example-provider, pact-js-example-react-consumer. +- Full-text mirrors of Murat's public repos live in the `test-resources-for-ai/sample-repos` knowledge pack so TEA can stay offline. Key origins include Playwright patterns (`pw-book`), Cypress vs Playwright comparisons, Tour of Heroes, and Pact consumer/provider examples. -## Online Resources and Examples - -- Fixture architecture: https://github.com/muratkeremozcan/cy-vs-pw-murats-version +- Fixture architecture: https://github.com/muratkeremozcan/cy-vs-pw-murats-version - Playwright patterns: https://github.com/muratkeremozcan/pw-book - Component testing (CCTDD): https://github.com/muratkeremozcan/cctdd - Contract testing: https://github.com/muratkeremozcan/pact-js-example-consumer - Full app example: https://github.com/muratkeremozcan/tour-of-heroes-react-vite-cypress-ts -- Blog posts: https://dev.to/muratkeremozcan +- Blog essays at https://dev.to/muratkeremozcan provide narrative rationale—distil any new actionable guidance back into this brief when processes evolve. ## Risk Model Details @@ -156,7 +185,7 @@ This brief distills Murat Ozcan's testing philosophy used by the Test Architect - TECH: Architecture flaws, unproven design, integration complexity. - SEC: Missing controls, vulnerabilities, auth gaps. - PERF: SLA risk, latency, resource exhaustion. - BUS: Business or user harm, revenue-impacting failures, compliance gaps. - OPS: Deployment, infrastructure, or observability gaps that block releases. -## Probability and Impact Scale +## Probability & Impact Scale - Probability 1 = Unlikely (standard implementation, low risk). - Probability 2 = Possible (edge cases, needs attention). @@ -168,8 +197,8 @@ ## Test Design Frameworks -- Use `docs/docs-v6/v6-bmm/test-levels-framework.md` for level selection and anti-patterns. -- Use `docs/docs-v6/v6-bmm/test-priorities-matrix.md` for P0-P3 priority criteria. +- Use [`test-levels-framework.md`](./test-levels-framework.md) for level selection and anti-patterns. +- Use [`test-priorities-matrix.md`](./test-priorities-matrix.md) for P0–P3 priority criteria. - Naming convention: `{epic}.{story}-{LEVEL}-{sequence}` (e.g., `2.4-E2E-01`). - Tie each scenario to risk mitigations or acceptance criteria. @@ -270,6 +299,65 @@ history: - Describe blocks: `describe('Feature/Component Name', () => { context('when condition', ...) })`. - Data attributes: always kebab-case (`data-cy="submit-button"`, `data-testid="user-email"`). -## Reference Materials +## Contract Testing Rules (Pact) -If deeper context is needed, consult Murat's testing philosophy notes, blog posts, and sample repositories in https://github.com/muratkeremozcan/test-resources-for-ai/blob/main/gitingest-full-repo-text-version.txt. +- Use Pact for microservice integrations; keep a `pact/` directory with broker config and share contracts as first-class artifacts in the repo. +- Keep consumer contracts beside the integration specs that exercise them; version with semantic tags so downstream teams understand breaking changes. +- Publish contracts on every CI run and enforce provider verification before merge—failing verification blocks release and acts as a quality gate. +- Capture fallback behaviour (timeouts, retries, circuit breakers) inside interactions so resilience expectations stay explicit.
+- Sample interaction scaffold: + ```javascript + const interaction = { + state: 'user with id 1 exists', + uponReceiving: 'a request for user 1', + withRequest: { + method: 'GET', + path: '/users/1', + headers: { Accept: 'application/json' }, + }, + willRespondWith: { + status: 200, + headers: { 'Content-Type': 'application/json' }, + body: like({ id: 1, name: string('Jane Doe'), email: email('jane@example.com') }), + }, + }; + ``` + +## Reference Capsules (Summaries Bundled In) + +- **Fixture Architecture Quick Wins** + - Compose Playwright or Cypress suites with additive fixtures; use `mergeTests`/`extend` to layer auth, network, and telemetry helpers without inheritance. + - Keep HTTP helpers framework-agnostic so the same function fuels unit tests, API smoke checks, and runtime fixtures. + - Normalize selectors (`data-testid`/`data-cy`) and lint new UI code for missing attributes to prevent brittle locators. + +- **Playwright Patterns Digest** + - Register network interceptions before navigation, assert on typed responses, and capture HAR files for regression. + - Treat timeouts and retries as configuration, not inline magic numbers; expose overrides via fixtures. + - Name specs and test IDs with intent (`checkout.complete-happy-path`) so CI shards and triage stay meaningful. + +- **Component TDD Highlights** + - Begin UI work with failing component specs; rebuild providers/stores per spec to avoid state bleed. + - Use factories to exercise prop variations and edge cases; assert through accessible queries (`getByRole`, `getByLabelText`). + - Document mount helpers and cleanup expectations so component tests stay deterministic. + +- **Contract Testing Cliff Notes** + - Store consumer contracts alongside integration specs; version with semantic tags and publish on every CI run. + - Enforce provider verification prior to merge to act as a release gate for service integrations. + - Capture fallback behaviour (timeouts, retries, circuit breakers) inside contracts to keep resilience expectations explicit. + +- **End-to-End Reference Flow** + - Prime end-to-end journeys through API fixtures, then assert through UI steps mirroring real user narratives. + - Pair burn-in scripts (`npm run test:e2e -- --repeat-each=3`) with selective retries to flush flakes before promotion. + +- **Philosophy & Heuristics Articles** + - Use long-form articles for rationale; extract checklists, scripts, and thresholds back into this brief whenever teams adopt new practices. + +These capsules distil Murat's sample repositories (Playwright patterns, Cypress vs Playwright comparisons, CCTDD, Pact examples, Tour of Heroes walkthrough) captured in the `test-resources-for-ai` knowledge pack so the TEA agent can operate offline while reflecting those techniques. + +## Reference Assets + +- [Test Architect README](./README.md) — high-level usage guidance and phase checklists. +- [Test Levels Framework](./test-levels-framework.md) — choose the right level for each scenario. +- [Test Priorities Matrix](./test-priorities-matrix.md) — assign P0–P3 priorities consistently. +- [TEA Workflows](../workflows/testarch/README.md) — per-command instructions executed by the agent. +- [Murat Knowledge Bundle](./test-resources-for-ai-flat.txt) — 347 KB flattened snapshot of Murat’s blogs, philosophy notes, and course material. Sections are delimited with `FILE:` headers; load relevant portions when deeper examples or rationales are required. 
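The Murat Knowledge Bundle entry above says sections are delimited with `FILE:` headers and loaded on demand. A minimal sketch of that lookup, assuming a Node.js context and the `=`-rule/`FILE:` header format used by the flattened bundle added later in this patch; the helper name and call site are hypothetical, not part of this change set:

```typescript
import { readFileSync } from 'node:fs'

// Hypothetical helper: pull a single FILE: section out of the flattened bundle
// so the agent loads one document instead of the whole 347 KB file.
export function loadBundleSection(bundlePath: string, fileName: string): string | undefined {
  const bundle = readFileSync(bundlePath, 'utf8')
  // Sections are framed by full lines of '=' around a `FILE: <path>` header line.
  const chunks = bundle.split(/^={8,}\s*$/m)
  for (let i = 0; i < chunks.length - 1; i++) {
    if (chunks[i].trim() === `FILE: ${fileName}`) {
      return chunks[i + 1].trim() // the content that follows the header block
    }
  }
  return undefined
}

// Example: loadBundleSection('bmad/bmm/testarch/test-resources-for-ai-flat.txt', 'README.md')
```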
diff --git a/src/modules/bmm/testarch/test-design.md b/src/modules/bmm/testarch/test-design.md deleted file mode 100644 index d86f9ab8..00000000 --- a/src/modules/bmm/testarch/test-design.md +++ /dev/null @@ -1,43 +0,0 @@ - - -# Risk and Test Design v3.0 (Slim) - -```xml - - - Set command_key="*test-design" - Load {project-root}/bmad/bmm/testarch/tea-commands.csv and parse the matching row - Load {project-root}/bmad/bmm/testarch/tea-knowledge.md for risk-model and coverage heuristics - Use CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags as the execution blueprint - Split pipe-delimited values into actionable checklists - Stay evidence-based—link risks and scenarios directly to PRD/architecture/story artifacts - - - - Confirm story markdown, acceptance criteria, and architecture/PRD access. - Stop immediately if halt_rules trigger (missing inputs or unclear requirements). - - - Follow flow_cues to filter genuine risks, classify them (TECH/SEC/PERF/DATA/BUS/OPS), and score probability × impact. - Document mitigations with owners, timelines, and residual risk expectations. - - - Break acceptance criteria into atomic scenarios mapped to mitigations. - Choose test levels using test-levels-framework.md, assign priorities via test-priorities-matrix.md, and note tooling/data prerequisites. - - - Generate the combined risk report and test design artifacts described in deliverables. - Summarize key risks, mitigations, coverage plan, and recommended execution order. - - - - Apply halt_rules from the CSV row verbatim. - - - Use notes column for calibration reminders and coverage heuristics. - - - Unified risk assessment plus coverage strategy ready for implementation. - - -``` diff --git a/src/modules/bmm/testarch/test-resources-for-ai-flat.txt b/src/modules/bmm/testarch/test-resources-for-ai-flat.txt new file mode 100644 index 00000000..5218f822 --- /dev/null +++ b/src/modules/bmm/testarch/test-resources-for-ai-flat.txt @@ -0,0 +1,7607 @@ +Directory structure: +└── muratkeremozcan-test-resources-for-ai/ + ├── README.md + ├── murat-testing-philosophy-for-tea-agent.md + ├── Quotes about testing.md + ├── blog/ + │ ├── (mostly incomplete) List of Test Methodologies.mkd + │ ├── API e2e testing event driven systems.mkd + │ ├── Automating API Documentation A Journey from TypeScript to OpenAPI and Schema Governence with Optic.mkd + │ ├── Building Custom Request Filters for PactJs Verifications in Express and Non-Express Environments.mkd + │ ├── Building the test architecture, increasing adoption, improving the developer experience.mkd + │ ├── CI CD strategies for UI apps and deployed services.mkd + │ ├── Cypress Component Testing vs React Testing Library - the complete comparison.mkd + │ ├── Documenting and Testing Schemas of Serverless Stacks with Optic & Cypress.mkd + │ ├── Functional Programming Test Patterns with Cypress.mkd + │ ├── Handling Pact Breaking Changes Dynamically in CICD.mkd + │ ├── Improve Cypress e2e test latency by a factor of 20!!.mkd + │ ├── Page Objects vs. 
Functional Helpers.mkd + │ ├── Solving Cross-Execution Issues in Pact Testing with Kafka and Message Queues.mkd + │ └── Testing Email-Based Authentication Systems with Cypress, Mailosaur and cypress-data-session.mkd + └── Test_architect_content/ + └── Test_architect_course/ + ├── test design techniques.docx + └── Training itself/ + ├── Homework 1/ + │ ├── Horizon Cloud presentation - Murat .pptx + │ ├── Horizon Cloud presentation - Murat .url + │ ├── Marketplace Input.docx + │ └── Role - 5 Differences.docx + ├── Homework 3/ + │ ├── Horizon Cloud auto pen testing.pptx + │ └── Horizon Cloud BOIC Test Automation.pptx + ├── phase 1 slides/ + │ └── TeA_WS1-3-1a_RBT-Worksheet_A1.xls + └── testarchitectnotes/ + ├── README.md + ├── TeA_notes.docx + ├── Business_Understanding/ + │ └── README.md + ├── Requirements_Engineering/ + │ └── README.md + ├── slides/ + │ └── RBT-Worksheet.xls + ├── Social_Capability/ + │ └── README.md + ├── Test_Architecture/ + │ └── README.md + └── Testing_&_Quality/ + ├── README.md + └── RBT-Worksheet.xls + +================================================ +FILE: README.md +================================================ +Resources to help AI reference. + + + +================================================ +FILE: murat-testing-philosophy-for-tea-agent.md +================================================ +# Murat Ozcan Testing Philosophy & Patterns for TEA Agent Enhancement + +## Purpose + +This document captures the comprehensive testing philosophy, patterns, and implementation details extracted from Murat Ozcan's books, blog posts, and sample repositories. It serves as the knowledge base for enhancing the Test Architect (TEA) agent to generate tests, configurations, and architectures in Murat's distinctive style. + +## Reference Resources + +### Books + +- **CCTDD: Cypress Component Test Driven Design** - [https://github.com/muratkeremozcan/cctdd](https://github.com/muratkeremozcan/cctdd) +- **UI Testing Best Practices** - [https://github.com/NoriSte/ui-testing-best-practices](https://github.com/NoriSte/ui-testing-best-practices) + +### Blog Posts + +All blog posts available at: [https://dev.to/muratkeremozcan](https://dev.to/muratkeremozcan) + +Key posts include: +- Functional Programming Test Patterns with Cypress +- Page Objects vs. 
Functional Helpers +- Building the test architecture, increasing adoption, improving the developer experience +- Effective Test Strategies for Front-end Applications using LaunchDarkly Feature Flags +- CI CD strategies for UI apps and deployed services +- Cypress Component Testing vs React Testing Library - the complete comparison +- API e2e testing event driven systems +- Cypress and Pact contract testing patterns +- Testing Email-Based Authentication Systems +- The 32+ ways of selective testing with Cypress + +### Sample Repositories + +- **Cypress vs Playwright Examples** - [https://github.com/muratkeremozcan/cy-vs-pw-murats-version](https://github.com/muratkeremozcan/cy-vs-pw-murats-version) +- **Playwright Book Examples** - [https://github.com/muratkeremozcan/pw-book](https://github.com/muratkeremozcan/pw-book) +- **Tour of Heroes (React/Vite/Cypress/TS)** - [https://github.com/muratkeremozcan/tour-of-heroes-react-vite-cypress-ts](https://github.com/muratkeremozcan/tour-of-heroes-react-vite-cypress-ts) +- **Pact.js Consumer Example** - [https://github.com/muratkeremozcan/pact-js-example-consumer](https://github.com/muratkeremozcan/pact-js-example-consumer) +- **Pact.js React Consumer** - [https://github.com/muratkeremozcan/pact-js-example-react-consumer](https://github.com/muratkeremozcan/pact-js-example-react-consumer) +- **Pact.js Provider Example** - [https://github.com/muratkeremozcan/pact-js-example-provider](https://github.com/muratkeremozcan/pact-js-example-provider) + +## Core Philosophy + +### The Murat Testing Manifesto +1. **"Functional helpers over Page Objects"** - Composition wins over inheritance +2. **"Test at the lowest level possible"** - Component > Integration > E2E (1:3 to 1:5 ratio) +3. **"Network boundary is the test boundary"** - Mock at the edge, not service level +4. **"Visual debugging changes everything"** - See the component while testing +5. **"No flaky tests, ever"** - Deterministic or delete (0 tolerance) +6. **"Setup via API, assert via UI"** - Fast setup, user-centric assertions +7. **"One test = one concern"** - Focused and clear +8. **"Explicit over implicit"** - Assertions in tests, not hidden in helpers +9. **"Data factories over fixtures"** - Dynamic > Static +10. **"Shift left, test often"** - Early and continuous + +## Testing Patterns + +### 1. Test Structure Pattern +```javascript +// ALWAYS this structure: +describe('Feature/Component', () => { + context('specific scenario', () => { // Group related tests + beforeEach(() => { + // Setup that's VISIBLE in the test + // Network mocks BEFORE navigation + // Data setup via API, not UI + }) + + it('should when ', () => { + // Arrange - Act - Assert clearly separated + // Assertions explicit in test, not helpers + }) + }) +}) +``` + +### 2. Fixture & Helper Architecture + +#### Composable Fixture System (SEON Production Pattern) + +```typescript +// playwright/support/merged-fixtures.ts - The Murat Way +import { test as base, mergeTests } from '@playwright/test' +import { test as apiRequestFixture } from './fixtures/api-request-fixture' +import { test as networkFixture } from './fixtures/network-fixture' +import { test as authFixture } from './fixtures/auth-fixture' +import { test as logFixture } from './fixtures/log-fixture' + +// Merge all fixtures for comprehensive capabilities +export const test = mergeTests( + base, + apiRequestFixture, + networkFixture, + authFixture, + logFixture +) +``` + +#### Pure Function → Fixture Pattern + +```typescript +// Step 1: Pure function (always first!) 
+export async function apiRequest({ request, method, url }) { + // Core implementation - testable independently +} + +// Step 2: Fixture wrapper +export const apiRequestFixture = base.extend({ + apiRequest: async ({ request }, use) => { + await use((params) => apiRequest({ request, ...params })) + } +}) + +// Step 3: Export for subpath imports +// package.json exports: "./api-request", "./api-request/fixtures" +``` + +#### Helper Function Rules + +- **3+ uses** → Create fixture with subpath export +- **2-3 uses** → Create utility module +- **1 use** → Keep inline +- **Complex logic** → Factory function pattern + +### 3. Network Interception Strategy + +#### The Network-First Pattern +```javascript +// ALWAYS intercept before action +const networkCall = interceptNetworkCall({ url: '**/api/data' }) +await page.goto('/page') // THEN navigate +const response = await networkCall // THEN await + +// Cypress equivalent +cy.intercept('GET', '**/api/data').as('getData') +cy.visit('/page') +cy.wait('@getData') +``` + +### 4. Selector Strategy (Non-Negotiable) +```javascript +// Priority order - ALWAYS +1. data-cy="element" // Cypress +2. data-testid="element" // Playwright/RTL +3. ARIA attributes // Future-proof +4. Text content // User-centric + +// Never use: +- CSS classes (unless no other option) +- IDs (unless absolutely necessary) +- Complex XPath +- Index-based selectors +``` + +### 5. Waiting Strategies +```javascript +// ✅ Deterministic waiting +await page.waitForResponse('**/api/data') +cy.wait('@getUsers') + +// ✅ Event-based waiting +await page.waitForLoadState('networkidle') +cy.get('[data-cy="spinner"]').should('not.exist') + +// ❌ NEVER use hard waits +await page.waitForTimeout(3000) // NEVER +cy.wait(3000) // NEVER +``` + +### 6. Test Data Management + +#### Factory Pattern (Always) +```typescript +export const createUser = (overrides: Partial<User> = {}): User => ({ + id: faker.string.uuid(), + email: faker.internet.email(), + name: faker.person.fullName(), + role: 'user', + ...overrides +}) + +// Usage in tests: +const adminUser = createUser({ role: 'admin' }) +``` + +#### API-First Setup +```javascript +beforeEach(() => { + // ✅ Setup via API + cy.task('db:seed', { users: [createUser()] }) + + // ❌ NOT via UI + // cy.visit('/signup') + // cy.fill('#email', 'test@example.com') +}) +``` + +### 7.
Assertion Patterns + +#### Flexible for Shared State +```typescript +// When state might be shared: +expect(await items.count()).toBeGreaterThanOrEqual(3) +// NOT: expect(await items.count()).toBe(3) +``` + +#### Explicit in Tests +```javascript +// ✅ Assertions in test +cy.get('@apiCall').should((xhr) => { + expect(xhr.request.body).to.deep.equal(expectedPayload) + expect(xhr.response.statusCode).to.equal(200) +}) + +// ❌ NOT hidden in helpers +// validateApiCall('@apiCall', expectedPayload, 200) +``` + +## Configuration Templates + +### Playwright Configuration (The Murat Way - SEON Production Pattern) + +```javascript +// playwright.config.ts - Environment-based config loading +import { config as dotenvConfig } from 'dotenv' +import path from 'path' + +dotenvConfig({ + path: path.resolve(__dirname, '../../.env') +}) + +const envConfigMap = { + local: require('./playwright/config/local.config').default, + staging: require('./playwright/config/staging.config').default, + production: require('./playwright/config/production.config').default +} + +const environment = process.env.TEST_ENV || 'local' + +if (!Object.keys(envConfigMap).includes(environment)) { + console.error(`No configuration found for environment: ${environment}`) + process.exit(1) +} + +export default envConfigMap[environment as keyof typeof envConfigMap] +``` + +#### Base Configuration Pattern +```javascript +// playwright/config/base.config.ts +export default defineConfig({ + testDir: path.resolve(__dirname, '../tests'), + outputDir: path.resolve(__dirname, '../../test-results'), + fullyParallel: true, + forbidOnly: !!process.env.CI, + retries: process.env.CI ? 2 : 0, + workers: process.env.CI ? 1 : undefined, + reporter: [ + ['html', { outputFolder: 'playwright-report', open: 'never' }], + ['junit', { outputFile: 'results.xml' }], + ['list'] + ], + use: { + actionTimeout: 15000, + navigationTimeout: 30000, + trace: 'on-first-retry', + screenshot: 'only-on-failure', + video: 'retain-on-failure' + }, + globalSetup: path.resolve(__dirname, '../support/global-setup.ts'), + timeout: 60000, + expect: { timeout: 10000 } +}) +``` + +### Cypress Configuration (The Murat Way) +```javascript +import { defineConfig } from 'cypress' + +export default defineConfig({ + e2e: { + baseUrl: 'http://localhost:3000', + viewportWidth: 1920, + viewportHeight: 1080, + video: false, + screenshotOnRunFailure: true, + defaultCommandTimeout: 10000, + requestTimeout: 10000, + responseTimeout: 10000, + retries: { + runMode: 2, + openMode: 0 + }, + env: { + API_URL: 'http://localhost:3001/api', + coverage: false + }, + setupNodeEvents(on, config) { + on('task', { + 'db:seed': seedDatabase, + 'db:reset': resetDatabase, + log(message) { + console.log(message) + return null + } + }) + return config + } + }, + component: { + devServer: { + framework: 'react', + bundler: 'vite' + }, + specPattern: 'src/**/*.cy.{js,jsx,ts,tsx}', + supportFile: 'cypress/support/component.tsx' + } +}) +``` + +## CI/CD Patterns + +### GitHub Actions Workflow Template +```yaml +name: E2E Tests +on: + pull_request: + push: + branches: [main] + +jobs: + install: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version-file: '.nvmrc' + cache: 'npm' + - run: npm ci --prefer-offline + - uses: actions/cache@v4 + with: + path: ~/.npm + key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} + + test-changed: + needs: install + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + 
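+      # Assumed addition, not in the original sketch: the checkout above leaves this job
+      # without Node or node_modules, so mirror the install job's setup before running specs.
+      - uses: actions/setup-node@v4
+        with:
+          node-version-file: '.nvmrc'
+          cache: 'npm'
+      - run: npm ci --prefer-offline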
+      - name: Run changed tests first
+        run: |
+          CHANGED_SPECS=$(git diff --name-only HEAD~1 | grep -E '\.(cy|spec)\.(ts|js)x?$' || true)
+          if [ ! -z "$CHANGED_SPECS" ]; then
+            npm run test -- --spec "$CHANGED_SPECS"
+          fi
+
+  test-e2e:
+    needs: install
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        containers: [1, 2, 3, 4]
+    steps:
+      - uses: actions/checkout@v4
+      - uses: cypress-io/github-action@v6
+        with:
+          start: npm run dev
+          wait-on: 'http://localhost:3000'
+          wait-on-timeout: 120
+          browser: chrome
+          record: true
+          parallel: true
+          group: 'E2E'
+        env:
+          CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
+```
+
+### Burn-in Testing Script
+```bash
+#!/bin/bash
+# scripts/burn-in-changed.sh
+CHANGED_SPECS=$(git diff --name-only HEAD~1 | grep -E '\.(cy|spec)\.(ts|js)x?$')
+if [ ! -z "$CHANGED_SPECS" ]; then
+  for i in {1..10}; do
+    echo "Burn-in run $i of 10"
+    npm run test -- --spec "$CHANGED_SPECS"
+    if [ $? -ne 0 ]; then
+      echo "Burn-in failed on run $i"
+      exit 1
+    fi
+  done
+  echo "Burn-in passed - 10 successful runs"
+fi
+```
+
+## Feature Flag Testing
+
+### Feature Flag Enum Pattern
+```javascript
+// src/utils/flags.js
+export const FLAGS = Object.freeze({
+  NEW_FEATURE: 'new-feature',
+  EXPERIMENT: 'experiment-flag',
+  DARK_MODE: 'dark-mode'
+})
+```
+
+### Stubbing Feature Flags
+```javascript
+// Stub in most tests
+cy.stubFeatureFlags({
+  [FLAGS.NEW_FEATURE]: true,
+  [FLAGS.DARK_MODE]: false
+})
+
+// Dedicated FF tests with cleanup
+describe('Feature Flag Tests', () => {
+  const userId = generateUserId()
+
+  afterEach(() => {
+    removeUserTarget(FLAGS.NEW_FEATURE, userId)
+  })
+
+  it('should test flag variation', () => {
+    setFlagVariation(FLAGS.NEW_FEATURE, userId, 1)
+    // test the feature
+  })
+})
+```
+
+## Component Testing (CCTDD)
+
+### Component Test Driven Development Flow
+```typescript
+// 1. Start with a failing test
+it('should render button', () => {
+  cy.mount(<Button>Click me</Button>)
+  cy.contains('button', 'Click me').should('be.visible')
+})
+
+// 2. Write the minimal component code to make the test pass
+
+// 3. Refactor and add behavior
+it('should handle click', () => {
+  const onClick = cy.stub().as('click')
+  cy.mount(<Button onClick={onClick}>Click me</Button>)
+  cy.contains('button', 'Click me').click()
+  cy.get('@click').should('have.been.calledOnce')
+})
+```
+
+## Risk-Based Testing Philosophy
+
+### Test Level Decision Matrix
+- **P0 (Critical)**: Payment, authentication, data integrity
+- **P1 (Core)**: Primary user journeys, business logic
+- **P2 (Secondary)**: Supporting features, edge cases
+- **P3 (Nice-to-have)**: Cosmetic, non-functional improvements
+
+### Coverage Strategy
+```
+Unit Tests:  70% - Pure functions, algorithms, utilities
+Integration: 20% - API contracts, service boundaries
+E2E:         10% - Critical user paths only
+```
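+
+One way the matrix can surface in the suite itself is by tagging test titles with their priority, so CI can slice runs (for example `--grep @P0` on every commit, the rest nightly). A minimal sketch; the `@P0`/`@P2` tag names and the checkout specs are illustrative, not a prescribed convention:
+
+```typescript
+import { test, expect } from '@playwright/test'
+
+// P0: critical path - run on every commit with `npx playwright test --grep @P0`
+test('user can complete checkout @P0', async ({ page }) => {
+  await page.goto('/checkout')
+  await expect(page.getByRole('heading', { name: 'Checkout' })).toBeVisible()
+})
+
+// P2: secondary behavior - a candidate for the nightly run
+test('coupon field keeps focus after an invalid code @P2', async ({ page }) => {
+  // ...
+})
+```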
+
+## Testing Tools Preferences
+
+### Framework Choices
+- **Component Testing**: Cypress Component Testing > RTL
+- **E2E Testing**: Playwright = Cypress (context-dependent)
+- **API Testing**: Playwright API testing > REST clients
+- **Contract Testing**: Pact.js
+- **Visual Testing**: Percy, Chromatic, or Playwright screenshots
+
+### Supporting Tools
+- **Test Data**: Faker.js for dynamic data
+- **Network**: MSW for service workers, cy.intercept for Cypress
+- **Assertions**: Cypress chains, Playwright expects
+- **Reporting**: HTML reports, JUnit for CI
+
+## Key Principles Summary
+
+1. **Shift Left** - Test early in the development cycle
+2. **Test Pyramid** - More unit/component, fewer E2E
+3. **Deterministic** - No randomness, no flakiness
+4. **Independent** - Tests don't affect each other
+5. **Fast Feedback** - Quick execution, clear failures
+6. **User-Centric** - Test from the user's perspective
+7. **Maintainable** - Easy to understand and modify
+8. **Documented** - Clear intent and requirements
+9. **Automated** - CI/CD integration
+10. **Measured** - Track metrics and improve
+
+## TEA Agent Enhancement Commands
+
+The Test Architect agent should implement these streamlined commands, which incorporate the full Murat testing philosophy:
+
+```
+*automate  - Create comprehensive test automation following all patterns
+             (Generates tests with proper fixtures, helpers, factories,
+             selectors, network handling, data management - all in Murat style)
+
+*ci        - Setup complete CI/CD pipeline with GitHub Actions
+             (Includes parallel execution, burn-in for changed files,
+             environment-based configs, proper reporting)
+
+*framework - Initial test framework setup (one-time)
+             (Creates folder structure, base configs, fixture architecture,
+             auth setup, global setup, all utilities)
+```
+
+These three commands should internally apply ALL the patterns and principles (see the fixture sketch after this list):
+- Functional helpers over Page Objects
+- Pure function → Fixture pattern
+- Network-first interception
+- Factory-based test data
+- Proper selector strategy (data-cy/data-testid)
+- No hard waits, deterministic waiting
+- Self-cleaning tests
+- Component-first approach
+- Risk-based test design
+- Environment-based configuration
+- Modular fixture composition
+
+The commands execute comprehensively without requiring users to know the underlying patterns - they just get production-quality test architecture in the Murat style.
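+
+As one concrete illustration of "pure function → fixture" and modular composition (a minimal sketch; the `buildAuthHeader` helper and the commented-out sibling fixtures are illustrative, while `mergeTests` is Playwright's composition API):
+
+```typescript
+import { test as base, mergeTests } from '@playwright/test'
+
+// Pure function first: trivially unit-testable, framework-agnostic
+export const buildAuthHeader = (token: string) => ({ Authorization: `Bearer ${token}` })
+
+// ...then a thin fixture wrapper exposes it to tests
+const authTest = base.extend<{ authHeader: (token: string) => Record<string, string> }>({
+  authHeader: async ({}, use) => {
+    await use(buildAuthHeader)
+  },
+})
+
+// Compose small fixtures instead of inheriting from a base class
+export const test = mergeTests(authTest /*, networkTest, logTest */)
+```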
+
+## Reference Links for Agent
+
+When the TEA agent needs specific examples or deeper context, it should reference:
+
+### Primary Sources
+- CCTDD Book: Component testing patterns and philosophy
+- Blog posts in test-resources-for-ai/blog/: Specific patterns and strategies
+- Sample repos: Working implementations
+
+### Pattern References
+- Functional Helpers: Page-Objects-vs-Functional-Helpers.mkd
+- Feature Flags: Effective-Test-Strategies-Feature-Flags.mkd
+- CI/CD: CI-CD-strategies.mkd
+- Component Testing: Cypress-Component-Testing-vs-RTL.mkd
+
+### Implementation Examples
+- **cy-vs-pw-murats-version**: Framework comparison and parallel implementations
+- **tour-of-heroes**: Full application with component and E2E tests
+- **pact-js-example repos**: Contract testing patterns
+- **pw-book**: Comprehensive Playwright patterns and examples
+
+## TODO: TEA Agent Implementation Plan
+
+### Phase 1: Core Integration
+- [ ] Integrate this testing philosophy document into the TEA agent at `src/modules/bmm/agents/tea.md`
+- [ ] Update existing TEA commands to reference this document for patterns
+- [ ] Ensure the `*automate`, `*ci`, and `*framework` commands are implemented
+
+### BMAD Method Integration Structure
+
+The BMAD method follows an `agent -> task -> template` architecture. For the three new TEA commands:
+
+#### Task File Locations
+
+**Note**: BMAD is transitioning to a 3-file system per task (md template, yaml process, md checklist).
+For now, we create XML-based task definitions in markdown files.
+
+```
+src/modules/bmm/testarch/
+├── automate.md   # Test automation task (XML format in .md)
+├── ci.md         # CI/CD pipeline task (XML format in .md)
+└── framework.md  # Framework setup task (XML format in .md)
+```
+
+Templates are embedded within the task XML definitions. Focus on good LLM prompts for user interaction.
+
+### Phase 2: Command Implementation Details
+
+#### *automate Command
+When executed, should:
+1. **Check for Epic/Story Context FIRST**:
+   - If a story/epic exists, collaborate with the dev agent to understand:
+     - What was implemented (check story tasks/subtasks)
+     - Source files modified (from the File List in the story)
+     - API specs if backend work
+     - UI components if frontend work
+   - If no story/epic, ask the user what they're automating:
+     - UI feature? → analyze components
+     - API endpoint? → check API specs
+     - Full stack? → gather both
+2. Analyze the current codebase structure (React, Vue, Node.js, etc.)
+3. Auto-detect the existing test framework (Playwright, Cypress, Jest, Vitest)
+   - If no framework is detected, prompt the user for a preference (see the detection sketch after these command descriptions)
+4. Generate tests following:
+   - Functional helper pattern (not Page Objects)
+   - Proper fixture architecture with mergeTests
+   - Factory-based test data generation
+   - Network-first interception setup
+   - Proper selector strategy (data-cy/data-testid)
+   - Self-cleaning test patterns
+   - API setup for data, UI for assertions
+
+#### *ci Command
+When executed, should:
+1. Detect the repository type (GitHub, GitLab, etc.)
+2. Generate workflow files with:
+   - Environment-based configuration
+   - Parallel execution matrix
+   - Burn-in testing for changed files
+   - Proper caching strategies
+   - Test result reporting
+   - Artifact storage
+
+#### *framework Command
+When executed, should:
+1. Create the complete folder structure
+2. Setup configuration files:
+   - Environment-based configs (local, staging, production)
+   - Global setup for auth
+   - Fixture architecture
+3. Install necessary dependencies
+4. Create utility functions and helpers
+5. Setup example tests demonstrating the patterns
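+
+A minimal sketch of the auto-detection heuristic (reading `package.json` dependencies; the exact logic the command uses is an implementation detail, this only shows the idea):
+
+```typescript
+import { readFileSync } from 'node:fs'
+
+const pkg = JSON.parse(readFileSync('package.json', 'utf8'))
+const deps = { ...pkg.dependencies, ...pkg.devDependencies }
+
+// First match wins; the order expresses preference when multiple runners coexist
+const frameworks = ['@playwright/test', 'cypress', 'vitest', 'jest'] as const
+const detected = frameworks.find((dep) => dep in deps)
+
+console.log(detected ?? 'no framework detected - prompt the user')
+```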
+
+### Task Definition Structure
+
+#### *automate Task
+```xml
+<task>
+  <flow>
+    1. Analyze Codebase Structure
+    2. Setup Test Architecture
+    3. Implement Core Test Fixtures
+    4. Generate Test Scenarios
+    5. Implement Test Patterns
+    6. Validate Test Quality
+  </flow>
+  <output>
+    - Complete test suite following functional helper pattern
+    - Fixture architecture with mergeTests composition
+    - Factory-based test data system
+    - Self-cleaning, parallelizable tests
+    - test-automation-plan.yaml with implementation details
+  </output>
+</task>
+```
+
+#### *ci Task
+```xml
+<task>
+  <flow>
+    1. Detect Repository Type
+    2. Analyze Test Framework
+    3. Generate Workflow Files
+    4. Setup Parallel Execution
+    5. Configure Burn-in Testing
+    6. Setup Reporting
+  </flow>
+  <output>
+    - .github/workflows/e2e.yml (or equivalent)
+    - Parallel execution matrix
+    - Burn-in scripts for changed files
+    - Test reporting configuration
+    - ci-pipeline.yaml with pipeline details
+  </output>
+</task>
+```
+
+#### *framework Task
+```xml
+<task>
+  <flow>
+    1. Detect Application Framework
+    2. Create Directory Structure
+    3. Setup Base Configuration
+    4. Implement Fixture Architecture
+    5. Create Helper Functions
+    6. Generate Example Tests
+  </flow>
+  <output>
+    - Complete test directory structure
+    - Environment-based configurations
+    - Fixture composition setup
+    - Helper utilities and factories
+    - test-framework-config.yaml with setup details
+  </output>
+</task>
+```
+
+### Template Definitions
+
+#### test-automation-plan.yaml Template
+```yaml
+template:
+  id: test-automation-plan-v1
+  name: Test Automation Plan
+  output:
+    format: yaml
+    filename: tea.teaLocation/automation/{{epic_num}}.{{story_num}}-test-plan.yml
+
+schema: 1
+story: "{{epic_num}}.{{story_num}}"
+framework: "{{test_framework}}" # playwright|cypress|jest|vitest
+application_type: "{{app_type}}" # react|vue|angular|node
+generated: "{{iso_timestamp}}"
+
+test_architecture:
+  pattern: "functional_helpers" # ALWAYS
+  fixture_composition: true
+  factory_data: true
+  network_interception: true
+  selector_strategy: "data-testid" # or data-cy
+
+test_distribution:
+  unit: {{unit_count}}
+  integration: {{integration_count}}
+  e2e: {{e2e_count}}
+
+files_generated:
+  fixtures: []
+  helpers: []
+  tests: []
+
+coverage_summary:
+  acceptance_criteria_covered: []
+  risk_mitigations_addressed: []
+```
+
+#### ci-pipeline.yaml Template
+```yaml
+template:
+  id: ci-pipeline-v1
+  name: CI/CD Pipeline Configuration
+  output:
+    format: yaml
+    filename: tea.teaLocation/ci/pipeline-config.yml
+
+schema: 1
+repository_type: "{{repo_type}}" # github|gitlab|bitbucket
+test_framework: "{{test_framework}}"
+generated: "{{iso_timestamp}}"
+
+pipeline_features:
+  parallel_execution: true
+  matrix_strategy:
+    containers: {{container_count}}
+  burn_in_testing: true
+  changed_file_detection: true
+  environment_configs: ["local", "staging", "production"]
+
+workflow_files:
+  - path: ".github/workflows/e2e.yml"
+    triggers: ["pull_request", "push"]
+  - path: ".github/workflows/burn-in.yml"
+    triggers: ["pull_request"]
+
+optimizations:
+  cache_strategy: "npm"
+  artifact_retention: "30 days"
+  test_sharding: true
+```
+
+#### test-framework-config.yaml Template
+```yaml
+template:
+  id: test-framework-config-v1
+  name: Test Framework Configuration
+  output:
+    format: yaml
+    filename: tea.teaLocation/framework/setup-config.yml
+
+schema: 1
+framework: "{{test_framework}}"
+application: "{{app_framework}}"
+generated: "{{iso_timestamp}}"
+
+directory_structure:
+  tests_root: "{{test_dir}}" # tests/ or cypress/
+  fixtures: "{{test_dir}}/fixtures"
+  support: "{{test_dir}}/support"
+  helpers: "{{test_dir}}/support/helpers"
+
+configuration_files:
+  - name: "{{framework}}.config.ts"
+    environments: ["local", "staging", "production"]
+  - name: ".nvmrc"
+    node_version: "{{node_version}}"
+  - name: ".npmrc"
+    registry: "{{npm_registry}}"
+
+fixture_architecture:
+  base_fixtures:
+    - apiRequest
+    - network
+    - auth
+    - log
+  composition_method: "mergeTests"
+
+utilities_created:
+  factories: []
+  helpers: []
+  commands: []
+```
+
+### Phase 3: Advanced Patterns Integration
+- [ ] Auth session management patterns from sample repos
+- [ ] Contract testing patterns from pact-js repos
+- [ ] Component testing patterns from tour-of-heroes
+- [ ] Framework comparison patterns from cy-vs-pw repo
+- [ ] Advanced intercept patterns with stateful mocking
+
+### Phase 4: Documentation & Examples
+- [ ] Create example implementations for each pattern
+- [ ] Document the migration path from Page Objects to functional helpers
+- [ ] Provide a framework comparison guide (when to use Playwright vs Cypress)
+- [ ] Create a troubleshooting guide for common issues
+
+### Integration Notes
+
+**Current TEA Agent Location**: `src/modules/bmm/agents/tea.md`
+
+**Key Integration Points**:
+1. The TEA agent should reference this document as its knowledge base
+2. Existing commands (*risk, *design, *trace, *nfr, *review, *gate) remain unchanged
+3. New commands (*automate, *ci, *framework) are additive
+
+**BMAD Method Task Execution Flow**:
+1. User invokes a command (e.g., `*automate`)
+2. TEA agent triggers the corresponding task in `src/modules/bmm/testarch/`
+3. Task follows the XML-defined flow steps
+4. Task generates output using the embedded template definitions
+5. Results are saved to the `tea.teaLocation` directory structure
+
+**Task XML Structure Requirements**:
+- Must include `<llm>` instructions
+- Define an explicit `<flow>` with numbered steps
+- Include `<halt>` conditions for error handling
+- Specify the `<output>` format and location
+- Add `<validation>` criteria
+
+**Template YAML Requirements**:
+- Follow BMAD template schema version 1
+- Include the `template` metadata block
+- Define the output format and filename pattern
+- Use mustache variables ({{variable}})
+- Provide an examples section for clarity
+
+**Critical Patterns to Enforce** (see the waiting sketch below):
+- ALWAYS check for story/epic context first (collaborate with the dev agent)
+- NEVER generate Page Object Model code
+- ALWAYS use functional helpers
+- ALWAYS setup network interception before navigation
+- NEVER use hard waits (wait(3000))
+- ALWAYS use factory functions for test data
+- ALWAYS prefer API for setup, UI for assertions
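+
+To make the waiting rules concrete, a minimal sketch of network-first, deterministic waiting in Playwright (the `/orders` page, route, and test id are illustrative):
+
+```typescript
+import { test, expect } from '@playwright/test'
+
+test('orders load without hard waits', async ({ page }) => {
+  // ❌ NEVER: await page.waitForTimeout(3000)
+  // ✅ Register the wait BEFORE the navigation that triggers the call
+  const ordersResponse = page.waitForResponse('**/api/orders')
+  await page.goto('/orders')
+  expect((await ordersResponse).status()).toBe(200)
+  // The built-in assertion retries until the UI settles - still no sleep
+  await expect(page.getByTestId('order-row').first()).toBeVisible()
+})
+```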
+
+### Key Patterns from Open Source Repos
+
+For Playwright testing patterns, use these repositories:
+- **Cypress vs Playwright Examples**: [https://github.com/muratkeremozcan/cy-vs-pw-murats-version](https://github.com/muratkeremozcan/cy-vs-pw-murats-version) - Side-by-side implementations showing how to write the same tests in both frameworks
+- **Playwright Book Examples**: [https://github.com/muratkeremozcan/pw-book](https://github.com/muratkeremozcan/pw-book) - Comprehensive Playwright patterns and examples
+
+For Contract Testing and Component Testing patterns, use these repositories:
+- **Pact.js Consumer Example**: [https://github.com/muratkeremozcan/pact-js-example-consumer](https://github.com/muratkeremozcan/pact-js-example-consumer)
+- **Pact.js React Consumer**: [https://github.com/muratkeremozcan/pact-js-example-react-consumer](https://github.com/muratkeremozcan/pact-js-example-react-consumer) - Also includes **Vite component testing patterns**
+- **Pact.js Provider Example**: [https://github.com/muratkeremozcan/pact-js-example-provider](https://github.com/muratkeremozcan/pact-js-example-provider)
+
+These repos demonstrate:
+- Contract testing with Pact.js
+- **Vite component testing setup and patterns** (React Consumer repo)
+- Modular fixture patterns
+- Pure function → Fixture wrapper pattern
+- Factory-based test data generation
+- Network interception patterns
+- Proper test organization and naming
+
+### Success Criteria
+
+The TEA agent enhancement is successful when:
+1. Users can run `*automate` and get tests written in Murat's style
+2. Tests follow all the patterns without users knowing the patterns exist
+3. CI/CD setup works out of the box with optimizations
+4. Framework setup creates production-ready test architecture
+5. Generated code looks like Murat wrote it himself
+
+## Conclusion
+
+This document represents the complete testing philosophy and implementation patterns of Murat Ozcan. The TEA agent should use these patterns to generate tests, configurations, and architectures that match this style exactly. The goal is to serve as a force multiplier, allowing teams to implement testing strategies as if Murat himself were writing them.
+
+When in doubt, the agent should:
+1. Prefer simplicity over complexity
+2. Choose composition over inheritance
+3. Favor explicit over implicit
+4. Prioritize maintainability over cleverness
+5. Focus on user value over technical elegance
+
+This is not just a testing strategy - it's a philosophy of building reliable, maintainable, and valuable software through disciplined testing practices.
+
+## Document Status
+
+**Last Updated**: 2025-01-15
+**Context**: This document was created by analyzing Murat Ozcan's books, blog posts, and sample repositories to enhance the BMAD Method's Test Architect (TEA) agent.
+**Continuity Note**: If continuing work in a new chat session, start by reviewing:
+1. The TODO section above for pending implementation tasks
+2. The BMAD Method Integration Structure section for task/template definitions
+3. The integration status of `src/modules/bmm/agents/tea.md`
+4. That all task files exist in `src/modules/bmm/testarch/`
+5. The command names: *automate, *ci, *framework (no tea- prefix)
+
+**Implementation Readiness**: The document now contains complete task and template structures for the three new TEA commands, ready for implementation in the BMAD method architecture.
+
+## Quick Reference for Implementation
+
+### Answers to Common Questions
+1. **TEA Agent Location**: Expand the existing TEA agent at `src/modules/bmm/agents/tea.md` (maintain backward compatibility)
+2. **Sample Code Access**: Use GitHub URLs directly (not flattened text exports)
+3. **Templates**: Create XML task definitions in `src/modules/bmm/testarch/` (BMAD is transitioning to a 3-file system)
+4. **Quality Gate Integration**: New commands (*automate, *ci, *framework) are additive to the existing TEA commands
+5. **Framework Detection**: Auto-detect; prompt the user if unclear
+
+
+================================================
+FILE: Quotes about testing.md
+================================================
+# Quotes about testing
+
+- Testing, Engineering and the Scientific Method are all bound together. Engineering is about assuming stuff will go wrong and learning from that (then writing tests), and predicting the ways it might go wrong and defending against that (and writing more tests).
+- There is a thin line between tech debt and feature debt. Tech debt is anything that impacts the engineering team. Lacking quality in tests is not tech debt, because it impacts customers. Period.
+- Story point estimates must account for development, testing, as well as documentation - whatever it takes to put that feature in front of the customer or the consumer API. Story pointing is an estimation of complexity; having to write tests does not change that because the complexity is still the same. The **velocity** of the team will change and normalize over time. That is the real velocity of the team which fulfills an industry standard definition of done.
+- Writing tests and running them as you implement the feature will make you more productive
+- Shared mutable state is the source of all evil
+- However many tests you have, you can never prove your software is good. But one failing test means your software isn't good enough. Therefore use proven [[Test Methodologies]] to gain the highest confidence with the minimal investment.
+- Engineering is about learning, not already having all the answers. Good engineers optimize to be great at learning. You do that with short feedback cycles and iterating quickly.
+- Your undivided attention is the most precious currency in the universe.
+- Lean: maximum output with minimum work
+- You can't make a baby in a month with 9 women
+- As the [[Quality]] of our system reduces, our ability to change it reduces too
+- The more your tests resemble the way your software is used, the more confidence they give you.
+- #Coverage is an assessment of the thoroughness or completeness of testing with respect to a model. Our model can be unit coverage, feature coverage, #mutation score, combinatorial coverage, non-functional requirement coverage, anything!
+- Pessimists sound smart, optimists make money
+- Fallacy of false authority - expert in 1 field...
+- If you aim at nothing, you hit nothing
+- Doing the correct thing poorly is worth more than executing the wrong things perfectly
+- The longer you wait to do the right thing, the harder it is to migrate later
+- When you lose your money, you lose nothing. When you lose your health, you lose something. When you lose your character, you lose everything.
+- ... once battle-testing has defeated real-world issues and edge cases ...
+- ... trade-offs that it made are no longer valuable
+- a test doesn't deliver value until it fails
+- what you can avoid testing is more important than what you are testing
+- forget the mistake, remember the lesson
+- tests that don't give return on investment in terms of confidence
+- We don't get to do naive UI e2e before there is confidence that the backend works. Better to use an API test client, closer to the backend code and deployments, vs our late UI e2e. At that point we need to be careful with test duplication, use minimal UI e2e & only fill in the gaps.
+There aren't too many test frameworks that support that test strategy; [Cypress.io](https://www.linkedin.com/company/cypress.io/) is one.
+* Always look for opportunities to tweak an existing test as opposed to writing partially duplicated tests for new specs. The reason Cucumber / Gherkin is not great is this duplication; if every feature was mapped to a spec, there would be much duplication between the specs. What matters from a test perspective is the beginning state of a test; if reaching that state is common, then it is an opportunity for a test enhancement vs partial test duplication. At which point, the only caveat becomes the test duration for parallelization concerns.
+* As technology and your organization change, your context changes too. Many of the things that you might once consider best practice can easily become anti-patterns. For example, monorepos work great when you are a small team, but by the time you grow to hundreds or perhaps thousands of engineers, monorepos present many challenges that require complex solutions to address.
+
+* My golden rules in testing: - It's always cost vs confidence - cost = creation + execution + maintenance - What you can avoid testing is more important than what you are testing
+* The debate on Page Object vs module pattern is really just Inheritance vs Composition. Inheritance (PO) is great for describing what something is; the page has x, y, z on it. Composition (module pattern) is great for describing what something does. Which one do you think best fits component-based architecture, where components are the building blocks and get reused on pages? How about user flows?
+* Components compose, and if the pages which are built of components are abstracted with classes, there's over-abstraction and inevitable duplication
+* Study for 1 hour every morning. Even in the most ideal job, you won't be learning more than 500 hours a year, because most of it is busy work and grind. If you invest an hour a day in gaining knowledge, in 5 years you will build 10 years' worth of relative tech
+* When you have purpose, you have work-life integration. Find your purpose, and never work a day in your life. Help others build their self-defined purpose, and you rarely have to people-manage.
+* The ability to hold a holistic picture in mind and help other team members see it, collaboratively navigate towards it, changing direction as new ideas arise, has been a hallmark of great engineers.
+* (about try-hard mocking) The mess in such tests is telling us that the design isn't right but, instead of fixing the problem by improving the code, we have to carry the **extra complexity** in both code and test
+* We prefer to have the end-to-end tests exercise both the system and the **process by which it's built and deployed**
+* ascribing greater significance to a problem than it should have in reality
+* Business demands may overload teams with too much work, driving them to optimize for feature delivery. This is a terrible metric for success. It drives engineers to take shortcuts and so make bad decisions that end up with them going slower over time, not faster. Developers often try to please the business by reducing the quality of their work. If you've ever given an estimate that didn't include testing or refactoring, you did this.
+* Quality is a whole-team responsibility. From a developer's perspective a significant part of that responsibility firmly sits with us; it is not something we can successfully abdicate to other people. If you see testing as somebody else's responsibility, QA as the gatekeepers of quality, or your manager's job to tell you how much quality you can put in your code, you should think again.
+* The first principle is that you must not fool yourself, and you're the easiest person to fool.
+* "Simplicity is the ultimate sophistication". Because it is very easy to write confusing, complex code. But when you build software that is easy to understand, easy to read and write, easy to learn, it is a true achievement that must be celebrated.
+* Whoever is going to perform in the office will perform remotely. Whoever is going to pretend to seem busy and not produce anything worthwhile is going to continue that, no matter where they are.
+* **Murphy's Law of Debugging:** The thing you believe so deeply can't possibly be wrong, so you never bother testing it, is definitely where you'll find the bug after you pound your head on your desk and change it only because you've tried everything else you can possibly think of.
+* cognitive load: if our brain is overloaded focusing on unimportant details, it is harder to reason about higher-order thinking
+* No plan survives contact with the enemy. No software survives contact with the users.
+* The reasons why most people push back are cultural. They're about familiarity and comfort. About not wanting to look or feel stupid in front of others.
+* Most engineers find pw syntax simpler than cy because they do not get the functional pipeline of commands, and have no idea how to debug or reason about it. Unless they are familiar with functional programming, the shift in mental model is painful, hence the preference for pw.
+* TypeScript saves you so much time by preventing many programming mistakes & runtime errors, leaving you more time to do other things such as deciphering & fixing insignificant TypeScript errors. (David Khorshid)
+* I believe we should focus on building simple, solid, and future-proof solutions rather than over-engineered, temporary fixes. These quick fixes often cater to our current, imperfect state and will quickly become outdated as the project matures, leading to more rework and unnecessary busywork.
+* In the world of AI-driven development, end-to-end tests reign supreme.
+  AI shines at generating unit and small-scope integration tests, where the context is limited and the variables are few. But as soon as the context expands—more services, more integrations, more edge cases—AI struggles to cover every scenario reliably.
+
+  I've seen firsthand how complex AI-generated changes can be difficult to scrutinize. When that happens, a solid suite of E2E tests becomes the ultimate acceptance criteria. They tell you, definitively, whether a change behaves as expected in the real world.
+
+  That's why I believe TDD should become the default mindset when working with AI:
+  1. Write your acceptance criteria as tests first.
+  2. Let AI propose the implementation.
+  3. Run your E2E suite.
+  4. Refine / reject, and repeat
+
+* Pure coding is almost worthless now, but you still need enough fluency to guide and vet an AI helper. That makes these skills the real currency:
+  - **Figuring out what needs solving** and asking the right questions
+  - **Recognizing quality** and knowing when something's "good enough"
+  - **Debugging & troubleshooting:** Pinpointing issues and fixing them efficiently
+  - **Persuasion & collaboration:** Explaining your ideas and rallying others
+  - **Creative thinking:** Finding unconventional solutions when it matters
+  - **System & architectural sense:** Understanding how components fit and scale
+
+- ...write-only code—hard to debug, annoying to read later.
+* The `this` keyword in JavaScript will burn you one day. Then it will burn you again and again and again. If Dante Alighieri were alive today, he would put writing object-oriented JavaScript among the first levels of Hell for sure. ![[Pasted image 20220316143024.png]]
+
+- strong opinions, weakly held
+- be alert for possibilities of paragraphing
+- most notice a single grain of sand, others see the flow through the hourglass
+- ...talk that dances around thorns
+- small men talk big
+- setbacks are setups for something greater
+- if you're not upsetting anyone, you're not changing the status quo
+- one part of leadership is allowing others to do a worse job than you would
+- leadership of creative people is mostly about enabling creative communication
+- in danger of too violently agreeing with each other in the course of this conversation
+- what's the point of it all if all you want is to be liked by everyone and avoid trouble. The only way I got any place was by breaking some of the rules.
+- More harm is caused in this world by stupidity and ignorance than outright evil. The incompetent and stupid are far more dangerous than those who are overtly evil, because we are never quite sure where they are leading us, until it is too late.
+- Develop your sense of self-worth from internal standards, and not incessant comparisons.
+
+
+
+
+================================================
+FILE: blog/(mostly incomplete) List of Test Methodologies.mkd
+================================================
+# (mostly incomplete) List of Test Methodologies
+
+Test methodologies can be of benefit when conceptualizing any test approach -from unit to features-, defining test strategies or building test architectures.
+
+There are in-depth academic areas of study on these. This is not a complete list, only a summary of the ones that are more commonly used.
+
+### Ad-hoc, Manual, [Exploratory testing](https://www.guru99.com/exploratory-testing.html)
+
+Although this is testing at its simplest, there are [holistic approaches](https://www.satisfice.com/rapid-testing-methodology) worth checking out.
+
+### [Requirements-based (traceability matrix)](https://en.wikipedia.org/wiki/Traceability_matrix)
+
+Testing based on the requirements of the system. Requirement Coverage is the relationship between test cases and requirements. Requirement Traceability visualizes these relations: 1:1, 1:n, n:1 or n:n.
+
+### [Scenario / Workflow / Use case / Activity diagrams](https://en.wikipedia.org/wiki/Activity_diagram)
+
+Testing based on the activity / sequence of user workflows / events that occur in the system. Usually referred to as use case scenarios. Can be useful while conceptualizing acceptance criteria in a BDD context.
+
+### [Non-Functional Requirement testing](https://iso25000.com/index.php/en/iso-25000-standards/iso-25010)
+
+Testing based on aspects that are not directly related to the functional requirements of the system. Per the standard, some of these are Performance, Reliability, Security, Maintainability, Usability, Portability etc. The term service-level agreement (SLA) is also used in related contexts.
+
+### [Equivalence class partitioning, Boundary value analysis](https://www.guru99.com/equivalence-partitioning-boundary-value-analysis.html)
+
+These methodologies are useful when there is a vast amount of data and we want to reduce the amount of testing needed. In Equivalence class partitioning, the input data is divided into ranges of values that are known to be equivalent. The tests are then created to be representative of each range. In Boundary value analysis, testing is done at the extreme ends / boundaries of these partitions. Usually the two methodologies are used harmoniously.
+
+### [Decision tree](https://en.wikipedia.org/wiki/Decision_tree), [Classification tree](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.83.9731&rep=rep1&type=pdf), [Cause-effect graphing](https://www.geeksforgeeks.org/cause-effect-graphing-in-software-engineering/)
+
+A decision tree is a flowchart-like structure where each node represents a test on an attribute and the branches represent the outcomes of the test. In daily testing practices, it can be used in relevance to use case scenarios / workflows, but the applications are not limited to this; machine learning, operations research, decision analysis etc. are some of the other domains it is used in.
+
+The high-level distinction between a classification tree and a decision tree is: at node level, the attributes vs decisions/scenarios/tests; and at branch level, possible values of the attributes vs outcomes.
+
+The cause-effect graphing technique can be used when the combination of input conditions is a concern. A graph is used to represent the combinations of input conditions, and then the graph is converted to a decision table to obtain the test cases.
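+
+As a small illustration of turning such a decision table into tests (the `canCheckout` rule and the Vitest runner here are hypothetical, chosen only to show the shape):
+
+```typescript
+import { describe, it, expect } from 'vitest'
+import { canCheckout } from './checkout' // hypothetical rule under test
+
+// Each row = one combination of causes and its expected effect
+const decisionTable = [
+  { loggedIn: true, cartEmpty: false, expected: true },
+  { loggedIn: true, cartEmpty: true, expected: false },
+  { loggedIn: false, cartEmpty: false, expected: false },
+  { loggedIn: false, cartEmpty: true, expected: false },
+]
+
+describe('checkout decision table', () => {
+  decisionTable.forEach(({ loggedIn, cartEmpty, expected }) => {
+    it(`loggedIn=${loggedIn}, cartEmpty=${cartEmpty} -> ${expected}`, () => {
+      expect(canCheckout({ loggedIn, cartEmpty })).toBe(expected)
+    })
+  })
+})
+```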
+
+### [Combinatorial testing](https://github.com/NoriSte/ui-testing-best-practices/blob/master/sections/advanced/combinatorial-testing.md)
+
+The key insight underlying this form of testing is that not every parameter contributes to every failure, and most failures are caused by interactions between relatively few parameters. The problem space - which can be anything from configurations, deployments, test suites or test scenarios - is modeled into parameters and the values these parameters can have. The test cases are then generated by feeding this information into tools. Check out [this tool](https://foselab.unibg.it/ctwedge/) and many more like it at [pairwise.org](https://jaccz.github.io/pairwise/).
+
+### [State-based / Finite State Machines](https://xstate.js.org/docs/about/resources.html#courses)
+
+A finite state machine (FSM) describes the possible states of the system and the transitions that can occur between them, triggered by inputs. The test cases are then generated by feeding the state machine into a tool:
+
+1. Understand the various states and transitions, and mark each state as valid or invalid.
+
+2. Define a sequence of events that leads to an allowed test-ending state.
+
+3. Note down each visited state and traversed transition.
+
+4. Repeat steps 2 and 3 until all states have been visited and all transitions traversed.
+
+[XState](https://xstate.js.org/) is a well-known JS/TS library for creating state machines and statecharts, and interpreting them.
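+
+A minimal sketch of such a machine in XState (a two-state toggle; a model-based tool can then walk every state and transition):
+
+```typescript
+import { createMachine } from 'xstate'
+
+export const toggleMachine = createMachine({
+  id: 'toggle',
+  initial: 'inactive',
+  states: {
+    inactive: { on: { TOGGLE: 'active' } },
+    active: { on: { TOGGLE: 'inactive' } },
+  },
+})
+// A test generator would enumerate: inactive -TOGGLE-> active -TOGGLE-> inactive
+```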
+
+### [Markov chains (state + statistical)](https://en.wikipedia.org/wiki/Markov_chain)
+
+The distinction between FSMs and Markov chains is that Markov chains are probabilistic while FSMs are deterministic: Markov chains model the likelihood of transitions between states, whereas FSMs model the exact transitions a system can make.
+
+### [Model based testing](https://en.wikipedia.org/wiki/Model-based_testing)
+
+In model-based testing, a model is created to describe the system. Generally Activity or State diagrams are used for the model. In turn, test cases are generated by feeding the model into a tool. While a part of the test community has sworn by it for decades, others are hindered by the test maintenance and reverse-engineering tedium.
+
+### [Unit: statement, branch, function, line](https://krishankantsinghal.medium.com/how-to-read-test-coverage-report-generated-using-jest-c2d1cb70da8b)
+
+Familiar terminology from the day-to-day tools we use in development.
+
+### [Mutation testing](https://stryker-mutator.io/docs/)
+
+Mutation testing can be used to evaluate the fault-detection capability of a unit test suite. In mutation testing, artificial "defects" are introduced into the code. Subsequently, different versions of the code -mutants- are tested against using the test suite at hand. Finally, the efficiency of the test cases is evaluated -the mutation score- based on the success rate of finding defects -mutants being killed. In modern web dev, it can be utilized after there is confidence in coverage, to further determine if the testing is adequate. Then, the tests with a low mutation score can be enhanced to be more meaningful.
+
+### UI e2e testing
+
+The most common terminology being referred to by UI automation. No mocking of any dependencies; testing as a real user would. Can incorporate other test methodologies.
+
+### [UI (component) Integration testing](https://itnext.io/component-vs-ui-integration-vs-e2e-tests-f02b575339dc)
+
+These look like UI e2e tests, but they fully stub the network and run without hitting a real server. They are faster and less brittle than traditional UI e2e tests since the network is not a concern. They are great for a shift-left approach and for isolating the UI functionality prior to testing on deployments where the backend matters.
+
+### [UI Component testing](https://github.com/NoriSte/ui-testing-best-practices#7-component-testing)
+
+The UI component testing paradigm is rapidly changing, but the ultimate goal is to test the component in isolation. Some approaches in the React world are with [React Testing Library](https://testing-library.com/docs/react-testing-library/intro), [Enzyme](https://airbnb.io/enzyme/), and recently with [Cypress](https://docs.cypress.io/guides/component-testing/introduction#What-is-Component-Testing), where components are rendered in the browser as opposed to the terminal. A popular way to build components is with Storybook, and newer approaches are allowing stories to be tested as well.
+
+### [API testing](https://en.wikipedia.org/wiki/API_testing)
+
+In API testing a real server is the request target; this can also be referred to as a contract test sometimes. It can also be that the tests span multiple apis, whereby this can be referred to as API e2e testing. The goal is to test services, or the integration of them, at a higher level than isolated modules.
+
+### [API component/module testing](https://github.com/goldbergyoni/javascript-testing-best-practices#-%EF%B8%8F-17-test-many-input-combinations-using-property-based-testing)
+
+The distinction from API testing is that everything external to the component is mocked and the module is tested in isolation. In the NodeJS world, Nock (mock external http) and Supertest or Axios (test only the server) can be used to achieve this kind of testing. In the Supertest & Axios cases, the server is served locally, and the db is preferably in a local Docker container.
+
+### [Consumer driven Contract testing](https://docs.pact.io/)
+
+A type of contract testing that ensures that a provider is compatible with the expectations that the consumer has of it. It is the primary way to shift API testing left, and to test the integration of apis/services prior to deploying them to a common environment. Pact is a well-known framework on this matter.
+
+### [Email testing](https://github.com/NoriSte/ui-testing-best-practices/blob/master/sections/advanced/email-testing.md)
+
+Typically email testing involves validating email fields (from, to, cc, bcc, subject, attachments), HTML content and links in the email. Email services also allow spam checks and visual checks. The core goal is to enable the last mile of end-to-end testing, so that a typical web app can be tested from start to finish.
+
+### [Visual regression/snapshot testing](https://percy.io/)
+
+A visual regression test checks what the user will see after any code changes have been executed, by comparing screenshots taken before and after the code changes. It is the primary way to validate regressions in CSS, but it is also useful for covering viewport & cross browser/device combinations, as well as localization.
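+
+As a minimal sketch with Playwright's built-in screenshot assertion (the page, baseline name, and threshold are illustrative; Percy and Chromatic offer hosted equivalents):
+
+```typescript
+import { test, expect } from '@playwright/test'
+
+test('pricing page has no visual regressions', async ({ page }) => {
+  await page.goto('/pricing')
+  // Compares against a stored baseline; the first run records it
+  await expect(page).toHaveScreenshot('pricing.png', { maxDiffPixelRatio: 0.01 })
+})
+```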
+
+### [Performance testing (user xp, load, spike, endurance)](https://github.com/NoriSte/ui-testing-best-practices/blob/master/sections/advanced/performance-testing.md)
+
+In the web world this is beginning to have 2 paradigms: for the UI it is concerned with user experience (tools like Lighthouse), and for the back end it evaluates how a system performs in terms of responsiveness and stability under a particular workload (tools like k6).
+
+### [Chaos engineering](https://en.wikipedia.org/wiki/Chaos_engineering)
+
+Related to NFR testing in resiliency, it is the discipline of experimenting on a software system in production in order to build confidence in the system's capability to withstand turbulent and unexpected conditions. Generally speaking it is at the top of the pyramid and applied after some amount of maturity in testing.
+
+### [Penetration / Security / Vulnerability testing](https://www.cypress.io/blog/2021/04/28/cypress-and-neuralegion/)
+
+Related to NFR testing as well, it is performed to evaluate the security of the system, using manual or automated technologies to systematically compromise servers, endpoints, web applications, wireless networks, network devices, mobile devices and other potential points of exposure. A vast topic in its own right, being made approachable to web development via automated test tools that drive front-end UIs while assessing vulnerabilities.
+
+
+================================================
+FILE: blog/API e2e testing event driven systems.mkd
+================================================
+# API e2e testing event driven systems
+
+Lately I have been asked about API testing tools and approaches to API testing event driven systems.
+
+### What is API Testing?
+
+Let us remember the definitions from the [Test Methodologies](https://dev.to/muratkeremozcan/mostly-incomplete-list-of-test-methodologies-52no) post:
+
+> [API testing](https://en.wikipedia.org/wiki/API_testing): in API testing a real server is the request target; this can also be referred to as a contract test sometimes. It can also be that the tests span multiple apis, whereby this can be referred to as API e2e testing. The goal is to test services or the integration of them at a higher level than isolated modules.
+>
+> [API component/module testing](https://github.com/goldbergyoni/javascript-testing-best-practices#-%EF%B8%8F-17-test-many-input-combinations-using-property-based-testing): the distinction from API testing is that all that is external to the component is mocked and the module is tested in isolation. In the NodeJS world, Nock (mock external http) and Supertest (abstract the server) can be used to achieve this kind of testing.
+>
+> [Consumer driven Contract testing](https://docs.pact.io/): a type of contract testing that ensures that a provider is compatible with the expectations that the consumer has of it. It is the primary way to shift API testing left, and test the integration of apis/services prior to deploying them to a common environment. Pact is a well known framework on this matter.
+
+We are considering API testing (the first kind) tools in an event driven system.
+
+### What is an event driven system, and why is it hard to test?
+
+In layman's terms, an event-driven system is asynchronous and non-blocking: drop the message, move on, and (maybe) pick up a confirmation message later. This can lead to frustration and confusion when API testing and changing the state of the system. Imagine this scenario:
+
+1. Send a POST request to service A, we get a 200 - so far so good.
+
+   > Assume that we expect a change at service B because of this
+
+2. We send a GET to service B to verify this change; we get a 200, but there is no indication of our change, yet(!)
+
+3. We wait for **an uncertain amount of time**, we GET again, we get a 200, and we can confirm that the change has been made.
+
+We may do a GET at step 3 and still not be able to confirm that the change has been made!
+
+### How long should we wait?
+
+The common solution to the asynchronous updates seems handy: sleeping/waiting for a few seconds. If it keeps failing in CI, increase the timer. This can eventually get the test working, because it gives the service time to update before we check the next event to be tested.
+
+**This is an anti-pattern in testing**. We should [Await, not Sleep](https://github.com/NoriSte/ui-testing-best-practices/blob/master/sections/generic-best-practices/await-dont-sleep.md), but this is not possible with the majority of API testing tools.
+
+### Postman/Newman, and any tools like it (Supertest/Chai http being used as an API client) fall short
+
+In a previous job, the system under test had a microservice we call the adapter. Mainly, it translates a building automation protocol to the cloud. The dev for this microservice started out the tests in Postman back in the day, and diligently maintained them. We set up Newman to run in CI - Newman is a tool that lets us run Postman collections from the CLI. In my opinion these are fine for manual/local testing use cases; I prefer restUtil (vs-code extension) for instance. But for CI, especially with event driven systems, the needs are different.
+
+I have used Supertest & Chai http for [API component/module testing](https://github.com/goldbergyoni/javascript-testing-best-practices#-%EF%B8%8F-17-test-many-input-combinations-using-property-based-testing) (definition 2 above), where all that is external to the component is mocked and the module is tested in isolation. They are great tools in any environment for this use case, but not suited for API e2e testing event driven systems, because there they are no different than Postman.
+
+### So what can we use for API e2e testing event driven systems?
+
+There is a huge shortcoming with most of the API test tools I know of: they do not have a built-in retry utility, and are therefore synchronous; you have to hard-wait so the back-ends settle after your CRUD ops.
+
+For example, running Newman in the pipeline, to accommodate slow pipeline conditions and deployments, I have a command:
+
+```shell
+newman run -e --delay-request --env-var "READBACK_DELAY="
+```
+
+`--delay-request` is the time to wait between Postman/Newman tests, and `READBACK_DELAY` is the time to wait before checking on a response after an operation. We have to keep increasing these to accommodate CI; this is the only way to test event driven systems with Postman/Newman, and it is an anti-pattern.
+
+At this time in the industry CI is the meta; "Await, don't sleep" is the motto. Postman, or tools like it, cannot compete in an event-driven space.
+
+### What can this look like in a better world?
+
+Cypress is not only a great tool for UI e2e testing, handling [network requests in a web app](https://docs.cypress.io/guides/guides/network-requests#Testing-Strategies); it is also a great api testing framework thanks to [cy.request](https://docs.cypress.io/api/commands/request#Syntax).
+
+```javascript
+it('your API test', () => {
+  // Assume Arrange & Act already happened in this test
+  // (1. Send a POST request to service A, we get a 200)
+
+  // Assert: assume that we expect a change at service B
+  // (2. Now we are sending a GET to service B to verify the change)
+
+  cy.request({
+    method: 'GET',
+    url: `https://api.service-b`,
+    headers: { Authorization: `${bearerToken}` },
+    // under the hood Cypress retries the initial request
+    retryOnStatusCodeFailure: true,
+    // Cypress also retries on transient network errors
+    retryOnNetworkFailure: true,
+    // only fail if it takes 10 seconds; if shorter, keep retrying
+    timeout: 10000
+  })
+    .its('body.length') // the data we are expecting at service B
+    .should('be.greaterThan', 0) // will retry the assertion for 10 seconds
+})
+```
+
+The above pattern by itself takes care of the majority of API testing problems in an event driven system. There are also advanced techniques with plugins like [cypress-recurse](https://www.npmjs.com/package/cypress-recurse) and [cypress-wait-until](https://www.npmjs.com/package/cypress-wait-until) that can be used to handle more difficult scenarios.
+
+```javascript
+import { recurse } from 'cypress-recurse'
+
+// assume cy.getToken() is a function that yields the auth token
+
+/** the API request to GET and yield all the items at Service B */
+const getServiceBItems = () =>
+  cy.getToken().then((bearerToken) =>
+    cy
+      .request({
+        method: 'GET',
+        url: 'https://api.service-b',
+        headers: {
+          Authorization: `${bearerToken}`
+        },
+        retryOnNetworkFailure: true,
+        retryOnStatusCodeFailure: true,
+        timeout: 10000
+      })
+      .its('body')
+  )
+
+/** gets service B items, filters the list for the item we want */
+const itemNeeded = (itemId) =>
+  getServiceBItems().then((itemList) =>
+    itemList
+      .filter((serviceBItem) => serviceBItem.item.id === `${itemId}`)
+      .map((serviceBItem) => serviceBItem.item)[0]
+  )
+
+it('your very complex API test', () => {
+  // Assume Arrange & Act already happened in this test
+  // (1. Send a POST request to service A, we get a 200)
+
+  // Assert: assume that we expect a change at service B because of this
+  // (2. Now we are sending a GET to service B to verify this change)
+
+  // hypothetically there is more logic needed in this test
+  // so we are using cypress-recurse
+  recurse(
+    // a pure function that yields a value
+    () => itemNeeded(itemXYZ),
+    // the predicate that we keep retrying for
+    (item) => item === itemXYZ,
+    {
+      // optional configurations
+      log: true, // logs details in the Cypress runner
+      limit: 100, // max number of iterations before failing
+      timeout: 30000, // time limit in ms
+      delay: 3000 // delay before the next iteration
+    }
+  )
+})
+```
+
+Let me know your thoughts about API testing tools; perhaps there are new ones I haven't yet heard about that allow such techniques.
+
+
+================================================
+FILE: blog/Automating API Documentation A Journey from TypeScript to OpenAPI and Schema Governence with Optic.mkd
+================================================
+# Automating API Documentation: A Journey from TypeScript to OpenAPI and Schema Governance with Optic
+
+In the previous blog post, [Documenting and Testing Schemas of Serverless Stacks with Optic & Cypress](https://dev.to/muratkeremozcan/schema-testing-serverless-stacks-with-optic-cypress-26f5), we focused on the benefits of schema testing and governance. Briefly, some of the core problems addressed were:
+
+1. **Effortless API Documentation**: Automating API documentation creation and updates, ensuring that our API documentation evolves in tandem with our schema, courtesy of Optic's forward-governing capabilities.
+2. **Detecting Cross-Service Integration Issues Up Front**: Having the ability to test and detect cross-service integration issues locally; long before we have to deploy services to a common environment and test them with e2e.
+3. **Making the Process Painless**: Re-using existing HTTP/e2e suites (or their subsets) to update and/or verify our OpenAPI schema; no additional work for team-level service owners.
+
+In the previous approach, we relied on Optic's key feature to [capture the HTTP traffic using the Optic proxy](https://dev.to/muratkeremozcan/schema-testing-serverless-stacks-with-optic-cypress-26f5#capture-the-http-traffic-using-the-optic-proxy). Henceforth in the text, we will refer to that as the **HTTP-capturing Approach**:
+
+1. Capture HTTP traffic during e2e tests.
+2. Generate or update the OpenAPI spec based on the captured traffic.
+3. Perform schema governance with [Optic](https://www.useoptic.com/).
+
+In this post, we want to evaluate an alternative approach of generating our OpenAPI documentation from our TypeScript types. Henceforth in the text, we will refer to that as the **TypeScript-based Approach**:
+
+1. Organize our TypeScript request and response types into a certain folder.
+2. Use these TypeScript request & response types in our lambda code.
+3. Generate JSON schemas with the [`ts-json-schema-generator`](https://github.com/vega/ts-json-schema-generator) package.
+4. Generate the OpenAPI spec with the [`openapi-types`](https://github.com/octokit/openapi-types.ts) package.
+5. Perform schema governance with [Optic](https://www.useoptic.com/).
+
+For our case study, we utilize the same comprehensive repository that includes a TypeScript-based backend and frontend, AWS Lambdas, and temporary stacks managed with AWS CDK. These components are tested in PRs through backend and frontend e2e tests targeting temporary deployments, as well as consistent deployments in development, staging, and production environments.
+
+Here is the [link to the repo](https://github.com/muratkeremozcan/aws-cdk-in-practice) and the initial [Optic PR](https://github.com/muratkeremozcan/aws-cdk-in-practice/pull/11/files).
+Here is the [new PR](https://github.com/muratkeremozcan/aws-cdk-in-practice/pull/18) with OpenAPI generation from types.
+
+> Note: This case study results in two different OpenAPI spec files, each representing a distinct approach. The traditional method involving HTTP tests and the Optic proxy generates an `openapi.yml` file, whereas the new type-based method produces an `openapi.json` file. Keep in mind that in practical applications, the choice of file format (YAML or JSON) depends on your specific needs. This case study also includes two sets of Optic diff and lint scripts, which is unusual for most real-world applications but was necessary here to clearly differentiate between the two approaches within a single repository.
+
+- [1. Organize our TypeScript request and response types into a certain folder](#1-organize-our-typescript-request-and-response-types-into-a-certain-folder)
+  - [Create a TS file per handler to house the request and response types](#create-a-ts-file-per-handler-to-house-the-request-and-response-types)
+- [2. Use these TypeScript request \& response types in our lambda code](#2-use-these-typescript-request--response-types-in-our-lambda-code)
+- [3. Generate JSON schemas with the `ts-json-schema-generator` package](#3-generate-json-schemas-with-the-ts-json-schema-generator-package)
+- [4. Generate the OpenAPI spec with the `openapi-types` package.](#4-generate-the-openapi-spec-with-the-openapi-types-package)
+  - [Create the `openapi.ts` file](#create-the-openapits-file)
+  - [Create a script to execute the `openapi.ts` file and generate the OpenAPI spec.](#create-a-script-to-execute-the-openapits-file-and-generate-the-openapi-spec)
+- [5. Perform schema governance with Optic](#5-perform-schema-governance-with-optic)
+- [Comparison of the TS approach vs http-capture approach](#comparison-of-the-ts-approach-vs-http-capture-approach)
+  - [TypeScript-based Approach with Optic: Use Case Scenario:](#typescript-based-approach-with-optic-use-case-scenario)
+  - [HTTP-based Approach with Optic: Use Case Scenario](#http-based-approach-with-optic-use-case-scenario)
+- [Conclusion](#conclusion)
+- [Addendum: Using the Types at the Lambdas as the Source of Truth (Recommended)](#addendum-using-the-types-at-the-lambdas-as-the-source-of-truth-recommended)
+
+### 1. Organize our TypeScript request and response types into a certain folder
+
+> ```bash
+> #(We are here)
+> Using types -> JSON schemas -> OpenAPI spec -> schema diffing with Optic
+> ```
+
+In the source code, let's examine our [lambda handlers](https://github.com/muratkeremozcan/aws-cdk-in-practice/tree/main/infrastructure/Lambda). Currently, they return HTTP responses but do not utilize specific request or response types.
+
+```ts
+// ./infrastructure/Lambda/delete/lambda/index.ts
+
+return httpResponse(
+  200,
+  JSON.stringify({ message: "Todo deleted successfully." })
+);
+```
+
+```ts
+// ./infrastructure/Lambda/get/lambda/index.ts
+
+const { Items }: DynamoDB.ScanOutput = await dynamoDB
+  .scan({ TableName: tableName })
+  .promise();
+
+return httpResponse(200, JSON.stringify({ todos: Items }));
+```
+
+```ts
+// ./infrastructure/Lambda/post/lambda/index.ts
+
+const todo: Todo = {
+  id: uuidv4(),
+  todo_completed,
+  todo_description,
+  todo_name,
+};
+
+await dynamoDB.put({ TableName: tableName, Item: todo }).promise();
+
+return httpResponse(200, JSON.stringify({ todo }));
+```
+
+```ts
+// ./infrastructure/Lambda/put/lambda/index.ts
+
+const updatedTodo: Todo = {
+  id,
+  todo_name,
+  todo_description,
+  todo_completed,
+};
+
+return httpResponse(200, JSON.stringify({ todo: updatedTodo }));
+```
+
+If we define request and response types centrally and use them across our lambda functions, we can leverage them to automatically generate an OpenAPI specification. This method ensures that our OpenAPI documentation remains synchronized with our codebase, reflecting any changes in our types seamlessly. The cost is a one-time setup investment. This approach not only streamlines our documentation workflow but also enhances the accuracy and reliability of our API specifications.
+
+The distinction from our previous approach, which involved capturing HTTP tests via the Optic proxy to verify or update our OpenAPI schema, lies in our newfound reliance on TypeScript types. In the earlier method, we depended on capturing real HTTP traffic to reflect our API's behavior. Now, we pivot to a more proactive approach, using our TypeScript request and response types as the primary source of truth.
+
+#### Create a TS file per handler to house the request and response types
+
+This folder can be anywhere in our repository.
For the example's sake, let's assume it is under `./infrastructure/api-specs`. We will also make a plausible real-world assumption that there may be multiple versions of the API, and that we are working with v1.
+
+```bash
+├── api-specs
+  └── v1
+    ├── deleteTodo.ts
+    ├── getTodos.ts
+    ├── postTodo.ts
+    ├── putTodo.ts
+```
+
+Define the response body and, if needed, the request body per handler. We can take our cues from the existing lambda code.
+
+```ts
+// ./infrastructure/api-specs/v1/deleteTodo.ts
+
+export type ResponseBody = {
+  message: string;
+};
+```
+
+Any types used in these responses can be imported from anywhere; for example, the type `Todo`.
+
+```ts
+// ./infrastructure/api-specs/v1/getTodos.ts
+
+// import any types used in these responses from anywhere
+import type { Todo } from "customTypes/index";
+
+export type ResponseBody = {
+  todos: Todo[];
+};
+```
+
+If there is a possible request body, define it here too. Even if it may not be used in the lambda function, defining it keeps our OpenAPI doc complete for the benefit of our API's consumers.
+
+```ts
+// ./infrastructure/api-specs/v1/postTodo.ts
+
+import type { Todo } from "customTypes/index";
+
+export type ResponseBody = {
+  todo: Todo;
+};
+
+export type RequestBody = {
+  todo: Todo;
+};
+```
+
+```ts
+// ./infrastructure/api-specs/v1/putTodo.ts
+
+import type { Todo } from "customTypes/index";
+
+export type ResponseBody = {
+  todo: Todo;
+};
+
+export type RequestBody = {
+  todo: Todo;
+};
+```
+
+### 2. Use these TypeScript request & response types in our lambda code
+
+> ```bash
+> #(We are here still)
+> Using types -> JSON schemas -> OpenAPI spec -> schema diffing with Optic
+> ```
+
+Now that we have the request and response types in a central location, we use them in our lambda code. Here are the key changes in the repo example; you can find the full code in the repo as well as the PR.
+
+Import the type from the central location, and use it in the handler. All we are doing differently here is assigning to a typed const and using it in the HTTP response.
+
+```ts
+// ./infrastructure/Lambda/delete/lambda/index.ts
+
+import { ResponseBody } from "api-specs/v1/deleteTodo";
+
+// the assignment
+const response: ResponseBody = {
+  message: "Todo deleted successfully.",
+};
+
+// before:
+// JSON.stringify({message: 'Todo deleted successfully.'}),
+// after:
+return httpResponse(200, JSON.stringify(response));
+```
+
+Disclaimer: we may need to do some additional work to make TypeScript happy, but this is also a one-time effort (one concrete example follows below). We foresee that most of the time the code changes will be minimal, and they will be future-proof thanks to the type protection.
+
+> Check out the Addendum section for an alternative approach where the types in the lambdas are the source of truth, which can mediate such issues.
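+
+As a concrete illustration of that friction (a hypothetical sketch, not part of the repo changes): in the GET handler, DynamoDB's `ScanOutput` types `Items` as `AttributeMap[] | undefined`, which a strict `todos: Todo[]` response type would reject. Widening the type or casting is one way out; the Addendum shows a cleaner fix.
+
+```ts
+// hypothetical sketch of the TypeScript friction mentioned above
+const { Items } = await dynamoDB.scan({ TableName: tableName }).promise();
+
+const response: ResponseBody = {
+  // Items is AttributeMap[] | undefined, so a strict Todo[] needs a cast
+  todos: (Items ?? []) as unknown as Todo[],
+};
+```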
+
+```ts
+// ./infrastructure/Lambda/get/lambda/index.ts
+
+import type { ResponseBody } from "api-specs/v1/getTodos";
+
+const { Items }: DynamoDB.ScanOutput = await dynamoDB
+  .scan({ TableName: tableName })
+  .promise();
+
+const response: ResponseBody = { todos: Items };
+
+// before:
+// return httpResponse(200, JSON.stringify({todos: Items}))
+// after:
+return httpResponse(200, JSON.stringify(response));
+```
+
+```ts
+// ./infrastructure/Lambda/post/lambda/index.ts
+
+import type { ResponseBody } from "api-specs/v1/postTodo";
+
+// this part is the same
+const todo: Todo = {
+  id: uuidv4(),
+  todo_completed,
+  todo_description,
+  todo_name,
+};
+await dynamoDB.put({ TableName: tableName, Item: todo }).promise();
+
+// Use the Response Type in the Lambda Handler
+const response: ResponseBody = { todo };
+
+// before:
+// return httpResponse(200, JSON.stringify({todo}))
+// after:
+return httpResponse(200, JSON.stringify(response));
+```
+
+```ts
+// ./infrastructure/Lambda/put/lambda/index.ts
+
+import type { ResponseBody } from "api-specs/v1/putTodo";
+
+const updatedTodo: Todo = {
+  id,
+  todo_name,
+  todo_description,
+  todo_completed,
+};
+
+// Use the Response Type in the Lambda Handler
+const response: ResponseBody = { todo: updatedTodo };
+
+// before:
+// return httpResponse(200, JSON.stringify({todo: updatedTodo}))
+// after:
+return httpResponse(200, JSON.stringify(response));
+```
+
+> Note: In the following sections, the files can be named and placed anywhere you prefer, and the scripts may need adjusting. What matters is that, if you plan to use them ubiquitously, every service repo follows the set pattern.
+
+### 3. Generate JSON schemas with the [`ts-json-schema-generator`](https://github.com/vega/ts-json-schema-generator) package
+
+> ```bash
+> ############# (We are here)
+> Using types -> JSON schemas -> OpenAPI spec -> schema diffing with Optic
+> ```
+
+This library generates JSON schemas, which in turn get used in the final OpenAPI spec. We have to use a somewhat elaborate script here, but it should not require modification and should be easy to reuse. Consider creating a package for these scripts for repeated use.
+
+> You can find the final code in the repo and the PR.
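+
+For orientation, before the script itself, here is roughly what the generated schema file for `deleteTodo.ts` might look like (an assumed, trimmed output; the exact contents depend on the generator version). The `definitions` key is what the upcoming `openapi.ts` file pulls into the OpenAPI `components.schemas` section.
+
+```json
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "definitions": {
+    "ResponseBody": {
+      "type": "object",
+      "properties": {
+        "message": { "type": "string" }
+      },
+      "required": ["message"],
+      "additionalProperties": false
+    }
+  }
+}
+```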
+
+```ts
+// ./infrastructure/api-specs/generate-json-schemas.ts
+
+import * as tsj from "ts-json-schema-generator";
+import * as fs from "fs";
+import * as path from "path";
+
+// Function to recursively find all .ts files in subdirectories and exclude 'openapi.ts'
+function findTsSchemaFiles(
+  dir: string,
+  fileList: string[] = [],
+  isRoot = true
+): string[] {
+  fs.readdirSync(dir, { withFileTypes: true }).forEach((dirent) => {
+    const fullPath = path.join(dir, dirent.name);
+    if (dirent.isDirectory()) {
+      // Process subdirectories; skip processing the api-specs folder root
+      if (!isRoot) {
+        fileList = findTsSchemaFiles(fullPath, fileList, false);
+      }
+    } else if (
+      dirent.isFile() &&
+      dirent.name.endsWith(".ts") &&
+      dirent.name !== "openapi.ts"
+    ) {
+      // Add only .ts files that are not named 'openapi.ts', and only if it's not in the root directory
+      if (!isRoot) {
+        fileList.push(fullPath);
+      }
+    }
+  });
+
+  // If it's the root directory, proceed to its subdirectories
+  if (isRoot) {
+    fs.readdirSync(dir, { withFileTypes: true }).forEach((dirent) => {
+      if (dirent.isDirectory()) {
+        fileList = findTsSchemaFiles(
+          path.join(dir, dirent.name),
+          fileList,
+          false
+        );
+      }
+    });
+  }
+
+  return fileList;
+}
+
+// Function to generate JSON schema from a TypeScript file
+function generateSchema(tsFilePath: string): void {
+  const schemaFilePath = tsFilePath.replace(".ts", ".schema.json");
+  const config = {
+    path: tsFilePath,
+    tsconfig: path.join(__dirname, "../tsconfig.json"),
+    noTypeCheck: true,
+    // generate schema for all types;
+    // RequestBody, ResponseBody and all the imported types they need
+    type: "*",
+    // avoid creating shared $ref definitions (which are not valid in OpenAPI)
+    // this results in JSON schema files that directly embed the type definitions,
+    // instead of referring to them via $ref
+    expose: "none" as const,
+  };
+
+  try {
+    const schema = tsj.createGenerator(config).createSchema(config.type);
+    fs.writeFileSync(schemaFilePath, JSON.stringify(schema, null, 2));
+    console.log(`Generated JSON schema for ${tsFilePath}`);
+  } catch (error) {
+    console.error(`Error generating JSON schema for ${tsFilePath}:`, error);
+  }
+}
+
+// Main execution
+const openApiFiles = findTsSchemaFiles(__dirname);
+openApiFiles.forEach(generateSchema);
+```
+
+```bash
+├── api-specs
+  ├── generate-json-schemas.ts
+  └── v1
+    ├── deleteTodo.ts
+    ├── getTodos.ts
+    ├── postTodo.ts
+    ├── putTodo.ts
+```
+
+### 4. Generate the OpenAPI spec with the [`openapi-types`](https://github.com/octokit/openapi-types.ts) package
+
+> ```bash
+> ############################# (We are here)
+> Using types -> JSON schemas -> OpenAPI spec -> schema diffing with Optic
+> ```
+
+We need to utilize this library and make a one-time investment to create an `openapi.ts` file. This is the file that will generate the OpenAPI spec. Note that if we add new endpoints to our API, we will need to add them to this file as well. If we have different versions of the API, we might need multiple files, but since these are all in code, using helper modules is a possibility, albeit at the cost of abstraction (see the sketch below).
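+
+As a minimal sketch of that helper-module idea (hypothetical, not in the repo), the repeated "200 / application/json / $ref" response blocks that appear throughout `openapi.ts` could be factored into a function:
+
+```ts
+// hypothetical helper, not in the repo: factors out the repeated
+// success-response blocks used in the openapi.ts file below
+import type { OpenAPIV3_1 } from "openapi-types";
+
+export const jsonResponse = (
+  schemaName: string
+): OpenAPIV3_1.ResponsesObject => ({
+  200: {
+    description: "Success",
+    content: {
+      "application/json": {
+        schema: { $ref: `#/components/schemas/${schemaName}` },
+      },
+    },
+  },
+});
+
+// usage in a path definition: responses: jsonResponse("getTodosV1")
+```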
+
+#### Create the `openapi.ts` file
+
+The general pattern in the file:
+
+- Import the JSON schemas created in the previous step: `import getTodosV1 from './getTodos.schema.json'`
+
+- In the components section, identify these schemas:
+
+  ```ts
+  components: {
+    schemas: {
+      getTodosV1: getTodosV1.definitions as OpenAPIV3_1.SchemaObject,
+    },
+  ```
+
+- Reference the components
+
+  ```ts
+  content: {
+    'application/json': {
+      schema: {
+        $ref: '#/components/schemas/getTodosV1',
+      },
+    },
+  },
+  ```
+
+Aside from adding new endpoints to our API, this file does not need any maintenance; new endpoints mostly mean copy-pasting an existing block.
+
+```ts
+// ./infrastructure/api-specs/v1/openapi.ts
+
+import type { OpenAPIV3_1 } from "openapi-types";
+import fs from "fs";
+import path from "path";
+import getTodosV1 from "./getTodos.schema.json";
+import deleteTodoV1 from "./deleteTodo.schema.json";
+import postTodoV1 from "./postTodo.schema.json";
+import putTodoV1 from "./putTodo.schema.json";
+
+export const openapi: OpenAPIV3_1.Document = {
+  openapi: "3.0.1",
+  info: {
+    title: "aws cdk in practice specification",
+    version: "1.0.0",
+  },
+  paths: {
+    "/": {
+      get: {
+        responses: {
+          200: {
+            description: "Success",
+            content: {
+              "application/json": {
+                schema: {
+                  $ref: "#/components/schemas/getTodosV1",
+                },
+              },
+            },
+          },
+        },
+      },
+      post: {
+        requestBody: {
+          required: true,
+          content: {
+            "application/json": {
+              schema: {
+                $ref: "#/components/schemas/postTodoV1",
+              },
+            },
+          },
+        },
+        responses: {
+          200: {
+            description: "Success",
+            content: {
+              "application/json": {
+                schema: {
+                  $ref: "#/components/schemas/postTodoV1",
+                },
+              },
+            },
+          },
+        },
+      },
+      put: {
+        requestBody: {
+          required: true,
+          content: {
+            "application/json": {
+              schema: {
+                $ref: "#/components/schemas/putTodoV1",
+              },
+            },
+          },
+        },
+        responses: {
+          200: {
+            description: "Success",
+            content: {
+              "application/json": {
+                schema: {
+                  $ref: "#/components/schemas/putTodoV1",
+                },
+              },
+            },
+          },
+        },
+      },
+    },
+    "/{id}": {
+      delete: {
+        parameters: [
+          {
+            name: "id",
+            in: "path",
+            required: true,
+            schema: {
+              type: "string",
+            },
+          },
+        ],
+        responses: {
+          200: {
+            description: "Success",
+            content: {
+              "application/json": {
+                schema: {
+                  $ref: "#/components/schemas/deleteTodoV1",
+                },
+              },
+            },
+          },
+        },
+      },
+    },
+  },
+  components: {
+    schemas: {
+      getTodosV1: getTodosV1.definitions as OpenAPIV3_1.SchemaObject,
+
+      deleteTodoV1: deleteTodoV1.definitions as OpenAPIV3_1.SchemaObject,
+
+      postTodoV1: postTodoV1.definitions as OpenAPIV3_1.SchemaObject,
+
+      putTodoV1: putTodoV1.definitions as OpenAPIV3_1.SchemaObject,
+    },
+  },
+};
+
+const filePath = path.join(__dirname, "openapi.json");
+fs.writeFileSync(filePath, JSON.stringify(openapi, null, 2));
+```
+
+```
+├── api-specs
+  ├── generate-json-schemas.ts
+  └── v1
+    ├── openapi.ts
+    ├── deleteTodo.ts
+    ├── getTodos.ts
+    ├── postTodo.ts
+    ├── putTodo.ts
+```
+
+#### Create a script to execute the `openapi.ts` file and generate the OpenAPI spec.
+
+Now that we have the `openapi.ts` file, we need a script to find the `openapi.ts` file(s) and execute them in order to generate the OpenAPI spec. There may be multiple version folders, and the script accommodates that.
+
+```ts
+// ./infrastructure/api-specs/generate-openapi-docs.ts
+
+import fs from "fs";
+import path from "path";
+
+// Function to recursively find all openapi.ts files
+function findOpenApiFiles(dir: string, fileList: string[] = []): string[] {
+  fs.readdirSync(dir, { withFileTypes: true }).forEach((dirent) => {
+    const filePath = path.join(dir, dirent.name);
+    if (dirent.isDirectory()) {
+      fileList = findOpenApiFiles(filePath, fileList);
+    } else if (dirent.isFile() && dirent.name === "openapi.ts") {
+      fileList.push(filePath);
+    }
+  });
+  return fileList;
+}
+
+// Find all openapi.ts files under the api-specs folder
+const openApiFiles = findOpenApiFiles(__dirname);
+console.log(openApiFiles);
+
+// Import and execute each openapi.ts file to generate openapi.json
+openApiFiles.forEach((file) => {
+  import(path.resolve(file))
+    .then(() => console.log(`Generated OpenAPI document for ${file}`))
+    .catch((err) =>
+      console.error(`Error generating OpenAPI document for ${file}:`, err)
+    );
+});
+```
+
+We also include a bonus script to reset/delete all the generated JSON files, for demo purposes.
+
+```ts
+// ./infrastructure/api-specs/delete-json-files.ts
+
+import fs from "fs";
+import path from "path";
+
+// Function to recursively delete all .json files
+function deleteJsonFiles(dir: string): void {
+  fs.readdirSync(dir, { withFileTypes: true }).forEach((dirent) => {
+    const fullPath = path.join(dir, dirent.name);
+    if (dirent.isDirectory()) {
+      // Recursively delete .json files in subdirectories
+      deleteJsonFiles(fullPath);
+    } else if (dirent.isFile() && dirent.name.endsWith(".json")) {
+      // Delete the file if it's a .json file
+      fs.unlinkSync(fullPath);
+      console.log(`Deleted file: ${fullPath}`);
+    }
+  });
+}
+
+const apiSpecsDir = path.join(__dirname);
+deleteJsonFiles(apiSpecsDir);
+```
+
+```bash
+├── api-specs
+  ### these script files can be a part of a package
+  ├── generate-json-schemas.ts
+  ├── generate-openapi-docs.ts
+  ├── delete-json-files.ts
+  └── v1
+    ├── openapi.ts
+    ├── deleteTodo.ts
+    ├── getTodos.ts
+    ├── postTodo.ts
+    ├── putTodo.ts
+```
+
+> The generated OpenAPI file is JSON, for the example's purposes and to avoid intermixing it with the previously referenced http-capture approach, which uses the YAML file type.
+
+### 5. Perform schema governance with [Optic](https://www.useoptic.com/)
+
+Optic helps detect schema changes in our OpenAPI specification (`openapi.json` or `.yml`). It categorizes these changes as either breaking or non-breaking, which is key for identifying them. This is something we do not get with our own testing; we would just update the types and/or the tests, and would not really know whether they would break future service integrations unless we were consistently careful and knowledgeable.
+
+We can use Optic locally, but CI is where it shines: it makes such changes or breakages in a schema obvious, and with Optic Cloud we not only get analytics and history but also a neat representation of our OpenAPI spec in the form of online documentation.
+
+Now that we have the OpenAPI spec, we can create a few utility scripts. You can reference the repo or the PR for the script details, and modify them to your needs in the real world. We have differentiated the -json vs -yml scripts for the reasons mentioned earlier regarding the two approaches.
+
+```bash
+yarn update:api-docs # generates JSON schemas and OpenAPI docs
+yarn optic:lint-json # lints our OpenAPI spec for validity
+yarn optic:diff-json # detects breaking schema changes vs main with Optic
+```
+
+```json
+"reset:schemas": "npx ts-node ./api-specs/delete-json-files.ts",
+"build:schemas": "npx ts-node ./api-specs/generate-json-schemas.ts",
+"build:open-api": "npx ts-node ./api-specs/generate-openapi-docs.ts",
+"update:api-docs": "yarn reset:schemas && yarn build:schemas && yarn build:open-api",
+"optic:diff-json": "optic diff ./api-specs/v1/openapi.json --base main --check",
+"optic:lint-json": "optic lint ./api-specs/v1/openapi.json",
+```
+
+Note that in this repository we are already running Optic Cloud with the http-capture approach in CI. We cannot keep two OpenAPI specifications (one for the TS approach and one for the http-capture approach) and perform schema governance on both. However, we will propose a simple CI config for the TS approach; mind that in the repo you will find the http-capture approach working in CI.
+
+```yml
+name: Optic-cloud-features
+on:
+  pull_request:
+    types: [opened, reopened, edited, synchronize]
+
+concurrency:
+  group: ${{ github.ref }} && ${{ github.workflow }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+jobs:
+  optic-cloud:
+    runs-on: ubuntu-latest
+    permissions:
+      id-token: write
+      contents: write
+      pull-requests: write
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4.1.1
+
+      - name: Use node
+        uses: actions/setup-node@v3.8.2
+        with:
+          node-version-file: .nvmrc
+          cache: yarn
+
+      - name: Install dependencies
+        run: yarn install --frozen-lockfile
+
+      - name: Update api docs
+        run: yarn update:api-docs
+
+      # include a preview and changelog in each PR comment
+      # sync every OpenAPI spec to Optic Cloud, diff & lint the schema as well
+      - name: Run Optic
+        uses: opticdev/action@v1
+        with:
+          # Your Optic Cloud Token
+          optic_token: ${{ secrets.OPTIC_TOKEN }}
+          # A GitHub token with access to create comments on pull requests
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          # If true, standard check failures will cause this action to fail.
+          # If false, standard check failures will show in PR comments and
+          # in Optic Cloud but will not cause the action to fail
+          standards_fail: true
+          additional_args: --match **/**/openapi.json
+          compare_from_pr: main
+          compare_from_push: main
+        env:
+          CI: true
+```
+
+### Comparison of the TS approach vs http-capture approach
+
+#### TypeScript-based Approach with Optic: Use Case Scenario:
+
+1. **Identifying Type Changes**: A breaking change is introduced to our TypeScript types, which could potentially alter the structure or behavior of our API.
+2. **Auto-Generating OpenAPI Documentation**: Following these type changes, our OpenAPI documentation is automatically regenerated using the `update:api-docs` command. This process ensures that the OpenAPI specification reflects the latest state of our TypeScript types.
+3. **Detecting Breaking Changes with Optic**: The command `optic:diff-json` is used to detect any breaking changes in the updated OpenAPI specification. This step is crucial for identifying discrepancies that could affect API consumers.
+4. **Decision Making and Communication**:
+   - **Rollback or Update**: Based on the nature of the detected changes, we decide whether to roll back the changes to our types or proceed with updating the API documentation to reflect these changes.
  - **Consumer Notification and Version Update**: For significant changes, especially those that are breaking, we notify service consumers of the potential impact. Additionally, we update the version of our OpenAPI specification. This step is essential to communicate the change effectively and ensure that Optic's checks recognize the update as a deliberate and managed change.
+
+This approach emphasizes the role of TypeScript types in driving the API documentation process. By automatically generating the OpenAPI specification from TypeScript types, we ensure that our documentation is always in sync with our codebase. Moreover, it highlights the use of Optic as a tool for governing API schema changes, ensuring that updates are tracked, managed, and communicated effectively.
+
+#### HTTP-based Approach with Optic: Use Case Scenario
+
+Optic can also validate the accuracy of the OpenAPI spec by capturing traffic from E2E tests and comparing it against the spec (`openapi.json` or `.yml` file).
+
+```bash
+# in ./infrastructure folder
+yarn optic:verify # Captures E2E test traffic, detects breaking schema changes
+```
+
+Similarly, `optic:update` allows for interactive updates to the OpenAPI spec; it works like `optic:verify` but includes prompts for additional changes observed during E2E test capture.
+
+```bash
+# in ./infrastructure folder
+yarn optic:update
+```
+
+We would generally run `optic:verify` in CI to vet our HTTP tests against our OpenAPI specification. We would run `optic:update` when we know there are changes in our code; not necessarily in the types, it could be anything. We would update the code, perhaps update the HTTP tests, run `optic:verify`, and record a new OpenAPI spec. This usage is very similar to Jest snapshot testing, where the snapshot is the OpenAPI spec.
+
+![diagram](https://www.useoptic.com/img/proxy-diagram.png)
+
+Use case scenario:
+
+1. **Identifying Potential Breaking Changes**: A black-box breaking change is made in our service code. This change isn't necessarily related to type definitions but could impact the behavior of the API.
+
+2. **Verification with Optic**: We use `optic:verify` to identify these changes.
+   - **Real E2E Tests**: This involves executing real end-to-end tests against a local server or a deployment with our code changes.
+   - **Comparison Against OpenAPI Spec**: During this process, Optic verifies the traffic captured during these tests against our existing OpenAPI specification.
+
+3. **Updating OpenAPI Documentation**: If the actual behavior (captured traffic) does not match the current OpenAPI documentation, Optic suggests that we update the OpenAPI docs, so we utilize `optic:update` to bring the documentation in line with reality. This ensures that `optic:verify` can now successfully pass, providing us with an accurate coverage report of our OpenAPI documentation.
+
+4. **Detecting Breaking Changes**: The `optic:diff` command is used to identify any breaking changes compared to the main/master branch. This step is crucial for understanding the impact of recent changes on the overall API.
+
+5. **Making Informed Decisions**: Depending on the nature of the detected changes, we make a decision:
+   - **Discarding or Updating**: We either discard the recent changes or update our API documentation using `optic:update`.
+   - **Communication and Versioning**: For significant or breaking changes, we communicate these changes to our service consumers.
Additionally, we update the version of our OpenAPI specification to reflect these changes, ensuring that Optic's checks pass.
+
+This approach effectively leverages Optic's capabilities to manage and maintain accurate and up-to-date API documentation, especially in the context of continuous integration and delivery. It highlights the importance of aligning actual service behavior with documented API contracts, ensuring consistency and reliability for API consumers.
+
+### Conclusion
+
+In conclusion, this exploration into generating OpenAPI documentation from TypeScript types presents a viable alternative to the HTTP-capturing method. While both approaches have their merits, the TypeScript-based method offers a cost-effective and type-safe strategy, ensuring that API documentation remains closely aligned with our codebase. It is particularly beneficial for teams seeking to automate their documentation process while keeping things simple and fast, though it does require the one-time setup investment and updates to the `openapi.ts` file when there are new API endpoints.
+
+On the other hand, using real HTTP tests to qualify our OpenAPI documentation gives us the ability to not only verify but also modify our spec. Reality is often different from our wishful perception of it, and the HTTP-capturing method is immune to that. The coverage report of our HTTP tests versus our OpenAPI docs is a killer feature as well, giving us the proof of coverage that matters: "_Are our tests covering what we publish?_"
+
+> This is a much better alternative to source code coverage, akin to ui-interaction-coverage in Cypress Cloud (for UI apps).
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/o5ftgzb41ccdqqknvivw.png)
+
+With either approach we get the schema-governing features of Optic: optic diff locally or in CI, terminal output in the free version, and a human-readable web version with PR comments using Optic Cloud.
+
+As a reminder, here are the Optic Cloud benefits in brief:
+
+- optic diff-all (runs multiple specs at once instead of having to pick each OpenAPI spec)
+- PR comments
+- [Catalogue of your API changes over time, analytics](https://app.useoptic.com/organizations/61d22cd6-d47c-478f-885d-677f8a89449f/apis)
+- centralized style guides, with AI
+- support
+
+Whether we use the HTTP-capturing or the TypeScript-based approach, we still get the same schema governance with Optic.
+
+![alt](https://www.useoptic.com/changelog2.jpg)
+
+![alt](https://www.useoptic.com/_next/image?url=%2F_next%2Fstatic%2Fmedia%2Fcompared-to-github.77af37a6.png&w=3840&q=75)
+
+Looking ahead, further enhancements could include exploring ways to integrate these methods into more complex, multi-service architectures, starting with creating an internal package for the scripts that would otherwise be repeated in each service repo.
+
+Your feedback and experiences with these approaches are invaluable; feel free to share your thoughts and insights.
+
+### Addendum: Using the Types at the Lambdas as the Source of Truth (Recommended)
+
+Previously, we created type definitions in the api-specs folder and then used them in the lambda. This approach ensured that the same type was being used in both the lambda and the OpenAPI spec, thereby keeping the two aligned.
+
+```
+types in getTodos -> lambda && OpenAPI spec
+```
+
+However, a better approach might be to define a type in the lambda and then use it directly in the OpenAPI spec, with the getTodos type file serving as an intermediary.
+
+```
+lambda -> types in getTodos -> OpenAPI spec
+```
+
+In this manner, the lambda becomes the source of truth.
+
+```ts
+// ./infrastructure/Lambda/delete/lambda/index.ts
+
+// (1) define & export a type for the response body,
+export type DeleteResponseBody = {
+  message: string;
+};
+
+export const handler = async (event: DeleteEvent) => {
+  // ...
+
+  // (2) and use it in the Lambda Handler
+  const response: DeleteResponseBody = {
+    message: 'Todo deleted successfully.',
+  };
+
+  return httpResponse(200, JSON.stringify(response));
+};
+```
+
+Reuse the exported `DeleteResponseBody` in the API docs.
+
+```ts
+// ./infrastructure/api-specs/v1/deleteTodo.ts
+
+// (3) re-use the exported type in api docs
+import type {DeleteResponseBody} from '../../Lambda/delete/lambda/index';
+
+export type ResponseBody = DeleteResponseBody;
+```
+
+For the POST endpoint:
+
+```ts
+// ./infrastructure/Lambda/post/lambda/index.ts
+
+export type PostBody = {
+  todo: Todo;
+};
+
+export const handler = async (event: PostEvent) => {
+  // ...
+
+  const response: PostBody = { todo };
+
+  return httpResponse(200, JSON.stringify(response));
+};
+```
+
+```ts
+// ./infrastructure/api-specs/v1/postTodo.ts
+
+import type {PostBody} from '../../Lambda/post/lambda/index';
+
+export type ResponseBody = PostBody;
+export type RequestBody = PostBody;
+```
+
+For the PUT endpoint:
+
+```ts
+// ./infrastructure/Lambda/put/lambda/index.ts
+
+export type PutBody = {
+  todo: Todo;
+};
+
+export const handler = async (event: PutEvent) => {
+  // ...
+
+  const response: PutBody = { todo: updatedTodo };
+
+  return httpResponse(200, JSON.stringify(response));
+};
+```
+
+```ts
+// ./infrastructure/api-specs/v1/putTodo.ts
+
+import type {PutBody} from '../../Lambda/put/lambda/index';
+
+export type ResponseBody = PutBody;
+export type RequestBody = PutBody;
+```
+
+The GET endpoint is particularly noteworthy as it differentiates this approach from the previous one. In the past, we struggled to make `Todo[]` compliant with the more generic DynamoDB expectation. Now, we can create a type that works for our lambda and also generates more precise API documentation.
+
+```ts
+// ./infrastructure/Lambda/get/lambda/index.ts
+
+export type GetResponseBody = {
+  todos: Partial<Todo>[] | undefined;
+};
+
+export const handler = async () => {
+  const {Items} = await dynamoDB.scan({TableName: tableName}).promise();
+
+  const response: GetResponseBody = {todos: Items};
+
+  return httpResponse(200, JSON.stringify(response));
+};
+```
+
+```ts
+// ./infrastructure/api-specs/v1/getTodos.ts
+
+import type {GetResponseBody} from '../../Lambda/get/lambda/index';
+
+export type ResponseBody = GetResponseBody;
+```
+
+Here is the [PR](https://github.com/muratkeremozcan/aws-cdk-in-practice/pull/19) that captures the above changes.
+
+
+================================================
+FILE: blog/Building Custom Request Filters for PactJs Verifications in Express and Non-Express Environments.mkd
+================================================
+# Building Custom Request Filters for PactJs Verifications in Express and Non-Express Environments
+
+When working with PactJs contract testing, [request filters](https://docs.pact.io/implementation_guides/javascript/docs/provider#verification-options) are essential for modifying HTTP requests during the verification process between a consumer and a provider. Request filters allow you to add headers, modify request bodies, or handle authentication tokens before requests are sent to the provider.
However, implementing these filters can be challenging when working across non-Express setups.
+
+> Pact docs recommend using the `requestFilter` feature only for things that cannot be persisted in the pact file. Auth tokens are a common use case.
+
+This blog post will walk through how to create a custom request filter that adds an Authorization header, which works both in Express environments (where middleware functions handle requests) and in non-Express environments such as lambdas. Additionally, we'll explore why the solution is designed as a higher-order function and how it accommodates Pact's express-like type requirements.
+
+Here is a link to the [PR](https://github.com/muratkeremozcan/pact-js-example-provider/pull/97) with the specific changes & the [source code](https://github.com/muratkeremozcan/pact-js-example-provider).
+
+#### The Problem:
+
+In certain contract testing scenarios with Pact, you may need a mechanism to modify HTTP requests, such as injecting an Authorization token into headers before the requests are sent to the provider for verification. When testing in non-Express environments, the issue lies with Pact types requiring an Express-like shape for request handling.
+
+Express middleware typically requires three arguments: `req`, `res`, and `next`. In non-Express environments, only the request object might be available, and the absence of the `next` function (used to pass control to the next middleware) can break the flow of the request-handling logic. Thus, the solution must accommodate both cases while ensuring the logic remains flexible, especially for custom token generation.
+
+#### The Solution: A Higher-Order Function
+
+To solve this issue, the request filter is implemented as a **higher-order function**. This allows flexibility in how token generation logic is injected into the request handler. Additionally, it conforms to the Pact verifier's expectations of handling the `req`, `res`, and `next` arguments. This way, the filter can work seamlessly with both Express and non-Express environments.
+
+Let's break down the code:
+
+```typescript
+// generic HttpRequest structure to accommodate both Express and non-Express environments
+type HttpRequest = {
+  headers: Record<string, string>
+  body?: unknown
+}
+
+type NextFunction = () => void | undefined
+
+// allows customization of token generation logic
+type RequestFilterOptions = {
+  tokenGenerator?: () => string
+}
+```
+
+Here, `HttpRequest` is a generic structure that can represent both Express requests and non-Express requests. The `RequestFilterOptions` allows for customizable token generation by providing an optional `tokenGenerator` function.
+
+```typescript
+const handleExpressEnv = (
+  req: HttpRequest,
+  next: NextFunction
+): HttpRequest | undefined => {
+  // If this is an Express environment, call next()
+  if (next && typeof next === 'function') {
+    next()
+  } else {
+    // In a non-Express environment, return the modified request
+    return req
+  }
+}
+```
+
+The `handleExpressEnv` function is the key to managing both Express and non-Express environments. It checks if `next` exists and, if so, it assumes the environment is Express and passes control to the next middleware. Otherwise, it simply returns the modified request for non-Express environments. The `else` clause can be modified to suit your needs.
+
+```typescript
+const createRequestFilter =
+  (options?: RequestFilterOptions): ProxyOptions['requestFilter'] =>
+  (req, _, next) => {
+    const defaultTokenGenerator = () => new Date().toISOString()
+    const tokenGenerator = options?.tokenGenerator || defaultTokenGenerator
+
+    // add an authorization header if not present
+    if (!req.headers['Authorization']) {
+      req.headers['Authorization'] = `Bearer ${tokenGenerator()}`
+    }
+
+    return handleExpressEnv(req, next)
+  }
+```
+
+The `createRequestFilter` is a higher-order function because it returns a function that will be used to filter requests. It allows for the optional injection of a custom token generator. Inside the function, if the `Authorization` header is missing, a token is generated and added to the request headers. After modifying the headers, it hands off the request to `handleExpressEnv` for environment-appropriate handling.
+
+```typescript
+// if you have a token generator, pass it as an option
+// createRequestFilter({ tokenGenerator: myCustomTokenGenerator })
+export const requestFilter = createRequestFilter()
+
+export const noOpRequestFilter: ProxyOptions['requestFilter'] = (
+  req,
+  _,
+  next
+) => handleExpressEnv(req, next)
+```
+
+Here, we define two exports:
+
+- `requestFilter` is the default filter that adds the `Authorization` header.
+- `noOpRequestFilter` is a no-operation filter that doesn't modify the request but still handles the environment appropriately.
+
+These two exports are used to build verifier options for Pact tests. The `noOpRequestFilter` can be used as a default value for the [`buildVerifierOptions` function](https://github.com/muratkeremozcan/pact-js-example-provider/blob/main/src/test-helpers/pact-utils.ts#L70), while `requestFilter` can be used [directly in our tests](https://github.com/muratkeremozcan/pact-js-example-provider/blob/main/src/provider-contract.pacttest.ts#L25) to modify the HTTP request.
+
+#### Key Takeaways:
+
+- **Higher-Order Functions**: By using a higher-order function, we provide flexibility for future customization, like custom token generation. This design pattern is crucial in ensuring reusable and customizable logic.
+- **Environment Agnosticism**: The combination of `handleExpressEnv` and higher-order functions allows the filter to work in both Express and non-Express environments. This makes the code more robust and versatile across different contexts.
+- **Pact's express-like type requirements**: The filter satisfies Pact's need to handle three arguments (`req`, `res`, and `next`), even if the environment doesn't use Express, ensuring compatibility during the contract testing process.
+
+#### Conclusion:
+
+In summary, this custom request filter solves the challenge of modifying HTTP requests in both Express and non-Express environments while allowing for future customization. Using a higher-order function ensures that we can inject different token generation strategies, providing flexibility and maintaining compatibility with Pact's express-like type requirements.
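+
+For reference, wiring the filter into the verifier might look roughly like this (a sketch: the provider name, port, and pact source are placeholders, and the import path assumes the filter lives in a local module):
+
+```typescript
+import { Verifier } from '@pact-foundation/pact'
+import { requestFilter } from './request-filter' // hypothetical local module
+
+new Verifier({
+  provider: 'MyProvider', // placeholder provider name
+  providerBaseUrl: 'http://localhost:3001', // placeholder port
+  pactUrls: ['./pacts/consumer-provider.json'], // placeholder pact source
+  requestFilter // injects the Authorization header before each replayed request
+})
+  .verifyProvider()
+  .then(() => console.log('Pact verification complete'))
+```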
+
+
+================================================
+FILE: blog/Building the test architecture, increasing adoption, improving the developer experience.mkd
+================================================
+# The 32+ ways of selective testing with Cypress: a unified, concise approach to selective testing in CI and on local machines
+
+- [Building the test architecture, increasing adoption, improving the developer experience](#building-the-test-architecture-increasing-adoption-improving-the-developer-experience)
+- [The many ways of selecting tests in Cypress](#the-many-ways-of-selecting-tests-in-cypress)
+  - [Built-in ways of selective testing](#built-in-ways-of-selective-testing)
+  - [Selective tests with plugins](#selective-tests-with-plugins)
+- [Combined ways of selecting tests](#combined-ways-of-selecting-tests)
+  - [Notice that there are a few test negators](#notice-that-there-are-a-few-test-negators)
+  - [Notice that only certain methods can work in combination](#notice-that-only-certain-methods-can-work-in-combination)
+  - [An example for a very specific case with 5 combinations](#an-example-for-a-very-specific-case-with-5-combinations)
+- [How can it work in the CI as it works locally with Cypress runner?](#how-can-it-work-in-the-ci-as-it-works-locally-with-cypress-runner)
+  - [Handle the environments in config files, and define a custom environment variable](#handle-the-environments-in-config-files-and-define-a-custom-environment-variable)
+  - [Abstract away the logic in the test](#abstract-away-the-logic-in-the-test)
+  - [Use it with the GitHub Action](#use-it-with-the-github-action)
+    - [Testing against localhost in CI](#testing-against-localhost-in-ci)
+    - [Testing against deployments in CI](#testing-against-deployments-in-ci)
+
+## Building the test architecture, increasing adoption, improving the developer experience
+
+A common challenge faced while building the test architecture is deciding which e2e tests to execute or skip per deployment, and when to add secondary combinations of browsers and viewports. Once these are identified, the goal is to increase adoption and improve the developer experience when executing e2e tests locally and in CI.
+
+In the context of Cypress, let's explore built-in ways of selecting tests, plugins that expand the possibilities, the GitHub action that provides CI conveniences, and how all of these can combine harmoniously for a similar developer experience between local machines and CI.
+
+## The many ways of selecting tests in Cypress
+
+### Built-in ways of selective testing
+
+There are a few options for selective testing that come built-in with Cypress.
+
+- Using [config files](https://docs.cypress.io/guides/references/configuration#Folders-Files) - my personal favorite way of doing it, because we have one file per deployment, and most if not all of the configuration can be done here.
+
+  For example, when running against the dev deployment, you want to ignore test files under the prod folder. At `cypress/config/dev.js`:
+
+  ```js
+  import { defineConfig } from "cypress";
+
+  export default defineConfig({
+    projectId: "123abc",
+    defaultCommandTimeout: 10000,
+    retries: {
+      runMode: 2,
+      openMode: 0,
+    },
+    e2e: {
+      setupNodeEvents(on, config) {},
+      baseUrl: "https://www.deployed-dev.com",
+      excludeSpecPattern: "**/prod/*",
+    },
+  });
+  ```
+
+  The `testFiles` property would work the opposite way, only running specific tests for that configuration.
+
+  > Assume we are passing in `--config-file` when running or opening Cypress.
+  > `"cypress:open-dev": "cypress open --config-file cypress/config/dev.js"`
+
+- Using the [command line](https://docs.cypress.io/guides/references/configuration#Command-Line):
+
+  > takes precedence over the config file
+
+  `cypress run --excludeSpecPattern="**/prod/*"` - would overwrite the config file.
+  `cypress run --browser firefox` - would add to the config file. You can use this when you want the config file to apply to all common deployments -ex: dev and stage deployments- but you want to control the browser choice in CI.
+
+- Using CLI [environment variables](https://docs.cypress.io/guides/references/configuration#Environment-Variables)
+
+  > takes precedence over the config file
+
+  `CYPRESS_VIEWPORT_WIDTH=800 cypress run` - very similar to the command line style.
+
+- Using the [configuration API](https://docs.cypress.io/api/plugins/configuration-api#Usage) - advanced, can be overkill. We have not needed to use it at work, yet.
+
+- Within the test, [Cypress.config()](https://docs.cypress.io/guides/references/configuration#Cypress-config) - useful if you need one-off specs or individual tests to be an exception.
+
+  > takes precedence over other ways of configuration
+
+  `Cypress.config({ viewportWidth: 1280, viewportHeight: 720 })`
+
+- Within the test, using the configuration object - similar usage to `Cypress.config()`, and very practical.
+
+  > takes precedence over other ways of configuration
+
+  ```javascript
+  describe('login', { viewportWidth: 1280, viewportHeight: 720 }, () => {
+    it('should login', () => { /* ... */ })
+  })
+  ```
+
+### Selective tests with plugins
+
+These are some personal favorites, for the control and specificity they provide and for how they combine with the built-in ways of selecting tests.
+
+- [cypress-grep](https://github.com/cypress-io/cypress-grep) - for example, for a certain deployment you want to only run tests with a certain string in the title, or only run the tests that have a tag.
+
+  Assume you have a few tests, and one of them is this:
+
+  ```javascript
+  it('auth user login', { tags: 'smoke' }, () => {
+    ...
+  })
+  ```
+
+  ```bash
+  # run only the tests with "auth user" in the title
+  $ npx cypress run --env grep="auth user"
+  # run tests with "login" or "auth user" in their titles
+  # by separating them with ";" character
+  $ npx cypress run --env grep="login; auth user"
+  # run only the tests tagged "smoke"
+  $ npx cypress run --env grepTags=@smoke
+  # but also those that have "auth" in their titles
+  $ npx cypress run --env grep=auth,grepTags=smoke
+  ```
+
+- [cypress-skip-test](https://github.com/cypress-io/cypress-skip-test) - this one is special for its ability to negate tests and to support combinations in itself
+
+  ```javascript
+  it("combination of skip and only", () => {
+    cy.skipOn("firefox");
+    cy.onlyOn("electron").onlyOn("mac");
+    cy.log("running test");
+  });
+  ```
+
+## Combined ways of selecting tests
+
+### Notice that there are a few test negators
+
+You have two options to skip tests. If the rest of the test selection methods are synonymous with `array.filter`, these would be synonymous with `array.unfilter`/`array.skip`.
+
+1. The `excludeSpecPattern` property in a config file can skip folders or tests.
+
+2. `cy.skipOn()` / `skipOn()` can skip test blocks -`describe`, `context`- or individual tests -`it`.
+
+### Notice that only certain methods can work in combination
+
+The built-in group methods take precedence over each other, or add on to the configuration where they do not overlap.
For example, we can use the config file for most of the deployment configuration, and add on which browser to run the tests with via the command line.
+
+The plugin group methods can work with the built-in methods, expanding our choices.
+
+Not including the [configuration API](https://docs.cypress.io/api/plugins/configuration-api#Usage), there are 5 practical ways of combining configurations: the config file, the CLI, the configuration object within the test, cypress-grep, and cypress-skip-test. That is 2^5 = 32 combinations at least! Thanks, Cypress!
+
+### An example for a very specific case with 5 combinations
+
+Imagine you have a very specific need for a test execution:
+
+- needs to run against the dev deployment
+- needs to run with Firefox
+- needs to only run the smoke tests
+- needs to run in a certain viewport for a spec/top describe block _(usually viewport is better as a CLI param, but assume we only need it for one spec so we can show a use case for the configuration object)_
+- needs to skip on mac, and still execute on other OSs
+
+How would we tackle this?
+
+In the CLI:
+`cypress run --config-file cypress/config/dev.js --browser firefox --env grepTags=@smoke`
+
+In the test:
+
+```javascript
+describe('login', { viewportHeight: 600, viewportWidth: 1000 }, () => {
+  it('auth user login', { tags: 'smoke' }, () => {
+    cy.skipOn('mac')
+    // the rest of the test
+  })
+})
+```
+
+As you can see, we have plenty of control over how we execute our tests; even five combinations is overkill for most use cases. Usually we will be OK with 3 combinations of selective test configuration.
+
+## How can it work in the CI as it works locally with Cypress runner?
+
+Assume we have many applications and services we are using Cypress with. We will have many yml files and some templating functionality (for example, reusable workflows in GitHub Actions).
+
+Wouldn't it be nice if our engineers could have the same experience with their specific CI configurations as they have locally on their laptops with the Cypress runner? In the Cypress runner, the engineer picks a browser and just executes the test(s). How can we abstract away CI configuration complexities into minimal sources of truth so that we can have a unified, concise approach to selective testing?
+
+> The ideas are borrowed from an [older pattern of doing selective testing](https://cypress.slides.com/cypress-io/siemens-case-study#/3/1/1) and the [cypress-skip-test plugin](https://github.com/cypress-io/cypress-skip-test#environment).
+
+### Handle the environments in config files, and define a custom environment variable
+
+We are able to set custom env vars per Cypress config file, abstract away the logic, and have a declarative syntax for manipulating selective testing.
+
+From the [Cypress docs](https://docs.cypress.io/guides/guides/environment-variables#Option-1-configuration-file), we know that _any key/value you set in your [configuration file](https://docs.cypress.io/guides/references/configuration) under the `env` key will become an environment variable._
+
+At `cypress/config/dev.js`, we can have a custom variable ENVIRONMENT, and match the value with the name of the config file `dev`:
+
+```js
+import { defineConfig } from "cypress";
+
+export default defineConfig({
+  projectId: "123abc",
+  defaultCommandTimeout: 10000,
+  retries: {
+    runMode: 2,
+    openMode: 0,
+  },
+  e2e: {
+    setupNodeEvents(on, config) {},
+    baseUrl: "https://www.deployed-dev.com",
+    excludeSpecPattern: "**/prod/*",
+    env: {
+      ENVIRONMENT: "dev",
+    },
+  },
+});
+```
+
+### Abstract away the logic in the test
+
+After this, cypress-skip-test has access to the custom variable. Instead of something long and imperative such as:
+
+```javascript
+cy.skipOn(Cypress.config("baseUrl") === "https://www.deployed-dev.com");
+```
+
+We can use:
+
+```javascript
+cy.skipOn("dev");
+```
+
+> note that [cypress-grep](https://github.com/cypress-io/cypress-grep) can also use the "env" property
+
+### Use it with the GitHub Action
+
+We know three facts, and we want a way to combine them.
+
+1. From the [Cypress docs](https://docs.cypress.io/guides/guides/environment-variables#Option-1-configuration-file), we know that we can set any custom environment variable in the [configuration file](https://docs.cypress.io/guides/references/configuration) under the `env` key.
+
+2. Cypress maintains a [very neat GitHub Action](https://github.com/cypress-io/github-action) that makes CI usage convenient with custom parameters.
+
+3. With GitHub Actions we can [set a custom environment variable](https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#setting-an-environment-variable) with `echo "{name}={value}" >> $GITHUB_ENV`
+
+If these ideas can be combined, and the CI can figure out which deployment it is executing the tests against, we can give engineers the same experience in the CI as they have with the Cypress runner locally on their laptops.
+
+#### Testing against localhost in CI
+
+Let us do the simpler case first, where CI does not yet have to figure out what deployment it is running against. For testing against localhost in CI, we can just combine setting a custom variable in the Cypress config file (fact 1) with the custom properties from the Cypress GitHub Action (fact 2). After all, it is good practice to have separate ymls for PR testing and deployment testing.
+
+Assume we have a config file for local testing, `cypress/config/local.js`:
+
+```js
+import { defineConfig } from 'cypress'
+
+export default defineConfig({
+  projectId: "123abc",
+  defaultCommandTimeout: 10000,
+  retries: {
+    runMode: 2,
+    openMode: 0
+  },
+  e2e: {
+    setupNodeEvents(on, config) {},
+    baseUrl: "http://localhost:3000",
+    excludeSpecPattern: "**/prod/*",
+    env: {
+      ENVIRONMENT: "local"
+    }
+  }
+})
+```
+
+With this config file setup we tell Cypress which config it should use and which environment it should run against (1).
+
+In the below GitHub workflow configuration file, let's say `local-e2e.yml`, we specify which config file to use with the Cypress GitHub Action's `config-file` property (2).
+
+Just like how we use the Cypress runner as a local user, we pick which browser to use with the `browser` property of the action.
+
+At this point, once Cypress launches in the CI step, it knows there is a property `"ENVIRONMENT"` with the value of `"local"`. We have linked facts (1) and (2); the Cypress config file can easily work with the Cypress GitHub Action.
+
+This way we can have the same experience in the CI as we would on our laptop with the Cypress runner, and use something like `cy.skipOn('dev')` in the tests without worrying about any additional CI configuration.
+
+```yaml
+# tests against the app being served locally while running in CI
+name: e2e local
+on:
+  pull_request:
+    types: [opened, reopened, edited, synchronize]
+
+# if this branch is pushed back to back, cancel the older branch's workflow
+concurrency:
+  group: ${{ github.ref }} && ${{ github.workflow }}
+  cancel-in-progress: true
+
+jobs:
+  test:
+    strategy:
+      # uses 2 CI machines to run tests in parallel
+      matrix:
+        machines: [1, 2]
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Cypress tests 🧪 on local
+        uses: cypress-io/github-action@v4
+        with:
+          # the action gives us the ability to pick the browser
+          browser: chrome
+          # for localhost testing, we need a command that serves the application
+          start: yarn start-my-app
+          # for localhost testing, the action waits for the app to be ready, on any port
+          wait-on: "http://localhost:3000"
+          # KEY: we can specify the config file in the GHA and establish the linkage
+          config-file: cypress/config/local.js
+          record: true # records on cypress dashboard
+          parallel: true # parallelizes tests
+          group: "local ui e2e" # for nice labelling on the dashboard
+          # adds a convenience filter when using Cypress Dashboard;
+          # i.e. we can filter test executions by environment name
+          tag: local
+        env:
+          CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
+```
+
+#### Testing against deployments in CI
+
+We need a way for CI to figure out which deployment we are executing tests against (3), and use that to drive the Cypress GitHub Action (2), which is already working with the Cypress config file (1).
+
+Assume we are using two config files, `cypress/config/dev.js` and `cypress/config/stage.js`:
+
+```js
+import { defineConfig } from 'cypress'
+
+export default defineConfig({
+  projectId: "123abc",
+  defaultCommandTimeout: 10000,
+  retries: {
+    runMode: 2,
+    openMode: 0
+  },
+  e2e: {
+    setupNodeEvents(on, config) {},
+    baseUrl: "https://www.deployed-dev.com",
+    excludeSpecPattern: "**/prod/*",
+    env: {
+      ENVIRONMENT: "dev"
+    }
+  }
+})
+```
+
+```js
+import { defineConfig } from 'cypress'
+
+export default defineConfig({
+  projectId: "123abc",
+  defaultCommandTimeout: 10000,
+  retries: {
+    runMode: 2,
+    openMode: 0
+  },
+  e2e: {
+    setupNodeEvents(on, config) {},
+    baseUrl: "https://www.deployed-stage.com",
+    excludeSpecPattern: "**/prod/*",
+    env: {
+      ENVIRONMENT: "stage"
+    }
+  }
+})
+```
+
+In the below GitHub workflow configuration file, let's say `deployment-e2e.yml`, we specify which config file to use with the GitHub Action's `config-file` property. Exactly as we did for `local-e2e.yml`, facts (1) and (2) are now linked.
+
+To derive fact (3), we add some bash logic that figures out which environment the CI is running in; the details are in the yml sample below - credit goes to [Christopher Lawrence](https://www.linkedin.com/in/chrstphrjlawrence/) for figuring this out. All we need is for this logic to provide us with a deployment name such as `dev` or `stage`, then we can use that value in the GitHub Action.
+
+Just like how we use the Cypress runner as a local user, we pick which browser to use with the `browser` property of the action.
+
+At this point, when starting the GitHub Action, all 3 facts compose together:
+
+- Cypress launches in the CI step driven by the custom environment value -`dev` or `stage`- derived from the bash logic - _fact (3)_,
+- The config file is selected per this value via the config-file property of the GitHub Action - _fact (2)_,
+- The config file has a matching value in the ENVIRONMENT property which in turn drives the tests - _fact (1)_.
+
+Again, we use the same `cy.skipOn('dev')` in the test, without having to change a thing in the spec file or worry about any additional CI configuration.
+
+```yaml
+# assume trunk based deployment
+# we are pushing to master, deploying master, running tests against dev deployment
+# we also want to push tags, deploy, run tests against stage deployment
+# additionally we want to execute e2e against any deployment on demand with dispatch
+name: e2e deployment
+on:
+  push:
+    branches: ['master']
+    tags: [ 'v*.*.*' ]
+  # the workflow dispatch takes the value of the environment,
+  # which matches cypress/config/<environment>.js : dev or stage
+  workflow_dispatch:
+    inputs:
+      environment:
+        description: 'Env to run tests in'
+        required: true
+
+# if this branch is pushed back to back, cancel the older branch's workflow
+concurrency:
+  group: ${{ github.ref }} && ${{ github.workflow }}
+  cancel-in-progress: true
+
+jobs:
+  test:
+    strategy:
+      # uses 2 CI machines to run tests in parallel
+      matrix:
+        machines: [1, 2]
+    runs-on: ubuntu-latest
+    env:
+      # (3): we add custom variables to the environment
+      # we will use 2 variables to showcase a complex logic in bash
+      # with a switch statement and if else
+      EVENT_NAME: ${{ github.event_name }} # helps identify if this is a push or dispatch
+      REF: ${{ github.ref }} # helps identify branch name, github ref is just your branch name
+    steps:
+      - uses: actions/checkout@v3
+
+      # (fact 3): our custom step which figures out what environment the CI is in
+      # if this is a workflow dispatch we set a custom variable ENVIRONMENT
+      # with the value of what is entered in GitHub UI: dev, stage, or wrong text
+      # if this is a push event, we identify from the branch name
+      # whether it is the master branch, or this is a tag
+      # if master, we set the custom variable ENVIRONMENT as dev, otherwise as stage
+      - name: Set environment variable
+        run: |
+          case $EVENT_NAME in
+            workflow_dispatch)
+              ENVIRONMENT=${{ github.event.inputs.environment }}
+              ;;
+            push)
+              if [[ $REF == *"master" ]]
+              then
+                ENVIRONMENT=dev
+              else
+                ENVIRONMENT=stage
+              fi
+              ;;
+          esac
+
+          echo "ENVIRONMENT=$ENVIRONMENT" >> $GITHUB_ENV
+          # from GitHub docs we know how to set a custom environment variable
+          # echo "{name}={value}" >> $GITHUB_ENV
+          # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#setting-an-environment-variable
+
+      # (fact 2): we use the appropriate cypress config file
+      # to run tests against a deployment
+      # we can use the value ${{ env.ENVIRONMENT }} from the previous step
+      - name: Cypress tests 🧪 on ${{ env.ENVIRONMENT }}
+        uses: cypress-io/github-action@v2
+        with:
+          browser: chrome
+          # (fact 1): the config file has a variable ENVIRONMENT
+          # with a value that matches the name of the config file
+          # we don't have to define anything additional in CI
+          config-file: cypress/config/${{ env.ENVIRONMENT }}.js
+          record: true # records on cypress dashboard
+          parallel: true # parallelizes tests
+          # for
nice labelling on the dashboard
+          group: ${{ env.ENVIRONMENT }} ui e2e
+          # adds a convenience filter when using Cypress Dashboard;
+          # i.e. we can filter test executions by environment name
+          tag: ${{ env.ENVIRONMENT }}
+        env:
+          CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
+```
+
+With this setup the engineers are able to use Cypress in the CI the same way they would use the Cypress runner locally on their laptops:
+
+- They open (or run) the tests against the deployment they want: `cypress:open-local` | `cypress:open-dev` | `cypress:open-stage`
+
+- They select the browser
+
+- They work at the test level, skipping or executing tests without worrying about any other CI-specific configuration parameters
+
+> We are working on a sample repo similar to [angular-playground](https://github.com/muratkeremozcan/angular-playground), which will help with internal training, external information sharing, and collaborating with Cypress on reproducible issues. This post will be updated when the repo is ready.
+
+
+================================================
+FILE: blog/CI CD strategies for UI apps and deployed services.mkd
+================================================
+# CI CD strategies for UI apps and deployed services
+
+In today's rapidly evolving digital ecosystem, Continuous Integration and Continuous Deployment (CI/CD) strategies have become integral to the lifecycle of web applications and services. CI/CD offers a plethora of benefits that enable organizations to deliver products to market rapidly, streamline their operations, and drastically improve their overall software quality. CI/CD is the heartbeat of modern DevOps practices. It bridges the gap between development and operations teams, fostering a culture of seamless collaboration. This synergy is underpinned by a fail-fast mentality, where errors are detected and corrected promptly during the development process, rather than after deployment.
+
+Without CI/CD, software development becomes a cumbersome, time-consuming endeavor fraught with bottlenecks and pitfalls. This traditional, siloed approach to software development and deployment, known as the waterfall model, often leads to "integration hell", where merging changes from different team members becomes a nightmare, causing delays and potentially leading to flawed final products. In a world that demands agility and reliability, the absence of CI/CD strategies can make your UI apps and deployed services lag in the competitive landscape.
+
+In this article, we'll explore two distinct examples of CI/CD implementations, each demonstrating a unique scenario. Our first example centers on a React application, while the second focuses on a service deployed on Amazon Web Services (AWS). For both cases, we will be utilizing GitHub Actions as our CI/CD platform and sharing the repos, giving us the advantage of exploring actual repositories to deepen our understanding.
+
+Remember, there is no one-size-fits-all solution when it comes to CI/CD. However, the overarching principles and strategies we'll discuss in these examples hold universal applicability.
They can be adapted and modified to fit a variety of different situations and needs.
+
+- [CI CD for a UI app](#ci-cd-for-a-ui-app)
+  - [Key Concepts](#key-concepts)
+  - [Testing Against Localhost on PRs, Testing Against Deployments](#testing-against-localhost-on-prs-testing-against-deployments)
+- [CI CD for a deployed AWS service](#ci-cd-for-a-deployed-aws-service)
+  - [Testing against temporary branches on PRs](#testing-against-temporary-branches-on-prs)
+    - [Key Concepts:](#key-concepts-1)
+  - [Testing against deployments](#testing-against-deployments)
+- [Wrap up](#wrap-up)
+
+## CI CD for a UI app
+
+We'll start by examining a [Tour of Heroes repo](https://github.com/muratkeremozcan/tour-of-heroes-react-vite-cypress-ts), featured in the book [CCTDD: Cypress Component Test Driven Design](https://muratkerem.gitbook.io/cctdd/). This repository demonstrates various test checks, including lint (ESLint), type checks (TS), unit tests (Jest), and Cypress component tests & end-to-end tests. The tests are parallelized to reduce feedback time to approximately five minutes, which is an optimal duration to promote a continuous feedback loop for a repo of this size.
+
+Below is the CI architecture for this project:
+
+![CI/CD](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/h3uk05jjsevogdnpssq1.png)
+
+The app contains 22 Cypress component tests, 22 Jest/RTL unit/component tests, and 11 Cypress end-to-end tests. With [Cypress Cloud analytics](https://cloud.cypress.io/projects/x953wq/runs/831/specs), there is potential to further reduce this time.
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/ccsbl1lqbrkgbsbq3eto.png)
+
+### Key Concepts
+
+- Dependencies are installed and cached, drastically reducing the setup time for subsequent commits.
+- Unit tests, linting, and type checks are all parallelized, taking advantage of the cache.
+- Component tests and end-to-end tests are parallelized as well, without waiting for unit tests, linting, and type checks to complete.
+- Nine machines run in parallel, speeding up the CI pipeline. If we had limited CI machines, we could run the Cypress parallelization sequentially after the linting, type check, and unit test stage, but it would increase feedback time by about 25%.
+- End-to-end tests execute against `localhost:3000`, which is served locally on the CI machine. This approach, often overlooked, enables a shift-left testing strategy that we'll discuss more later.
+
+This [YAML file](https://github.com/muratkeremozcan/tour-of-heroes-react-vite-cypress-ts/blob/main/.github/workflows/main.yml) details the CI implementation, including combined code coverage with [CodeCov](https://about.codecov.io/). For a simpler example without Cypress parallelization and code coverage, check the [GitHub Actions YAML file of this template](https://github.com/muratkeremozcan/react-cypress-ts-vite-template/blob/main/.github/workflows/main.yml). The ideas presented here can be applied to any front-end application.
+
+```yml
+name: unit-lint-typecheck-e2e-ct
+on:
+  push:
+  workflow_dispatch:
+
+# to cancel outdated runs when a new commit in the same branch comes in
+concurrency:
+  group: ${{ github.head_ref || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  install-dependencies:
+    name: Install Dependencies
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+
+      # we use the Cypress GHA to cache dependencies,
+      # and we do not run the tests
+      - name: Install dependencies
+        uses: cypress-io/github-action@v5.8.0
+        with:
+          runTests: false
+
+  # all the jobs below need the install-dependencies job,
+  # so they run after it
+
+  # unit, lint and typecheck jobs all install dependencies the same way;
+  # they all hit the cache from the dependencies job
+
+  unit-test:
+    needs: [install-dependencies]
+    name: Run Unit Test
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+
+      - name: Install dependencies
+        uses: cypress-io/github-action@v5.8.0
+        with:
+          runTests: false
+
+      - name: unit-test
+        run: yarn test
+
+  lint:
+    needs: install-dependencies
+    name: Run Lint
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+
+      - name: Install dependencies
+        uses: cypress-io/github-action@v5.8.0
+        with:
+          runTests: false
+
+      - name: lint
+        run: yarn lint
+
+  typecheck:
+    needs: install-dependencies
+    name: Run typecheck
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+
+      - name: Install dependencies
+        uses: cypress-io/github-action@v5.8.0
+        with:
+          runTests: false
+
+      - name: typecheck
+        run: yarn typecheck
+
+  cypress-e2e-test:
+    needs: [install-dependencies]
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+
+      - name: Cypress e2e tests 🧪
+        uses: cypress-io/github-action@v5.8.0
+        with:
+          start: yarn start
+          wait-on: 'http://localhost:3000'
+          browser: chrome
+        env:
+          # enables unique runs (when the dreaded retry button is used...)
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+  cypress-ct-test:
+    needs: [install-dependencies]
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+
+      - name: Cypress component tests 🧪
+        uses: cypress-io/github-action@v5.8.0
+        with:
+          component: true
+          browser: chrome
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+```
+
+### Testing Against Localhost on PRs, Testing Against Deployments
+
+As alluded to in the previous section, testing against localhost during the Pull Request (PR) stage is a cornerstone of the shift-left strategy. This approach ensures that no developer merges changes into the mainline branch unless all CI checks pass.
+
+In our 'Tour of Heroes' example, this requirement extends to combined code coverage. If a new PR decreases the coverage rate below 100% (for instance, by introducing new code without associated tests), the CI pipeline will fail.
+
+This rigorous testing practice not only encourages code quality but also fosters a paradigm shift towards process automation. Consequently, it reduces the reliance on management tools for process enforcement, thus improving efficiency and developer experience.
+
+But what happens after the PR is merged? What about continuous deployment?
For this, let us examine another UI app which employs Amplify for deployments: Yan Cui's [Vue-based Twitter clone](https://github.com/muratkeremozcan/appsyncmasterclass-frontend/tree/main/.github/workflows). Looking at this repository, we find two YAML files. [`PR.yml`](https://github.com/muratkeremozcan/appsyncmasterclass-frontend/blob/main/.github/workflows/PR.yml) is very similar to our previous Tour of Heroes example, though this application is built with Vue & JavaScript rather than React & TypeScript. Noticeably, it only runs on PRs.
+
+```yml
+# ./.github/workflows/PR.yml
+name: PR
+on:
+  pull_request: # only PRs
+  workflow_dispatch: # we can manually trigger...
+
+# the rest of the file...
+```
+
+The more interesting part is the [`dev.yml`](https://github.com/muratkeremozcan/appsyncmasterclass-frontend/blob/main/.github/workflows/dev.yml) file, which only runs the e2e tests, but this time with a custom config file for the dev deployment. Line 31 of the yml specifies the config file:
+
+```yml
+# ./.github/workflows/dev.yml
+name: dev
+on:
+  push:
+    branches: [main] # only on main
+  workflow_dispatch: # manual trigger...
+
+jobs:
+  cypress-e2e-test:
+    # generic settings...
+    steps:
+      # generic settings...
+
+      - name: Cypress e2e tests 🧪
+        uses: cypress-io/github-action@v5.0.8
+        with:
+          # KEY point; we specify what config file to use
+          config-file: cypress/config/dev.config.js
+
+      # generic settings...
+```
+
+When running tests against multiple deployments, it is a best practice to have separate config files for each deployment, usually differentiated by the baseUrl.
+
+```js
+// ./cypress/config/local.config.js
+
+const { defineConfig } = require("cypress");
+
+module.exports = defineConfig({
+  // generic settings...
+
+  e2e: {
+    setupNodeEvents(on, config) {
+      // generic settings...
+    },
+    // we have some custom env vars;
+    // local runs and UI PRs use the same dev api env vars
+    env: {
+      ENVIRONMENT: "dev",
+      API_URL:
+        "https://awfrp7n7rrhw5kzqimfegnqzeq.appsync-api.eu-west-1.amazonaws.com/graphql",
+    },
+    // our base url for local runs and PRs
+    baseUrl: "http://localhost:8080/#/",
+  },
+
+  // component tests only apply to PRs/local,
+  // some additional settings for it here
+  component: {
+    devServer: {
+      framework: "vue-cli",
+      bundler: "webpack",
+    },
+  },
+});
+```
+
+```js
+// ./cypress/config/dev.config.js
+
+const { defineConfig } = require("cypress");
+
+module.exports = defineConfig({
+  // generic settings...
+
+  e2e: {
+    setupNodeEvents(on, config) {
+      esbuildPreprocessor(on);
+      registerDataSession(on, config);
+      return config;
+    },
+    // the dev deployment uses the same dev api env vars
+    env: {
+      ENVIRONMENT: "dev",
+      API_URL:
+        "https://awfrp7n7rrhw5kzqimfegnqzeq.appsync-api.eu-west-1.amazonaws.com/graphql",
+    },
+    // we have a baseUrl for the dev deployment
+    baseUrl: "https://main.d2uw1pp8i1hsae.amplifyapp.com/#/",
+  },
+});
+```
+
+As for the Amplify specifics, there is an `amplify.yml` file in the repository, and AWS Amplify is configured to recognize our repository. Several options are available for Continuous Deployment of UI applications. Apart from AWS Amplify, Netlify, Vercel, Google Firebase, Heroku, GitHub Pages, and Azure App Service are just a few of the other platforms. Each typically requires a configuration file in the repository and a web app where a project is created and linked to a repository.
+
+However, as these configurations are vendor-specific, we will instead delve into continuous deployment details using an AWS service example with temporary branches or ephemeral instances.
(Perhaps in the future we will take the ToH and deploy it via multiple vendors for a thorough comparison.)
+
+## CI CD for a deployed AWS service
+
+In this section, we will explore the process of implementing Continuous Integration and Continuous Deployment (CI/CD) for a service deployed on AWS. We will be using Yan Cui's [AWS AppSync Twitter clone](https://github.com/muratkeremozcan/appsyncmasterclass-backend) as a sample.
+
+> For the record, AWS AppSync is a fully managed service that enables developers to develop GraphQL APIs with ease, and this service uses GraphQL instead of a more widespread API gateway.
+
+Of interest are 3 yml files under [`.github/workflows/`](https://github.com/muratkeremozcan/appsyncmasterclass-backend/tree/main/.github/workflows); `PR.yml`, `dev.yml`, `stage.yml`. Let us work through [`PR.yml`](https://github.com/muratkeremozcan/appsyncmasterclass-backend/blob/main/.github/workflows/PR.yml).
+
+> Disclaimer: Yan uses Jest for unit (mocks AWS resources), integration (unit-test-like, but uses real AWS resources) and e2e tests. We took this opportunity to create Cypress mirrors of the e2e tests, and discussed the style further [in this video](https://www.youtube.com/watch?v=jTgT3VGhqKw). Therefore, we are less worried about parallelization, or running Jest tests separately for unit, integration or e2e. We just want a serial job set, and the main focus is the deployment and the removal of the temporary stack.
+
+### Testing against temporary branches on PRs
+
+A critical aspect of CI/CD at the PR level is the ability to create and test against temporary branches with the Serverless Framework. Each feature branch is deployed to a dedicated environment, like `my-feature`, and the temporary branch's name is typically the same as the git branch (with the `/` character avoided or replaced).
+
+> If we have external services that are not a part of our stack, or serverful resources we would like to keep outside of our stack, we do not include these serverful resources in the ephemeral environments; instead we share those resources, and/or refer to their dev deployments on our PRs. Check out Yan Cui's blog post [How to handle serverful resources when using ephemeral environments](https://theburningmonk.com/2023/02/how-to-handle-serverful-resources-when-using-ephemeral-environments/) for a thorough explanation.
+
+Deploying a temporary branch using the [Serverless Framework](https://serverless.com/framework/) is simple with the command `sls deploy -s my-feature`. Once the feature has been fully tested and integrated, the temporary stack can be removed using `sls remove -s my-feature`.
+
+> SAM (Serverless Application Model), CDK (Cloud Development Kit), Amplify CLI, and Terraform are other infrastructure-as-code alternatives to the Serverless Framework, and we assume each has the capability to deploy and remove stacks in some shape or form. Temporary branches are just so fundamental and easy in the Serverless Framework, which is why it is used in the examples.
+
+#### Key Concepts:
+
+- To use the Serverless Framework, we need to install the AWS CLI.
+- A unique name for our temporary stack is generated from the branch name.
+- Generic steps like linting and testing are performed.
+- The temporary branch is deployed before running e2e tests against it.
+- The temporary stack is removed at the end of the run.
+
+Temporary branches are regarded as crucial in serverless engineering.
They allow engineers to develop and test in isolated environments and stacks without clashing with each other on a shared deployment. This setup also helps avoid having to clean up test data; mind that when tests run against persistent deployments, test data still has to be cleared.
+
+```yml
+name: deploy to temp stack
+
+on:
+  pull_request: # runs on PRs
+  workflow_dispatch:
+
+concurrency:
+  group: ${{ github.head_ref || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    # env vars so Serverless Framework can deploy and remove the stack
+    env:
+      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+      AWS_DEFAULT_REGION: eu-west-1
+
+    steps:
+      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+      - uses: actions/checkout@v3
+      # needed to install AWS CLI, which we need for Serverless Framework
+      - uses: actions/setup-python@v4
+        with:
+          python-version: "3.x"
+      - uses: actions/setup-node@v3
+        with:
+          node-version: "16"
+
+      # generic step
+      - name: Install dependencies
+        uses: cypress-io/github-action@v5.6.1
+        with:
+          install-command: npm ci --force
+          runTests: false
+
+      - name: Install AWS CLI
+        run: |
+          curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+          unzip awscliv2.zip
+          sudo ./aws/install --update
+
+          rm awscliv2.zip
+          rm -r ./aws
+
+      # generic step
+      - name: Lint
+        run: npm run lint
+
+      # since we want to deploy a temporary stack with the branch name,
+      # we have to get the branch name
+      - name: Get branch name
+        id: branch-name
+        uses: tj-actions/branch-names@v6
+
+      # command to deploy the stack,
+      # the same idea as deploying the stack from our laptop
+      - name: deploy to ${{ steps.branch-name.outputs.current_branch }}
+        run: |
+          npm run sls -- config credentials --provider aws --key ${{ secrets.AWS_ACCESS_KEY_ID }} --secret ${{ secrets.AWS_SECRET_ACCESS_KEY }} --overwrite
+          npm run sls -- deploy -s ${{ steps.branch-name.outputs.current_branch }}
+
+      # some convenience so that we can use process.env in our code and tests
+      - name: export env vars
+        run: |
+          npm run sls export-env -- -s ${{ steps.branch-name.outputs.current_branch }}
+
+      # all jest tests; unit, integration, e2e
+      - name: jest tests
+        run: npm t
+
+      # Cypress e2e
+      - name: Cypress e2e tests 🧪
+        uses: cypress-io/github-action@v5.6.1
+        with:
+          browser: chrome
+          install: false
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+      # clean up the stack
+      - name: Remove stack ${{ steps.branch-name.outputs.current_branch }}
+        run: |
+          npm run sls -- remove -s ${{ steps.branch-name.outputs.current_branch }}
+```
+
+Here is [a sample run](https://github.com/muratkeremozcan/appsyncmasterclass-backend/actions/runs/4667128107), taking under 10 minutes. The biggest time consumer is the deployment, taking under 5 minutes; that is the worst-case scenario, since this is a new stack. We can reduce this by half if we use [lambda layers](https://github.com/muratkeremozcan/appsyncmasterclass-backend#79-serverless-layers-to-reduce-package-size). Another is removing the stack, which takes 1:41. We could instead remove the stack only once the PR is merged (but currently we do not know how to handle this with GHA - let us know in the comments if you do). Finally, the tests take a serial 1:33 with Jest and 51s with Cypress, from which we could save a minute with parallelization. This is a beefy service with many tests, and we could reduce the feedback time to around 5 minutes.
Albeit, around 10 minutes of CI feedback is fair when deploying stacks from scratch and removing them, because temporary stacks are considered the holy grail of e2e testing deployed services.
+
+> Note about config file and environment variables: in this service, the environment variables are unique per deployment, meaning our dozen+ environment variables change values with each branch, including the API url. For this reason, we have a step to export environment variables in CI, and this is also a requirement when working locally. This means we cannot have unique config files for Cypress, but we can map `process.env` to Cypress `env` [like so](https://github.com/muratkeremozcan/appsyncmasterclass-backend/blob/main/cypress.config.js#L11). Check out the video [map your .env file to Cypress environment variables](https://www.youtube.com/watch?v=fq-VDY6VQls) for a demo.
+
+### Testing against deployments
+
+Next, let us look at [`dev.yml`](https://github.com/muratkeremozcan/appsyncmasterclass-backend/blob/main/.github/workflows/dev.yml). This file simplifies the process, as it doesn't need to lint, unit test, or remove the stack. The deployments are persistent and environment variables do not change frequently, making the overall testing simpler. We can see that [a sample run](https://github.com/muratkeremozcan/appsyncmasterclass-backend/actions/runs/5120215093) takes under 5 minutes, because we have deployed to dev before and we do not remove it at the end.
+
+```yml
+# /.github/workflows/dev.yml
+
+name: deploy to dev
+
+on:
+  push:
+    branches: [main] # runs on main branch, upon merging a PR
+  workflow_dispatch:
+
+concurrency:
+  group: ${{ github.head_ref || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    env:
+      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+      AWS_DEFAULT_REGION: eu-west-1
+
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.x'
+      - uses: actions/setup-node@v3
+        with:
+          node-version: '16'
+
+      - name: Install dependencies
+        uses: cypress-io/github-action@v5.6.1
+        with:
+          install-command: npm ci --force
+          runTests: false
+
+      - name: install AWS CLI
+        run: |
+          curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+          unzip awscliv2.zip
+          sudo ./aws/install --update
+
+          rm awscliv2.zip
+          rm -r ./aws
+
+      - name: deploy to dev
+        run: |
+          npm run sls -- config credentials --provider aws --key ${{ secrets.AWS_ACCESS_KEY_ID }} --secret ${{ secrets.AWS_SECRET_ACCESS_KEY }} --overwrite
+          npm run sls -- deploy
+
+      - name: export env vars
+        run: npm run export:env
+
+      # no need for integration and unit tests on dev, they get covered on PRs
+      - name: Cypress e2e tests 🧪
+        uses: cypress-io/github-action@v5.6.1
+        with:
+          browser: chrome
+          install: false
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+```
+
+There are only 3 differences in the [`stage.yml`](https://github.com/muratkeremozcan/appsyncmasterclass-backend/blob/main/.github/workflows/stage.yml) file. We run it when pushing tags. We specify to deploy to stage with `npm run sls -- deploy -s stage`. And finally, the script for exporting stage environment variables is slightly different.
+
+```yml
+name: deploy to stage
+
+on:
+  push:
+    tags: ["*"] # runs when we push tags
+  workflow_dispatch:
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    env:
+      # same as dev.yml
+
+    steps:
+      # all steps until here are the same
+      # the key difference is specifying the stage with "-s stage"
+      - name: deploy to stage
+        run: |
+          npm run sls -- config credentials --provider aws --key ${{ secrets.AWS_ACCESS_KEY_ID }} --secret ${{ secrets.AWS_SECRET_ACCESS_KEY }} --overwrite
+          npm run sls -- deploy -s stage
+
+      # and the script for exporting stage environment variables is slightly different
+      - name: export env vars
+        run: npm run export:env-stage
+
+      # e2e step is still the same
+      - name: Cypress e2e tests 🧪
+        uses: cypress-io/github-action@v5.6.1
+        with:
+          browser: chrome
+          install: false
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+```
+
+## Wrap up
+
+In conclusion, a well-structured CI/CD pipeline allows us to integrate and deploy code changes rapidly and reliably, facilitating an agile development process. Utilizing the right tools and practices is crucial to ensure the process is efficient and effective.
+
+Through the examples we discussed, we explored how these principles apply in different contexts. We saw the importance of engineers working in unique, isolated environments through the use of temporary branches in the AWS service example, and through the use of `localhost` in the UI apps. We highlighted the importance of comprehensive testing (linting, type checking, unit tests, and end-to-end tests) in the case of the two UI apps.
+
+These practices provide timely feedback and enable deployments to production in a minimal number of steps. Whether dealing with PRs, deployments, or AWS services utilizing temporary branches, the key concepts remain similar and crucial for a successful CI/CD pipeline.
+
+By adhering to these best practices, we can ensure a robust and efficient pipeline that accelerates development while maintaining high software quality.
+
+
+================================================
+FILE: blog/Cypress Component Testing vs React Testing Library - the complete comparison.mkd
+================================================
+# Cypress Component Testing vs React Testing Library - the complete comparison
+
+Cypress component testing was released about a year prior to this blog post and has not only disrupted the de-facto JS framework component testing solutions but also initiated a paradigm shift in developing frontend components. At the core of this is the ability to render a component in isolation in the actual browser, and to observe how the component behaves while time-travel debugging via Cypress. This changes the entire development experience, with testing (the actual intent) being the icing on the cake.
+
+Rolling out component testing in a community that is so invested in de-facto component testing solutions can be a daring challenge. In this post, we will analyze the differences between React Testing Library (RTL) and Cypress Component Testing (CyCT), and provide you with resources that can help with adopting component testing in your organization.
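+
+To make that concrete before we dive in, here is a minimal sketch of what mounting a component in isolation looks like (a hypothetical `Button` component; `cy.mount` comes from the Cypress component-testing support file):
+
+```tsx
+// a hypothetical Button component, mounted in isolation in a real browser
+import Button from "./Button";
+
+it("renders and reacts to a click", () => {
+  cy.mount(<Button label="Save" onClick={cy.stub().as("onClick")} />);
+
+  cy.contains("button", "Save").click();
+  cy.get("@onClick").should("have.been.calledOnce");
+});
+```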
+
+**Toc:**
+
+- [CyCT vs RTL examples - Tour of Heroes (ToH)](#cyct-vs-rtl-examples---tour-of-heroes-toh)
+  - [HeaderBarBrand component](#headerbarbrand-component)
+  - [InputDetail component](#inputdetail-component)
+  - [NavBar component](#navbar-component)
+- [Comparison of low level spies \& mocks: Sinon vs Jest](#comparison-of-low-level-spies--mocks-sinon-vs-jest)
+  - [Sinon vs Jest: Spy](#sinon-vs-jest-spy)
+  - [Sinon vs Jest: Stub/Mock](#sinon-vs-jest-stubmock)
+- [Comparison of network spies \& mocks: `cy.intercept` vs `MSW`](#comparison-of-network-spies--mocks-cyintercept-vs-msw)
+- [CyCT vs RTL examples - Epic React](#cyct-vs-rtl-examples---epic-react)
+- [Wrapping up](#wrapping-up)
+- [Addendum: Gleb Bahmutov's The Missing Comparison Part video: a comparison of the developer experience](#addendum-gleb-bahmutovs-the-missing-comparison-part-video-a-comparison-of-the-developer-experience)
+  - [1. Compare the devex making a simple breaking change to the source code: HeaderBarBrand](#1-compare-the-devex-making-a-simple-breaking-change-to-the-source-code-headerbarbrand)
+  - [2. Compare test stability by simulating an asynchronous process: InputDetail](#2-compare-test-stability-by-simulating-an-asynchronous-process-inputdetail)
+  - [3. Compare the devex making a "complex" change: Heroes](#3-compare-the-devex-making-a-complex-change-heroes)
+
+### Our experience at Extend
+
+Our first component test at Extend was at the end of 2022, and we have been piloting it in one of our apps for about six months. Comparing the Cypress Cloud reports between then and now, we can see a preference towards component testing.
+
+Bear in mind that we are migrating from Enzyme to CyCT, to be able to upgrade React beyond version 16. If we already had RTL tests in this app (as we do in a second UI app which had migrated out of Enzyme before the CyCT rollout), then the RTL tests and CyCT could coexist. We have about 130 Enzyme tests remaining to migrate in our pilot app. This means we will potentially have around 300 Cypress component tests once the migration is complete. This is aligned with the proportion of e2e to CT tests we have observed in other apps; usually it is anywhere between 1:3 and 1:5.
+
+December 2022 (first component test commit):
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/hkq5eoj2d8u12op15fi3.png)
+
+June 2023:
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/b962ejpe1nntx9kgiiz5.png)
+
+> If you are curious about how the e2e execution time became so low with more tests, check out the video [Improve Cypress e2e test latency by a factor of 20!!](https://www.youtube.com/watch?v=diB6-jikHvk)
+
+## CyCT vs RTL examples - [Tour of Heroes](https://github.com/muratkeremozcan/tour-of-heroes-react-vite-cypress-ts) (ToH)
+
+ToH is the final app built in the book [CCTDD: Cypress Component Test Driven Design](https://muratkerem.gitbook.io/cctdd/). It has a few dozen Cypress component tests and their RTL mirrors. We will cover a few examples to showcase the main differences.
+
+### [HeaderBarBrand component](https://muratkerem.gitbook.io/cctdd/ch03-headerbarbrand)
+
+The way the component is mounted is very similar between CyCT and RTL. So are custom [mounts](https://slides.com/muratozcan/cctdd#/3/9) / [renders](https://slides.com/muratozcan/cctdd#/3/10).
+
+There are fewer imports in CyCT, because these are either built-in or come with the browser.
+
+The API is the primary difference; with Cypress we have a left-to-right chain style, while with RTL we have a right-to-left variable assignment style.
+
+28 lines in CyCT vs 35 lines in RTL.
+
+![HeaderBarBrand](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/13icumizquvedq2vgkjn.png)
+
+[HeaderBarBrand.cy.tsx](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/src/components/HeaderBarBrand.cy.tsx)
+
+```tsx
+import HeaderBarBrand from "./HeaderBarBrand";
+import { BrowserRouter } from "react-router-dom";
+
+describe("HeaderBarBrand", () => {
+  beforeEach(() => {
+    cy.mount(
+      <BrowserRouter>
+        <HeaderBarBrand />
+      </BrowserRouter>
+    );
+  });
+
+  it("should verify external link attributes", () => {
+    cy.get("a")
+      .should("have.attr", "href", "https://reactjs.org/")
+      .and("have.attr", "target", "_blank")
+      .and("have.attr", "rel", "noopener noreferrer");
+    cy.getByCy("header-bar-brand").within(() => cy.get("svg"));
+  });
+
+  it("should verify internal link spans and navigation", () => {
+    cy.getByCy("navLink").within(() =>
+      ["TOUR", "OF", "HEROES"].map((part: string) => cy.contains("span", part))
+    );
+    cy.getByCy("navLink").click();
+    cy.url().should("contain", "/");
+  });
+});
+```
+
+[HeaderBarBrand.test.tsx](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/src/components/HeaderBarBrand.test.tsx)
+
+```tsx
+import HeaderBarBrand from "./HeaderBarBrand";
+import { render, screen, within } from "@testing-library/react";
+import { BrowserRouter } from "react-router-dom";
+import userEvent from "@testing-library/user-event";
+import "@testing-library/jest-dom";
+
+describe("HeaderBarBrand", () => {
+  beforeEach(() => {
+    render(
+      <BrowserRouter>
+        <HeaderBarBrand />
+      </BrowserRouter>
+    );
+  });
+  it("should verify external link attributes", async () => {
+    const link = await screen.findByTestId("header-bar-brand-link");
+    expect(link).toHaveAttribute("href", "https://reactjs.org/");
+    expect(link).toHaveAttribute("target", "_blank");
+    expect(link).toHaveAttribute("rel", "noopener noreferrer");
+
+    // not easy to get a tag with RTL, needed to use a test id
+    within(await screen.findByTestId("header-bar-brand")).getByTestId(
+      "react-icon-svg"
+    );
+  });
+
+  it("should verify internal link spans and navigation", async () => {
+    const navLink = await screen.findByTestId("navLink");
+    const withinNavLink = within(navLink);
+    ["TOUR", "OF", "HEROES"].forEach((part) => withinNavLink.getByText(part));
+
+    await userEvent.click(navLink);
+    expect(window.location.pathname).toBe("/");
+  });
+});
+```
+
+### [InputDetail component](https://muratkerem.gitbook.io/cctdd/ch05-inputdetail)
+
+We see a similar contrast between CyCT and RTL as before; similar mount/render, different API styles, terser syntax on the Cypress side. Note that Testing Library has a Cypress version and can be used to make the examples even more similar.
+
+The key difference we want to point out is stubbing the `onChange` property of the component: `cy.stub()` vs its counterpart `jest.fn()`. Cypress comes with Sinon, and Cypress' API allows us to stub in-line.
+
+```tsx
+// CyCT: we can stub the property in-line
+cy.mount(
+  <InputDetail
+    name={name}
+    value={value}
+    placeholder={placeholder}
+    onChange={cy.stub().as("onChange")}
+  />
+);
+
+// RTL: variable assignment first
+const onChange = jest.fn();
+
+render(
+  <InputDetail
+    name={name}
+    value={value}
+    placeholder={placeholder}
+    onChange={onChange}
+  />
+);
+```
+
+![InputDetailComponent](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/fo5i22l7tv5e13kssy0m.png)
+
+[InputDetail.cy.tsx](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/src/components/InputDetail.cy.tsx)
+
+```tsx
+import InputDetail from "./InputDetail";
+import "@testing-library/cypress/add-commands";
+
+describe("InputDetail", () => {
+  const placeholder = "Aslaug";
+  const name = "name";
+  const value = "some value";
+  const newValue = "42";
+
+  it("should allow the input field to be modified", () => {
+    cy.mount(
+      <InputDetail
+        name={name}
+        value={value}
+        placeholder={placeholder}
+        onChange={cy.stub().as("onChange")}
+      />
+    );
+
+    cy.contains("label", name);
+    cy.findByPlaceholderText(placeholder).clear().type(newValue);
+    cy.findByDisplayValue(newValue).should("be.visible");
+    cy.get("@onChange").its("callCount").should("eq", newValue.length);
+  });
+
+  it("should not allow the input field to be modified", () => {
+    cy.mount(
+      <InputDetail
+        name={name}
+        value={value}
+        placeholder={placeholder}
+        readOnly={true}
+      />
+    );
+
+    cy.contains("label", name);
+    cy.findByPlaceholderText(placeholder)
+      .should("have.value", value)
+      .and("have.attr", "readOnly");
+  });
+});
+```
+
+[InputDetail.test.tsx](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/src/components/InputDetail.test.tsx)
+
+```tsx
+import InputDetail from "./InputDetail";
+import { render, screen } from "@testing-library/react";
+import userEvent from "@testing-library/user-event";
+
+describe("InputDetail", () => {
+  const placeholder = "Aslaug";
+  const name = "name";
+  const value = "some value";
+  const newValue = "42";
+
+  it("should allow the input field to be modified", async () => {
+    const onChange = jest.fn();
+    render(
+      <InputDetail
+        name={name}
+        value={value}
+        placeholder={placeholder}
+        onChange={onChange}
+      />
+    );
+
+    await screen.findByText(name);
+    const inputField = await screen.findByPlaceholderText(placeholder);
+    await userEvent.clear(inputField);
+    await userEvent.type(inputField, newValue);
+    expect(inputField).toHaveDisplayValue(newValue);
+    expect(onChange).toHaveBeenCalledTimes(newValue.length);
+  });
+
+  it("should not allow the input field to be modified", async () => {
+    render(
+      <InputDetail
+        name={name}
+        value={value}
+        placeholder={placeholder}
+        readOnly={true}
+      />
+    );
+
+    await screen.findByText(name);
+    const inputField = await screen.findByPlaceholderText(placeholder);
+    expect(inputField).toHaveDisplayValue(value);
+    expect(inputField).toHaveAttribute("readOnly");
+  });
+});
+```
+
+### [NavBar component](https://muratkerem.gitbook.io/cctdd/ch08-navbar)
+
+We can observe the same similarities and contrasts between CyCT and RTL in this component. The most striking of them all is the API style difference; with Cypress we are able to cover each link with `forEach`, while to do the same in RTL we have to do a little more work with `it.each`.
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/3gj6egrrkyz6h6iq9hcz.png)
+
+[NavBar.cy.tsx](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/src/components/NavBar.cy.tsx)
+
+```tsx
+import NavBar from "./NavBar";
+import { BrowserRouter } from "react-router-dom";
+
+const routes = ["heroes", "villains", "boys", "about"];
+
+describe("NavBar", () => {
+  it("should navigate to the correct routes", () => {
+    cy.mount(
+      <BrowserRouter>
+        <NavBar />
+      </BrowserRouter>
+    );
+
+    cy.contains("p", "Menu");
+    cy.getByCy("menu-list").children().should("have.length", routes.length);
+
+    routes.forEach((route: string) => {
+      cy.get(`[href="/${route}"]`)
+        .contains(route, { matchCase: false })
+        .click()
+        .should("have.class", "active-link")
+        .siblings()
+        .should("not.have.class", "active-link");
+
+      cy.url().should("contain", route);
+    });
+  });
+});
+```
+
+[NavBar.test.tsx](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/src/components/NavBar.test.tsx)
+
+```tsx
+import NavBar from "./NavBar";
+import { render, screen, within, waitFor } from "@testing-library/react";
+import userEvent from "@testing-library/user-event";
+import { BrowserRouter } from "react-router-dom";
+import "@testing-library/jest-dom";
+
+const routes = ["Heroes", "Villains", "Boys", "About"];
+
+describe("NavBar", () => {
+  beforeEach(() => {
+    render(
+      <BrowserRouter>
+        <NavBar />
+      </BrowserRouter>
+    );
+  });
+
+  it("should verify route layout", async () => {
+    expect(await screen.findByText("Menu")).toBeVisible();
+
+    const menuList = await screen.findByTestId("menu-list");
+    expect(within(menuList).queryAllByRole("link").length).toBe(routes.length);
+
+    routes.forEach((route) => within(menuList).getByText(route));
+  });
+
+  it.each(routes)("should navigate to route %s", async (route: string) => {
+    const link = async (name: string) => screen.findByRole("link", { name });
+    const activeRouteLink = await link(route);
+    userEvent.click(activeRouteLink);
+    await waitFor(() => expect(activeRouteLink).toHaveClass("active-link"));
+    expect(window.location.pathname).toEqual(`/${route.toLowerCase()}`);
+
+    const remainingRoutes = routes.filter((r) => r !== route);
+    remainingRoutes.forEach(async (inActiveRoute) => {
+      expect(await link(inActiveRoute)).not.toHaveClass("active-link");
+    });
+  });
+});
+```
+
+In all these examples we have not said anything about the component code, but instead shared a CyCT screenshot to communicate what the component is about. On one side we are staring at HTML; on the other we are looking at the component in the browser. The picture demonstrates a bug during development that we caught by just looking at the component.
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/2b5vbold5ljtr0cc788p.png)
+
+## Comparison of low level spies & mocks: Sinon vs Jest
+
+We prepared a [repository](https://github.com/muratkeremozcan/sinon-vs-jest) to cover core examples of using Sinon with Cypress, and created mirror tests in Jest.
+
+> The original Sinon examples are from Gleb Bahmutov's [Cypress Examples](https://t.co/LgUA1YB1Cx).
+
+### [Sinon vs Jest](https://github.com/muratkeremozcan/sinon-vs-jest): Spy
+
+A spy does not modify the behavior of the function - it is left perfectly intact. A spy is most useful when you are testing the contract between multiple functions and you don't care about the side effects the real function may create (if any).
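+
+As a minimal sketch of the idea (assuming a hypothetical `calculator` object), note that the real `add` still runs and returns its usual result in both tools:
+
+```ts
+const calculator = { add: (a: number, b: number) => a + b };
+
+// Cypress + Sinon: wrap the method; its behavior stays intact
+cy.spy(calculator, "add").as("add");
+calculator.add(2, 3); // still returns 5
+cy.get("@add").should("have.been.calledOnceWith", 2, 3);
+
+// Jest: the same idea with jest.spyOn
+const spy = jest.spyOn(calculator, "add");
+calculator.add(2, 3); // still returns 5
+expect(spy).toHaveBeenCalledWith(2, 3);
+expect(spy).toHaveReturnedWith(5);
+```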
+
+[spy-practice.cy.ts](https://github.com/muratkeremozcan/sinon-vs-jest/blob/main/cypress/e2e/spy-practice.cy.ts) vs [spy-practice.test.ts](https://github.com/muratkeremozcan/sinon-vs-jest/blob/main/src/spy-practice.test.ts) are linked for you to view the files in comparison; we will summarize the key differences:
+
+- **Spying on methods**: Both libraries allow you to create a spy that wraps around a function and records all calls to it, along with arguments and return values. Both Jest and Sinon allow you to assert if the spy was called, how many times it was called, and with what arguments. They do differ in syntax and some functionalities.
+
+  `cy.spy(obj, 'method')` vs `jest.spyOn(obj, 'method')`
+
+- **Syntax**: `have.been.called` vs `toHaveBeenCalled`, `have.been.calledTwice` vs `toHaveBeenCalledTimes(2)`
+
+- **Matchers**: `sinon.match.string` vs `expect.any(String)`
+
+- **Custom matchers**: [easy with Sinon](https://github.com/muratkeremozcan/sinon-vs-jest/blob/d596daa1a507707dcff33907052dd1081a718d41/cypress/e2e/spy-practice.cy.ts#L127), [not great devex with Jest](https://github.com/muratkeremozcan/sinon-vs-jest/blob/d596daa1a507707dcff33907052dd1081a718d41/src/spy-practice.test.ts#L104). We will not share the Jest code, because it is very verbose.
+
+```ts
+// Cy + sinon
+const { match } = Cypress.sinon;
+const isEven = (x: number) => x % 2 === 0;
+const isOdd = (x: number) => x % 2 === 1;
+
+const spy = cy.spy(calculator, "add").as("add");
+calculator.add(2, 3);
+
+// expect the value to pass a custom predicate function
+// the second argument to "match(predicate, message)"
+// is shown if the predicate does not pass and the assertion fails
+expect(spy).to.be.calledWith(match(isEven), match(isOdd, "is odd"));
+```
+
+- **Asynchronous testing**: In Jest, we must explicitly wait for asynchronous operations to complete before assertions are checked, illustrated by the `setTimeout` and `await new Promise` combination. Cypress + Sinon does not necessitate this explicit wait; there are certain advantages to the Cypress chain style and built-in retry ability. In the below example with Jest, we have to gather the promises, `await` them with `Promise.all`, and use `toHaveBeenNthCalledWith` to specify the order.
+
+```ts
+// Cy + Sinon
+it("resolved value (promises)", () => {
+  const calc = {
+    async add(a: number, b: number) {
+      return /* await */ Cypress.Promise.resolve(a + b).delay(100); // don't use await redundantly
+    },
+  };
+
+  cy.spy(calc, "add").as("add");
+  // wait for the promise to resolve then confirm its resolved value
+  cy.wrap(calc.add(4, 5)).should("equal", 9);
+  cy.wrap(calc.add(1, 90)).should("equal", 91);
+  cy.wrap(calc.add(-5, -8)).should("equal", -13);
+
+  // example of confirming one of the calls used add(4, 5)
+  cy.get("@add").should("have.been.calledWith", 4, 5);
+  cy.get("@add").should("have.been.calledWith", 1, 90);
+  cy.get("@add").should("have.been.calledWith", -5, -8);
+
+  // now let's confirm the resolved values
+  // first we need to wait for all promises to resolve
+  cy.get("@add")
+    .its("returnValues")
+    .then((ps) => Promise.all(ps))
+    .should("deep.equal", [9, 91, -13]);
+});
+```
+
+```ts
+// Jest
+it("resolved value (promises)", async () => {
+  const calc = {
+    async add(a: number, b: number) {
+      return a + b;
+    },
+  };
+
+  const spy = jest.spyOn(calc, "add");
+
+  // Let's gather the promises first
+  const promises = [calc.add(4, 5), calc.add(1, 90), calc.add(-5, -8)];
+
+  // Now we wait for all the promises to resolve
+  const results = await Promise.all(promises);
+
+  // We can check if the spy was called with the correct arguments at each call
+  expect(spy).toHaveBeenNthCalledWith(1, 4, 5);
+  expect(spy).toHaveBeenNthCalledWith(2, 1, 90);
+  expect(spy).toHaveBeenNthCalledWith(3, -5, -8);
+
+  // Finally, we can verify the resolved values
+  expect(results).toEqual([9, 91, -13]);
+});
+```
+
+### [Sinon vs Jest](https://github.com/muratkeremozcan/sinon-vs-jest): Stub/Mock
+
+A stub is a way to modify a function and delegate control over its behavior to you (the programmer).
+
+Create a standalone stub (generally for use in unit tests):
+
+```js
+cy.stub();
+jest.fn();
+```
+
+Replace obj.method() with a stubbed function:
+
+```js
+cy.stub(obj, "method");
+jest.spyOn(obj, "method").mockImplementation(jest.fn());
+```
+
+Force obj.method() to return a value:
+
+```js
+cy.stub(obj, "method").returns("Cliff");
+jest.spyOn(obj, "method").mockReturnValue("Cliff");
+```
+
+Force obj.method() when called with the "bar" argument to return "foo":
+
+```js
+cy.stub(obj, "method").withArgs("bar").returns("foo");
+
+jest.spyOn(obj, "method").mockImplementation((arg) => {
+  if (arg === "bar") return "foo";
+});
+```
+
+Force obj.method() to return a promise which resolves to "foo":
+
+```js
+cy.stub(obj, "method").resolves("foo");
+
+jest.spyOn(obj, "method").mockImplementation(() => {
+  return Promise.resolve("foo");
+});
+```
+
+Force obj.method() to return a promise rejected with an error:
+
+```js
+cy.stub(obj, "method").rejects(new Error("foo"));
+
+jest.spyOn(obj, "method").mockImplementation(() => {
+  return Promise.reject(new Error("foo"));
+});
+```
+
+It is interesting to note that the equivalent of `cy.stub()` is `jest.fn()`, but in many of the comparisons we are using `jest.spyOn(...).mockImplementation(...)`.
+
+In Jest, we can use `.mockImplementation()` to provide a custom implementation for the mock function.
+In Sinon, we can use `.callsFake()` or `.returns()` to specify custom behavior for the stub.
+
+`jest.fn()` can be used more in scenarios where you're not spying on or modifying existing object methods, but rather creating standalone mock functions.
For instance, use it when testing whether a function passed as a prop or callback is called correctly in a component test, or when you need a mock implementation for a function from a module that your function under test calls.
+
+Here is an example scenario where `jest.fn()` could be used:
+
+```ts
+it("should call the callback", () => {
+  const mockCallback = jest.fn();
+
+  function doSomething(callback: (arg: string) => void) {
+    callback("test argument");
+  }
+
+  doSomething(mockCallback);
+
+  expect(mockCallback).toHaveBeenCalledTimes(1);
+  expect(mockCallback).toHaveBeenCalledWith("test argument");
+});
+```
+
+Comparing [stub-practice.cy.ts](https://github.com/muratkeremozcan/sinon-vs-jest/blob/main/cypress/e2e/stub-practice.cy.ts) vs [stub-practice.test.ts](https://github.com/muratkeremozcan/sinon-vs-jest/blob/main/src/stub-practice.test.ts), here are some of the other highlights:
+
+**Restoring the original method after a stub:**
+
+```ts
+const person = {
+  getName() {
+    return "Joe";
+  },
+};
+
+/// Cy + Sinon
+expect(person.getName()).to.eq("Joe");
+
+const stub = cy.stub(person, "getName").returns("Cliff");
+expect(person.getName()).to.eq("Cliff");
+
+// restore the original method
+stub.restore();
+expect(person.getName()).to.eq("Joe");
+
+/// Jest
+expect(person.getName()).toBe("Joe");
+
+const stub = jest.spyOn(person, "getName").mockReturnValue("Cliff");
+expect(person.getName()).toBe("Cliff");
+
+// restore the original method
+stub.mockRestore();
+expect(person.getName()).toBe("Joe");
+```
+
+**Matchers: .callThrough(), withArgs(), match.type, match(predicate)**:
+
+```ts
+describe("matchers: .callThrough(), withArgs(), match.type, match(predicate)", () => {
+  const { match } = Cypress.sinon;
+
+  it("Matching stub depending on arguments", () => {
+    const greeter = {
+      greet(name: string | number | undefined) {
+        return `Hello, ${name}!`;
+      },
+    };
+
+    const stub = cy.stub(greeter, "greet");
+
+    stub.callThrough(); // if you want non-matched calls to call the real method
+    stub.withArgs(match.string).returns("Hi, Joe!");
+    stub.withArgs(match.number).throws(new Error("Invalid name"));
+
+    expect(greeter.greet("World")).to.equal("Hi, Joe!");
+    expect(() => greeter.greet(42)).to.throw("Invalid name");
+    expect(greeter.greet).to.have.been.calledTwice;
+
+    // non-matched calls go to the actual method
+    expect(greeter.greet()).to.equal("Hello, undefined!");
+  });
+});
+```
+
+There is no direct equivalent in Jest, but the below does the same thing:
+
+```ts
+describe("matchers: .mockImplementation()", () => {
+  it("Matching stub depending on arguments", () => {
+    const greeter = {
+      greet(name: string | number) {
+        return `Hello, ${name}!`;
+      },
+    };
+
+    jest
+      .spyOn(greeter, "greet")
+      .mockImplementation((name: string | number | undefined) => {
+        if (typeof name === "string") {
+          return "Hi, Joe!";
+        } else if (typeof name === "number") {
+          throw new Error("Invalid name");
+        } else {
+          return "Hello, undefined!";
+        }
+      });
+
+    expect(greeter.greet("World")).toEqual("Hi, Joe!");
+    expect(() => greeter.greet(42)).toThrow("Invalid name");
+    expect(greeter.greet).toHaveBeenCalledTimes(2);
+
+    expect(greeter.greet()).toEqual("Hello, undefined!");
+  });
+});
+```
+
+**Calling the original method from the stub**:
+
+```ts
+describe("Call the original method from the stub: callsFake(...), wrappedMethod()", () => {
+  it("Sometimes you might want to call the original method from the stub and modify it", () => {
+    const person = {
+      getName() {
+        return "Joe";
+      },
+    };
+
+    cy.stub(person,
"getName").callsFake(() => { + // call the real person.getName() + return person.getName.wrappedMethod().split("").reverse().join(""); + }); + + expect(person.getName()).to.eq("eoJ"); + }); +}); +``` + +There is no direct equivalent in Jest, but the below does the same thing. + +```ts +describe("Call the original method from the stub: mockImplementation(), originalName", () => { + it("Sometimes you might want to call the original method from the stub and modify it", () => { + const person = { + getName() { + return "Joe"; + }, + }; + + const originalGetName = person.getName.bind(person); + + jest.spyOn(person, "getName").mockImplementation(() => { + return originalGetName().split("").reverse().join(""); + }); + + expect(person.getName()).toEqual("eoJ"); + }); +}); +``` + +**Controlling time** `cy.clock` vs `jest.useFakeTimers`: + +When running Cypress tests, the tests themselves are outside the application's iframe. When you use `cy.clock()` command you change the application clock, and not the spec's clock. + +```ts +describe("cy.clock", () => { + it("control the time in the browser", () => { + const specNow = new Date(); + const now = new Date(Date.UTC(2017, 2, 14)).getTime(); + + cy.clock(now) // sets the application clock and pause time + .then(() => { + // spec clock keeps ticking + const specNow2 = new Date(); + // confirm by comparing the timestamps in milliseconds + expect(+specNow2, "spec timestamps").to.be.greaterThan(+specNow); + }); + // but the application's time is frozen + cy.window() + .its("Date") + .then((appDate) => { + const appNow = new appDate(); + expect(+appNow, "application timestamps") + .to.equal(+now) + .and.to.equal(1489449600000); // the timestamp in milliseconds + }); + // we can advance the application clock by 5 seconds + cy.tick(5000); + cy.window() + .its("Date") + .then((appDate) => { + const appNow = new appDate(); + expect(+appNow, "timestamp after 5 synthetic seconds").to.equal( + 1489449605000 + ); + }) + // meanwhile the spec clock only advanced by probably less than 200ms + .then(() => { + const specNow3 = new Date(); + expect(+specNow3, "elapsed on the spec clock").to.be.lessThan( + +specNow + 200 + ); + }); + }); +}); +``` + +The Jest mirror cannot be run in the browser window, and has a slightly different approach. + +```ts +describe("jest.useFakeTimers", () => { + it("control the time in the browser", () => { + jest.useFakeTimers(); + const specNow = new Date(); + const now = new Date(Date.UTC(2017, 2, 14)).getTime(); + + jest.setSystemTime(now); + + // application time is frozen + const appNow = new Date(); + expect(appNow.getTime()).toBe(now); + expect(appNow.getTime()).toBe(1489449600000); // the timestamp in milliseconds + + // we can advance the application clock by 5 seconds + jest.advanceTimersByTime(5000); + const appNow2 = new Date(); + expect(appNow2.getTime()).toBe(1489449605000); + + // spec clock only advanced by probably less than 200ms + const specNow3 = new Date(); + expect(specNow3.getTime()).toBeLessThan(specNow.getTime() + 200); + + jest.useRealTimers(); + }); +}); +``` + +## Comparison of network spies & mocks: `cy.intercept` vs `MSW` + +If you have been through [Kent C. Dodd's Epic React](https://epicreact.dev/), you are already convinced that the farther away from our component the mocking is, the more we are testing our code and having better confidence. The farthest we can mock from our code is mocking the network. 
To mock the network, Cypress has the [`intercept`](https://docs.cypress.io/api/commands/intercept#docusaurus_skipToContent_fallback) API, and the exact mirror on the RTL side is [Mock Service Worker (`msw`)](https://mswjs.io/docs/).
+
+Let us compare [RTL + MSW Heroes.test.tsx](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/src/heroes/Heroes.test.tsx) vs [CyCT + cy.intercept](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/src/heroes/Heroes.cy.tsx) from the Tour of Heroes repo.
+
+Here is a summary of the key points: with `msw` we have a bit more setup and a required cleanup, while with `cy.intercept` the syntax is succinct and no cleanup is required.
+
+```tsx
+// RTL + MSW
+it("should see error on initial load with GET", async () => {
+  // have to define handlers, set up the server and listen
+  const handlers = [
+    rest.get(
+      `${process.env.REACT_APP_API_URL}/heroes`,
+      async (_req, res, ctx) => res(ctx.status(400))
+    ),
+  ];
+  const server = setupServer(...handlers);
+  server.listen({
+    onUnhandledRequest: "warn",
+  });
+
+  // test code...
+
+  // have to clean up
+  server.resetHandlers();
+  server.close();
+});
+
+// CyCT + cy.intercept
+it("should see error on initial load with GET", () => {
+  // in comparison, this is all we have to do with cyct
+  cy.intercept("GET", `${Cypress.env("API_URL")}/heroes`, {
+    statusCode: 400,
+    delay: 100,
+  }).as("notFound");
+
+  // test code...
+
+  // no clean up needed
+});
+```
+
+The difference is clearer when we want to structure our tests. RTL + `msw` ends up with plenty more boilerplate compared to CyCT and `cy.intercept`. Not only the concise syntax, but also the ability to define or change our network mock on the fly with `cy.intercept`, makes our code easier to understand.
Check out the code comments below for precise examples:
+
+```tsx
+// RTL + msw
+describe("200 flows", () => {
+  const handlers = [
+    rest.get(
+      `${process.env.REACT_APP_API_URL}/heroes`,
+      async (_req, res, ctx) => res(ctx.status(200), ctx.json(heroes))
+    ),
+    // we have to have all definitions in the handler
+    // once declared, these are it;
+    // we would need a new describe block to change the mock
+    rest.delete(
+      `${process.env.REACT_APP_API_URL}/heroes/${heroes[0].id}`, // use /.*/ for all requests
+      async (_req, res, ctx) => res(ctx.status(400), ctx.json("expected error"))
+    ),
+  ];
+  const server = setupServer(...handlers);
+  beforeAll(() => {
+    server.listen({
+      onUnhandledRequest: "warn",
+    });
+  });
+  afterEach(server.resetHandlers);
+  afterAll(server.close);
+
+  it("should display the hero list on render, and go through hero add & refresh flow", async () => {
+    expect(await screen.findByTestId("list-header")).toBeVisible();
+    expect(await screen.findByTestId("hero-list")).toBeVisible();
+
+    await userEvent.click(await screen.findByTestId("add-button"));
+    expect(window.location.pathname).toBe("/heroes/add-hero");
+
+    await userEvent.click(await screen.findByTestId("refresh-button"));
+    expect(window.location.pathname).toBe("/heroes");
+  });
+
+  const deleteButtons = async () => screen.findAllByTestId("delete-button");
+  const modalYesNo = async () => screen.findByTestId("modal-yes-no");
+  const maybeModalYesNo = () => screen.queryByTestId("modal-yes-no");
+  const invokeHeroDelete = async () => {
+    userEvent.click((await deleteButtons())[0]);
+    expect(await modalYesNo()).toBeVisible();
+  };
+
+  it("should go through the modal flow, and cover error on DELETE", async () => {
+    expect(screen.queryByTestId("modal-dialog")).not.toBeInTheDocument();
+
+    await invokeHeroDelete();
+    await userEvent.click(await screen.findByTestId("button-no"));
+    expect(maybeModalYesNo()).not.toBeInTheDocument();
+
+    await invokeHeroDelete();
+    await userEvent.click(await screen.findByTestId("button-yes"));
+
+    expect(maybeModalYesNo()).not.toBeInTheDocument();
+    expect(await screen.findByTestId("error")).toBeVisible();
+    expect(screen.queryByTestId("modal-dialog")).not.toBeInTheDocument();
+  });
+});
+```
+
+```tsx
+// CyCT + cy.intercept
+context("200 flows", () => {
+  beforeEach(() => {
+    // the GET is common to both tests
+    cy.intercept("GET", `${Cypress.env("API_URL")}/heroes`, {
+      fixture: "heroes.json",
+    }).as("getHeroes");
+
+    cy.wrappedMount();
+  });
+
+  it("should display the hero list on render, and go through hero add & refresh flow", () => {
+    cy.wait("@getHeroes");
+
+    cy.getByCy("list-header").should("be.visible");
+    cy.getByCy("hero-list").should("be.visible");
+
+    cy.getByCy("add-button").click();
+    cy.location("pathname").should("eq", "/heroes/add-hero");
+
+    cy.getByCy("refresh-button").click();
+    cy.location("pathname").should("eq", "/heroes");
+  });
+
+  const invokeHeroDelete = () => {
+    cy.getByCy("delete-button").first().click();
+    cy.getByCy("modal-yes-no").should("be.visible");
+  };
+
+  it("should go through the modal flow, and cover error on DELETE", () => {
+    cy.getByCy("modal-yes-no").should("not.exist");
+
+    cy.log("do not delete flow");
+    invokeHeroDelete();
+    cy.getByCy("button-no").click();
+    cy.getByCy("modal-yes-no").should("not.exist");
+
+    cy.log("delete flow");
+    invokeHeroDelete();
+
+    // DELETE mock is unique to this test
+    // we can define it or change our network mock on the fly
+    // we could for example have a new GET definition here
+    cy.intercept("DELETE",
"*", { statusCode: 500 }).as("deleteHero"); + + cy.getByCy("button-yes").click(); + cy.wait("@deleteHero"); + cy.getByCy("modal-yes-no").should("not.exist"); + cy.getByCy("error").should("be.visible"); + }); +}); +``` + +In our opinion, the intercept API is simpler and is more flexible compared to MSW. We can observe this in the significant difference in the amount of code we have to write, doing the same thing in RTL + `msw` vs CyCT + `cy.intercept`. You can compare them here [RTL + MSW Heroes.test.tsx](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/src/heroes/Heroes.test.tsx) vs [CyCT + cy.intercept](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/src/heroes/Heroes.cy.tsx). + +![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/h4wuhxdry0s6nd301nxw.png) + +## CyCT vs RTL examples - Epic React + +In the interest of brevity, we will share links to [13 Cypress component tests](https://github.com/search?q=repo%3Amuratkeremozcan%2Fcypress-react-component-test-examples+1%3A1+comparison+with+RTL&type=code) and their RTL mirrors. All CyCT examples are from the repo [cypress-react-component-test-examples](https://github.com/muratkeremozcan/cypress-react-component-test-examples) where you can find 400+ individual CyCT examples. + +Simple counter: [cyct](https://github.com/muratkeremozcan/cypress-react-component-test-examples/blob/3f5ddb6eccdf13705d643eb3eabcfd9962e619ab/cypress/component/hooks/kent.c.dodds-epic-react/06.0.testing-react-apps/02.simple-rtl-example/counter.cy.jsx#L14) vs [rtl](https://github.com/muratkeremozcan/epic-react-testingJs/blob/main/epic-react/06.testing-react-apps/src/__tests__/exercise/03.js) + +Testing with context: [cyct](https://github.com/muratkeremozcan/cypress-react-component-test-examples/blob/3f5ddb6eccdf13705d643eb3eabcfd9962e619ab/cypress/component/hooks/kent.c.dodds-epic-react/06.0.testing-react-apps/07.testing-with-context/easy-button.cy.js#L4) vs [rtl](https://github.com/muratkeremozcan/epic-react-testingJs/blob/main/epic-react/06.testing-react-apps/src/__tests__/exercise/07.js) + +Simple redux: [cyct](https://github.com/muratkeremozcan/cypress-react-component-test-examples/blob/3f5ddb6eccdf13705d643eb3eabcfd9962e619ab/cypress/component/hooks/kent.c.dodds-epic-react/06.1.testingJs/14.redux/redux.cy.js#L7) vs [rtl](https://github.com/muratkeremozcan/epic-react-testingJs/blob/main/testing-js/03.rtl/src/__tests__/14.KEY-redux-02.js) + +A11y: [cyct](https://github.com/muratkeremozcan/cypress-react-component-test-examples/blob/3f5ddb6eccdf13705d643eb3eabcfd9962e619ab/cypress/component/hooks/kent.c.dodds-epic-react/06.1.testingJs/07.a11y/a11y.cy.js#L39) vs [rtl](https://github.com/muratkeremozcan/epic-react-testingJs/blob/main/testing-js/03.rtl/src/__tests__/07.a11y.js) + +Geolocation: [cyct](https://github.com/muratkeremozcan/cypress-react-component-test-examples/blob/3f5ddb6eccdf13705d643eb3eabcfd9962e619ab/cypress/component/hooks/kent.c.dodds-epic-react/06.0.testing-react-apps/06.mocking-browser-apis/location.cy.js#L15) vs [rtl](https://github.com/muratkeremozcan/epic-react-testingJs/blob/main/epic-react/06.testing-react-apps/src/__tests__/exercise/06.js) + +Mocking http (intercept vs msw) : [cyct](https://github.com/muratkeremozcan/cypress-react-component-test-examples/blob/3f5ddb6eccdf13705d643eb3eabcfd9962e619ab/cypress/component/hooks/kent.c.dodds-epic-react/06.0.testing-react-apps/05.mocking-http-requests/login-submission.cy.jsx#L13) vs 
[rtl](https://github.com/muratkeremozcan/epic-react-testingJs/blob/main/epic-react/06.testing-react-apps/src/__tests__/exercise/05.js#L24), another [cyct](https://github.com/muratkeremozcan/cypress-react-component-test-examples/blob/3f5ddb6eccdf13705d643eb3eabcfd9962e619ab/cypress/component/hooks/kent.c.dodds-epic-react/06.1.testingJs/08-09.mocking-http-requests/09.stub-newtork.cy.jsx#L27) vs [rtl](https://github.com/muratkeremozcan/epic-react-testingJs/blob/main/testing-js/03.rtl/src/__tests__/09.http-msw-mock.js)
+
+Router-redirect: [cyct](https://github.com/muratkeremozcan/cypress-react-component-test-examples/blob/3f5ddb6eccdf13705d643eb3eabcfd9962e619ab/cypress/component/hooks/kent.c.dodds-epic-react/06.1.testingJs/11.router-redirect/editor.cy.js#L13) vs [rtl](https://github.com/muratkeremozcan/epic-react-testingJs/blob/main/testing-js/03.rtl/src/__tests__/12.KEY.tdd-04-router-redirect.js)
+
+React-router: [cyct](https://github.com/muratkeremozcan/cypress-react-component-test-examples/blob/3f5ddb6eccdf13705d643eb3eabcfd9962e619ab/cypress/component/hooks/kent.c.dodds-epic-react/06.1.testingJs/13.react-router/main.cy.jsx#L21) vs [rtl](https://github.com/muratkeremozcan/epic-react-testingJs/blob/main/testing-js/03.rtl/src/__tests__/13.KEY.react-router-02.js)
+
+Modal: [cyct](https://github.com/muratkeremozcan/cypress-react-component-test-examples/blob/3f5ddb6eccdf13705d643eb3eabcfd9962e619ab/cypress/component/hooks/kent.c.dodds-epic-react/06.1.testingJs/16.modal-portals/modal.cy.js#L14) vs [rtl](https://github.com/muratkeremozcan/epic-react-testingJs/blob/main/testing-js/03.rtl/src/__tests__/16.KEY-modal-portals.js)
+
+Stub window fetch: [cyct](https://github.com/muratkeremozcan/cypress-react-component-test-examples/blob/3f5ddb6eccdf13705d643eb3eabcfd9962e619ab/cypress/component/hooks/kent.c.dodds-epic-react/06.1.testingJs/08-09.mocking-http-requests/08.stub-window.fetch.cy.jsx#L23) vs [rtl](https://github.com/muratkeremozcan/epic-react-testingJs/blob/main/testing-js/03.rtl/src/__tests__/08.KEY-http-jest-mock.js)
+
+## Wrapping up
+
+We went through, in detail, 3 examples of CyCT vs RTL from the repo & book [CCTDD: Cypress Component Test Driven Design](https://muratkerem.gitbook.io/cctdd/), where you can find a few dozen more examples of CyCT vs RTL.
+
+We compared low-level mocking in CyCT with Sinon vs mocking in RTL with Jest, with many examples and a [sample cheat sheet repo](https://github.com/muratkeremozcan/sinon-vs-jest).
+
+We compared network-level mocking using `cy.intercept` vs `msw` with repository links.
+
+Finally, we shared about a dozen more CyCT vs RTL examples from Kent C. Dodds' Epic React.
+
+Equipped with these resources, you have cheat sheets at your fingertips and an information toolset to begin rolling out Cypress component testing in your organizations.
+
+## Addendum: Gleb Bahmutov's [The Missing Comparison Part video](https://www.youtube.com/watch?v=rFUf7xdtt-I): a comparison of the developer experience
+
+In his video, Gleb covered an important part of comparing RTL and CyCT: the developer experience. _"Jest (with RTL) use the terminal JSDom, while Cypress gives you the real browser with time traveling debugger, making debugging errors so so so much simpler in Cypress"_. We strongly suggest watching the video; we will cover the three key points below.
+
+### 1. 
Compare the devex making a simple breaking change to the source code: [HeaderBarBrand](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/src/components/HeaderBarBrand.tsx#L20)
+
+Pull the [repo](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts), install, and start CyCT and RTL side by side: `yarn cy:open-ct`, `yarn test`. Execute the CyCT and RTL tests for `HeaderBarBrand`.
+
+Make a breaking change in the [HeaderBarBrand](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/src/components/HeaderBarBrand.tsx#L20) component; on line 20 change the spelling of `OF` to `ON`.
+
+```tsx
+{/* element tags approximated; see the linked component for the exact markup */}
+<span>TOUR</span>
+{/* Change OF to ON */}
+<span>ON</span>
+<span>HEROES</span>
+```
+
+Compare how you would diagnose this failure in RTL and CyCT.
+
+Here is the RTL failure:
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/1zn3xyyqxqo1s04z341p.png)
+
+Here is the CyCT failure:
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/q1b0rtxpjud4c1ywxnf8.png)
+
+### 2. Compare test stability by simulating an asynchronous process: [InputDetail](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/src/components/InputDetail.tsx#L34)
+
+Execute the CyCT and RTL tests for `InputDetail`.
+
+On line 34 of the [InputDetail](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/src/components/InputDetail.tsx#L34) component, introduce a `setTimeout` to simulate an asynchronous process. These are very common in real applications, and this makes for a solid comparison of the stability of the two tools, especially in CI.
+
+```tsx
+{/* attributes approximated from the linked component */}
+<input
+  onChange={() => setTimeout(onChange, 1000)}
+  readOnly={readOnly}
+  className="input"
+  type="text"
+/>
+```
+
+CyCT executes the same, retrying through the slight delay.
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/ul0a0jj0dcx4r91kdfp2.png)
+
+We cannot make the RTL test pass as is; it is synchronous. We would have to add asynchronous assertions to the test to make it work.
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/4ti6z6p3y12nqv8jhbts.png)
+
+### 3. Compare the devex making a "complex" change: [Heroes](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/src/heroes/Heroes.tsx)
+
+The test of concern stubs the deletion of a hero with a 500 network response (MSW with RTL, and cy.intercept with CyCT) to simulate a deletion error.
+
+In the `Heroes` component, comment out line 40 so that nothing happens upon hero deletion.
+
+```tsx
+const handleDeleteFromModal = () => {
+  // heroToDelete ? deleteHero(heroToDelete) : null
+  setShowModal(false);
+};
+```
+
+Looking at the RTL results (which are a few times longer than the screenshot), we are in the dark, trying to identify why the test did not work:
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/r2pn0yavzl3s39w3zagx.png)
+
+Looking at the CyCT failure, we can easily tell that the 500 network call never happened:
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/b8dwaq3tnqqo9gfiilan.png)
+
+
+================================================
+FILE: blog/Documenting and Testing Schemas of Serverless Stacks with Optic & Cypress.mkd
+================================================
+# Documenting and Testing Schemas of Serverless Stacks with Optic & Cypress
+
+Serverless testing is unique. 
Recently I went through [Yan Cui's Testing Serverless Architectures](https://testserverlessapps.com/?utm_source=blog_courses), highly recommended for all cloud engineers, learned a lot, and agreed with these conclusions:
+
+- **Increased Use of Managed Services**: With tools like AWS Lambda, there's a notable shift towards leveraging managed services.
+
+- **Focused, Single-Purpose Functions**: Lambda functions typically hone in on a singular task, simplifying individual function complexity. With this structure, the main risk in serverless architectures emerges from integration points—how your Lambda functions communicate and interact with external managed services.
+
+- **Granularity in Deployment**: Serverless promotes deploying in smaller units. While this granularity provides precise control over access, it also introduces complexities. This increased granularity inherently demands more configurations, heightening security measures and careful management. In turn, the potential for misconfigurations is increased—be it within the application or Identity and Access Management (IAM).
+
+Given these distinct challenges and benefits of serverless architectures, our testing strategies must adapt.
+
+- **Unit Testing**: Focused on individual objects or modules. These tests often don't yield a high return on investment unless the business logic is particularly intricate; their value-to-cost ratio often leans unfavorably when compared to integration tests. They are useful for testing your business logic, particularly your branching logic, but they fall completely flat when testing your cloud infrastructure, making testing that code in a deployed environment essential.
+
+- **Integration Testing**: Tests interaction with external components, like AWS resources. Integration tests cost about the same as unit tests but provide more value: feed an event into the Lambda handler, then validate the consequences. Running these tests locally against deployed AWS resources is more practical and beneficial than attempting to emulate AWS in a local environment; emulations are often brittle and labor-intensive. Temporary stacks are mandatory in this approach, albeit there are teams that can exercise their handlers without local simulation or remote AWS resources; all the better.
+
+- **End-to-End (E2E) Testing**: Holistic testing of the system, including its deployment process. There are things integration tests cannot cover, such as IAM permissions, our IaC/configuration, and how the service is built and deployed. With e2e we get all that, alongside testing the functionality. While E2E tests offer the highest confidence, they're also the most resource-intensive.
+
+Serverless architectures are increasingly prevalent, offering scalability and cost-efficiency. However, they also introduce the above unique testing challenges. Enter schema testing, a method that seems tailor-made for the serverless paradigm. Let's dive in and analyze how it can fit into our serverless engineering & testing strategy in theory and practice. 
+
+- [Schema testing is a good fit with serverless applications/services/APIs](#schema-testing-is-a-good-fit-with-serverless-applicationsservicesapis)
+- [Optic stands apart](#optic-stands-apart)
+- [Optic vs Postman and Swagger](#optic-vs-postman-and-swagger)
+- [How can it all work in a generic serverless application?](#how-can-it-all-work-in-a-generic-serverless-application)
+  - [Install Optic to the project, and also globally.](#install-optic-to-the-project-and-also-globally)
+  - [Generate your `openapi.yml` file](#generate-your-openapiyml-file)
+  - [Create convenience scripts in `package.json`](#create-convenience-scripts-in-packagejson)
+  - [One time setup to initialize the Optic (traffic) capture configuration](#one-time-setup-to-initialize-the-optic-traffic-capture-configuration)
+  - [Setup Optic cloud](#setup-optic-cloud)
+  - [Capture the http traffic using the Optic proxy](#capture-the-http-traffic-using-the-optic-proxy)
+- [Questions, Concerns, other comparisons](#questions-concerns-other-comparisons)
+  - [Why both e2e and schema test?](#why-both-e2e-and-schema-test)
+  - [How about increased CI response time?](#how-about-increased-ci-response-time)
+  - [Do we keep having to record the schema with `schema:verify` ?](#do-we-keep-having-to-record-the-schema-with-schemaverify-)
+  - [Optic approach vs consumer driven contract testing (Pact)](#optic-approach-vs-consumer-driven-contract-testing-pact)
+- [Conclusion](#conclusion)
+
+## Schema testing is a good fit with serverless applications/services/APIs
+
+Schema testing confirms an API's response structure, data types, and more, against a predefined schema, e.g., an OpenAPI specification. This kind of testing is useful for two main purposes: documenting the API and spot-testing the implementation against potential breaking changes. It can give faster feedback in comparison to traditional e2e, as a preliminary check before heavyweight e2e tests.
+
+Schema testing stands out as a fitting solution. Here's why:
+
+- **Rapid feedback for integration integrity**: Schema testing swiftly validates Lambda functions against set structures and ensures communication with external services follows expected patterns.
+- **Cost-effective**: With minimal resource demands, schema tests sit in a cost-effective spot, addressing the confidence gap between unit and E2E tests.
+- **Guard against unintentional breaking changes**: In the face of potential misconfigurations in serverless architectures, schema testing indirectly confirms correct integrations and guards against unintentional breaking changes in evolving applications.
+
+Equally important is documenting our schema/API for the consumers. Creating the API docs and keeping them up to date is usually a chore, but there are ways we can automate the documentation upkeep as well as the testing of the schema.
+
+## Optic stands apart
+
+We recently tried [Optic](https://www.useoptic.com/) and identified standout features that address the unique needs of serverless applications:
+
+1. **Traffic Capture and Real-time Insights**: While Optic offers a variety of [methods to capture traffic](https://www.useoptic.com/docs/capturing-traffic), its ability to observe API traffic during testing through a reverse-proxy mechanism streamlines API updates and verifications, akin to recording and verifying Jest snapshots.
+2. **Seamless Documentation Sync**: Any changes to the API automatically reflect in its documentation, ensuring alignment.
+3. 
**API Lifecycle Management**:
+   - **Lifecycle Awareness**: Optic's comprehensive understanding of your API's lifecycle allows it to detect breaking changes and oversee appropriate API versioning.
+   - **Forward-Only Governance**: While new API endpoints comply with current standards, legacy endpoints remain intact, giving developers timely and pertinent feedback.
+   - **API Version Control**: Much like Git's approach to code, Optic introduces version control for APIs.
+4. **Enhanced Feedback and Integration**:
+   - **Visual Diff View**: Optic presents intuitive visual depictions of changes in OpenAPI specifications.
+   - **Integration with CI Pipelines**: Beyond the usual CI integrations, Optic delivers changelog previews directly within pull requests, elucidating API alterations.
+   - **Git Integration**: Tightly integrated with Git, Optic allows API changes comparison across branches, aiding developers in monitoring developments across the entire project lifecycle.
+
+By pairing Optic with e2e tests - we prefer Cypress for that because it is a great fit for [api e2e testing event driven systems](https://dev.to/muratkeremozcan/api-testing-event-driven-systems-7fe) - you can harness Optic's traffic capture to either generate or revise your OpenAPI specification.
+
+## Optic vs Postman and Swagger
+
+Vs Postman:
+
+- Postman is really focused on its collections format and does not have good tooling for generating OpenAPI or automatically updating it when the API no longer matches the spec.
+- Everything in Postman happens in its UI, whereas Optic is triggered in the local developer flows and CI. This gives developers the right feedback, in the right places.
+- Optic has tools for API change review that live right in the CI pipeline; there is no comparison for this on the Postman side.
+- Optic and Postman both have API documentation viewers - every API tool does. Optic is optimized for internal teams + partner APIs and focused on showing consumers how the APIs they use change over time.
+
+Vs Swagger:
+
+- Swagger is primarily an API specification framework. Optic focuses on automated OpenAPI spec updates and change management.
+- **Generation**: Swagger tools like Swagger Editor are for manual API definition. Optic auto-captures API traffic to update specs. This is a key difference.
+- **Integration**: Swagger often demands manual updates. Optic integrates with developer flows and CI, alerting on discrepancies between observed API behavior and the spec.
+- **Documentation**: Swagger UI provides interactive docs with request execution. Optic tracks API changes over time, ideal for teams to understand service evolution.
+- **Ecosystem**: Swagger boasts tools for editing, visualization, and code generation. Optic specializes in automatic change management.
+- **Audience**: Swagger is versatile for broad audiences, while Optic is tailored for environments with frequent API changes, emphasizing automated updates and change tracking.
+
+## How can it all work in a generic serverless application?
+
+Our [sample app](https://github.com/muratkeremozcan/prod-ready-serverless) is from Yan Cui's highly acclaimed [Production-Ready Serverless](https://productionreadyserverless.com/) workshop. Yan guides us through building an event-driven serverless app in Node.js with AWS API Gateway, Lambda, Cognito, EventBridge, DDB, and server-side rendering; the app uses AWS Cognito for authentication. 
+
+Here is the [link to the repo](https://github.com/muratkeremozcan/prod-ready-serverless) and [Optic PR](https://github.com/muratkeremozcan/prod-ready-serverless/pull/43/files).
+Here is a [link to another repo](https://github.com/muratkeremozcan/aws-cdk-in-practice) and its [Optic PR](https://github.com/muratkeremozcan/aws-cdk-in-practice/pull/11/files).
+
+Optic has great documentation, and the below is a cohesive description of the setup with a focus on a serverless app.
+
+### Install Optic to the project, and also globally.
+
+```bash
+npm i -D @useoptic/optic
+npm i -g @useoptic/optic
+```
+
+We need to create an OpenAPI specification. While there are many ways, a quick and dirty way is by using the AWS CLI. It needs some tweaks, but we can work with it to start things out.
+
+In the repository we are using [`serverless-export-env`](https://www.serverless.com/plugins/serverless-export-env), which allows us to extract the environment variables of our serverless stack to a `.env` file. The values here mostly change per deployment.
+
+### Generate your `openapi.yml` file
+
+Create the below script, which will be used to create an OpenAPI file using the AWS CLI. It uses the [get-export](https://docs.aws.amazon.com/cli/latest/reference/apigateway/get-export.html) command to hit the API gateway and generate the `openapi.yml` file. It assumes you have the environment variables `baseUrl` and `deployment` in your `.env` file, so that we know which API gateway we are concerned with on which deployment. You can use any var name as long as they match between the `.env` file and the script.
+
+```bash
+# .env
+baseUrl=https://myApiGwId.execute-api.us-east-1.amazonaws.com/dev
+deployment=dev # this could be a temp branch, or stage
+```
+
+```bash
+# create-openapi.sh
+
+# Load .env variables
+set -a
+source .env
+set +a
+
+# Extract the REST API id from the baseUrl
+rest_api_id=$(echo $baseUrl | cut -d '/' -f3 | cut -d '.' -f1)
+
+echo "Rest API ID: $rest_api_id"
+
+# Run the aws apigateway get-export command
+aws apigateway get-export --rest-api-id $rest_api_id --stage-name $deployment --export-type oas30 --accepts application/yaml ./openapi.yml
+```
+
+Give permissions to the file and execute:
+
+```bash
+chmod +x create-openapi.sh
+./create-openapi.sh
+```
+
+> You need the latest AWS CLI version. Here are macOS instructions; here's the [AWS reference](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html)
+>
+> ```bash
+> curl "https://awscli.amazonaws.com/AWSCLIV2.pkg" -o "AWSCLIV2.pkg"
+> sudo installer -pkg AWSCLIV2.pkg -target /
+>
+> ```
+
+Like some other AWS tools, it does not have the best developer experience, but we can work around it. The initial `openapi.yml` that gets created with the AWS CLI does not pass checks at https://apitools.dev/swagger-parser/online/
+
+1. Each path has `options > responses`, but this needs to be copied also to `http-verb > options` for each path.
+2. Set the `openapi` version to `'3.1.0'`.
+3. Remove the `server` property.
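+
+Tweaks 2 and 3 are easy to script. Below is a minimal sketch, assuming `js-yaml` is installed; the file name is ours, and tweak 1 is left out since it depends on your paths:
+
+```ts
+// fix-openapi.ts: a sketch of automating tweaks 2 and 3 above (not from the original post)
+import { readFileSync, writeFileSync } from 'node:fs'
+import { load, dump } from 'js-yaml'
+
+const doc = load(readFileSync('./openapi.yml', 'utf8')) as Record<string, unknown>
+
+doc.openapi = '3.1.0' // (2) set the openapi version
+delete doc.servers // (3) remove the generated server property (named `servers` in OAS3)
+
+writeFileSync('./openapi.yml', dump(doc))
+```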
+
+### Create convenience scripts in `package.json`
+
+```json
+"optic:lint": "optic lint openapi.yml",
+"optic:diff": "optic diff openapi.yml --base main --check",
+"optic:verify": "dotenv -e .env -- bash -c 'echo $baseUrl && optic capture openapi.yml --server-override $baseUrl'",
+"optic:update": "dotenv -e .env -- bash -c 'echo $baseUrl && optic capture openapi.yml --server-override $baseUrl --update interactive'"
+```
+
+The `optic:update` script will update the OpenAPI specification (the `openapi.yml` file), similar to a Jest snapshot update. The Optic team recommends using this locally to "record the API snapshot".
+
+The `optic:verify` script works like a Jest snapshot test, checking whether the traffic captured in the e2e run matches your current OpenAPI specification.
+
+`optic:lint` and `optic:diff` can quickly lint the OpenAPI spec and diff it against main. These scripts are mostly for local use. However, unless the spec has been updated with `optic:update`, the diff will naturally not find any issues.
+
+### One time setup to initialize the Optic (traffic) capture configuration
+
+This creates an `optic.yml` file:
+
+```bash
+optic capture init openapi.yml
+```
+
+We need to fine-tune it for a serverless stack. In the yml file, enter any placeholder for `server.url`; it has to exist with an `https` prefix but does not have to be a valid URL.
+
+Remove `server.command`; our server is already deployed and running.
+
+Replace `requests.run.command` with the e2e test command.
+
+`requests.run.proxy_variable` should be set to your API gateway URL; below we are using an environment variable named `rest_api_url`.
+
+We are using the e2e test script `cy:run-fast`, which turns off video, screenshots, and the command log. The command could be any kind of e2e/http test; for instance, in this repository we also have mirroring Jest e2e tests using Axios. As long as they are http tests that hit the API gateway, any kind of test can be used.
+
+```yml
+# ./optic.yml
+
+ruleset:
+  # Prevent breaking changes
+  - breaking-changes
+capture:
+  openapi.yml:
+    server:
+      # specified in package.json with --server-override
+      url: https://api.example.com # need a placeholder
+
+      ready_endpoint: /
+      # The interval to check 'ready_endpoint', in ms.
+      # Optional: default: 1000
+      ready_interval: 1000
+      # The length of time in ms to wait for a successful ready check to occur.
+      # Optional: default: 10_000, 10 seconds
+      ready_timeout: 10_000
+    # At least one of 'requests.run' or 'requests.send' is required below.
+    requests:
+      # Run a command to generate traffic. Requests should be sent to the Optic proxy, the address of which is injected
+      # into 'run.command's env as OPTIC_PROXY or the value of 'run.proxy_variable', if set.
+      run:
+        # The command that will generate traffic to the Optic proxy. Globbing with '*' is supported.
+        # Required if specifying 'requests.run'.
+        command: npm run cy:run-fast
+        # The name of the environment variable injected into the env of the command that contains the address of the Optic proxy.
+        # Optional: default: OPTIC_PROXY
+        proxy_variable: rest_api_url
+```
+
+### [Setup Optic cloud](https://www.useoptic.com/docs/cloud-get-started)
+
+Optic Cloud has a few key benefits that make it compelling, and a [very generous pricing model](https://www.useoptic.com/pricing):
+
+- PR comments.
+- Centralized API style governance (set up via the `optic.yml` file) for all your services, supported with AI.
+
+- [An internal catalogue of all your APIs + changes](https://www.useoptic.com/use-cases/api-change-management) so developers always know how the latest version works. This is much easier to read than `openapi.yml` files in the repos, and the changes between versions are visualized for easier comprehension.
+- Dashboards tracking OpenAPI accuracy, how well you follow the API standards, and breaking changes + issues over time.
+- **API Design and Collaboration**: Optic facilitates an interactive feedback mechanism, allowing stakeholders to weigh in on API designs prior to deployment. This proactive approach ensures optimal design choices are made before consumers start relying on them, aligning with the API-first development mantra.
+- Support.
+
+Create a token in the Optic app and save it as a GitHub secret.
+[Enable Optic commenting on pull requests](https://www.useoptic.com/docs/setup-ci#configure-commenting-on-pull-requests) (`Repo > Settings > Actions > General` and set `Workflow permissions` to `Read and write permissions`)
+
+```bash
+optic login
+# you will copy over the token from the web prompt
+
+# add the api
+optic api add openapi.yml --history-depth=0
+```
+
+Once done, you will see your organization at Optic (updated a few seconds ago).
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/rz59mevh8fgr4rt9evux.png)
+
+### Capture the http traffic using the Optic proxy
+
+Execute the script `optic:update` to capture the traffic and update the `openapi.yml` file. Again, this is similar to recording Jest snapshots.
+
+[From the docs](https://www.useoptic.com/docs/capturing-traffic)
+
+- Your http tests are trafficked through Optic's reverse proxy to your api gateway
+- Optic observes the traffic, then creates or updates your OpenAPI spec.
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/1ek9yke8nwxdzkr78626.png)
+
+Run it locally whenever the API may have changed:
+
+```bash
+npm run optic:update
+```
+
+The other script, `optic:verify`, tests the captured traffic against your OpenAPI spec. This is what we will use in the CI to verify that the traffic captured in the e2e run matches our current OpenAPI specification; it works like executing Jest snapshot tests.
+
+```bash
+npm run optic:verify
+```
+
+We want to add Optic schema verification to our PRs so that the CI executes the schema test when a commit is pushed. There are many ways to set up the CI; we will keep things simple and share the key highlights. For the full yml, take a look at the [`PR.yml` file at the repo](https://github.com/muratkeremozcan/prod-ready-serverless/blob/main/.github/workflows/PR.yml).
+
+```yml
+# ./.github/workflows/PR.yml
+
+# ...
+
+jobs:
+  build-deploy-test:
+    runs-on: ubuntu-latest
+
+    # ....
+
+    # Permissions are only needed here if you are using OIDC
+    permissions:
+      id-token: write
+      contents: write
+      pull-requests: write # This provides write access to issues and PRs
+
+    steps:
+      # installation & deployment...
+
+      # Jest & Cypress tests...
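+
+      # (illustrative, not in the original workflow) the e2e step that generates
+      # the traffic Optic verifies could look like:
+      # - name: Cypress e2e
+      #   run: npm run cy:run-fast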
+
+      # Schema verification
+      - name: verify the schema with Optic
+        run: npm run optic:verify
+
+      # include a preview and changelog in each PR comment
+      # sync every OpenAPI spec to Optic Cloud, diff & lint the schema as well
+      - uses: opticdev/action@v1
+        with:
+          # Your Optic Cloud Token
+          optic_token: ${{ secrets.OPTIC_TOKEN }}
+          # A GitHub token with access to create comments on pull requests
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          # If true, standard check failures will cause this action to fail.
+          # If false, standard check failures will show in PR comments and
+          # in Optic Cloud but will not cause the action to fail
+          standards_fail: true
+          # If you have more than one spec, separate matches with commas
+          # (openapi.yml,other.yml)
+          additional_args: --generated --match openapi.yml
+          compare_from_pr: cloud:default
+          compare_from_push: cloud:default
+        env:
+          CI: true
+```
+
+## Questions, Concerns, other comparisons
+
+> This approach is new and we are trying it out internally at my company [Extend](https://www.extend.com/).
+> We will see how serious these concerns actually are and update when we find solutions that work for us.
+
+### Why both e2e and schema test?
+
+Q: If we have e2e tests already, why are we also running schema tests? We need the serverless stack to be deployed for either to run, and when schema testing the serverless stack we are recording or verifying the traffic by utilizing the e2e tests anyway. What added value do we get from the schema test?
+
+A: In the case of serverless applications, while both e2e and schema testing necessitate the deployment of the serverless stack, and schema tests often leverage e2e for traffic recording and verification, they offer unique perspectives and benefits.
+
+**1. Precision and Specificity**: E2E tests provide a holistic system assessment, a comprehensive overview. In contrast, schema tests zero in on the API's structural integrity, ensuring its responses adhere strictly to the predefined structure. This granular approach might capture subtle inconsistencies broader tests could miss.
+
+**2. Documentation, Drift Detection, and Change Management**: Schema tests offer more than mere validation. They play a pivotal role in:
+
+- **Dynamic Documentation Syncing**: Immediate reflection of API changes in its documentation ensures continuous alignment, fostering improved developer communication and minimizing documentation drift risks.
+- **API Lifecycle Oversight**: They aid in detecting breaking changes, streamlining versioning, and managing the evolution of the API. This proactive, automated approach uniquely distinguishes schema testing from its e2e counterpart.
+
+---
+
+### How about increased CI response time?
+
+Q: How do we ensure schema testing has less impact on CI response time?
+
+A: In a CI pipeline our setup can look like so:
+
+```
+install----deploy---------unit & integration
+--------------------------e2e
+--------------------------schema:verify
+```
+
+With that setup, the impact on the duration of the pipeline is minimal.
+
+Additionally, if we have local testing capabilities - we are working on this at my company [Extend](https://www.extend.com/) - schema testing can happen prior to deploy, or concurrently.
+
+```
+install----deploy-----------unit & integration
+-----------schema:verify----e2e
+```
+
+```
+install----schema:verify----deploy----unit & integration
+--------------------------------------e2e
+```
+
+---
+
+### Do we keep having to record the schema with `schema:verify` ? 
+
+Q: In the CI examples we are executing `schema:verify`, but when do we execute `schema:update`? If we do not update the schema, the tests may give false positives.
+
+A: Usually the schema update is a chore: a manual process where we update the `openapi.yml` file. If we have other methods of updating the schema (perhaps we update it via our code and/or types), we do not need to run `schema:update`, and in turn the e2e suite, locally.
+
+If we do not have any practical way to update the OpenAPI specification, being able to do so with Optic is a boon. Engineers are responsible for owning the schema update when they are making potential changes to the schema, so they would take advantage of updating it automatically by executing `schema:update` and not having to think about it.
+
+Additionally, we can have a cron job in CI that runs nightly and updates the schema. The only gap in this approach would be if we neglect the local `schema:update` execution while changing the schema *and* releasing that same day. The cron job can reduce the human negligence error, but not entirely eliminate it.
+
+---
+
+### Optic approach vs consumer driven contract testing (Pact)
+
+Q: How does this approach relate to consumer-driven contract testing (Pact)? How do they compare to just e2e?
+
+A: First, a comparison of both with e2e:
+
+- With e2e, you deploy and release your service, and you can break consumer expectations, letting them know too late (usually never letting them know and they are surprised).
+- With Pact, you would run a test, realize it is breaking the consumer's expectations, and communicate to the consumer that you are going to break them. They are informed prior to the deployment. Alternatively, especially if it is an external consumer, you do not make the breaking change.
+- With Optic, you realize you are changing your schema because the schema test fails, so you realize you are probably breaking consumer expectations. A similar communication or decision-making process takes place: either you let them know about the breaking change or you do not make it.
+
+Now, let's compare API governance with consumer-driven contract testing and the Optic approach:
+
+**Consumer-Driven Contract Testing with Pact**:
+
+- **Prevention Over Correction**: Instead of finding out post-deployment that an API change has disrupted a consumer, you find out during the development phase.
+- **Consumer-Centric**: Contracts are based on the consumer's perspective. Providers ensure they meet these contracts.
+- **Continuous Feedback**: As API providers and consumers evolve, the contracts can be continually validated and updated.
+
+**Optic**:
+
+- **Observation Over Definition**: Instead of pre-defined contracts, Optic observes actual API behavior and alerts developers to unintended changes.
+- **Documentation & Drift Detection**: As the API evolves, Optic helps ensure documentation is automatically and accurately updated, reducing the chances of drift.
+- **Automated Schema Testing**: You understand changes to your schema, allowing for proactive communication with consumers about potential disruptions.
+
+**Key differences:**
+
+- You have to write the consumer-driven contract tests with Pact, which is a lot of work compared to just maintaining an automated schema test with Optic.
+- Pact is not going to tell you anything about API best practices, design standards, etc., which is another really important part of doing governance.
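+
+To make the difference in effort concrete, below is a minimal sketch of a single Pact consumer interaction (pact-js V3 API; the consumer/provider and movie names are illustrative, not from the original post). Every consumer expectation needs an interaction like this, whereas the Optic approach derives the spec from traffic the existing e2e tests already generate:
+
+```ts
+import path from 'node:path'
+import { PactV3, MatchersV3 } from '@pact-foundation/pact'
+
+const provider = new PactV3({
+  consumer: 'WebConsumer',
+  provider: 'MoviesAPI',
+  dir: path.resolve(process.cwd(), 'pacts'),
+})
+
+it('gets a movie by id', () => {
+  provider
+    .given('a movie with id 1 exists')
+    .uponReceiving('a request for movie 1')
+    .withRequest({ method: 'GET', path: '/movies/1' })
+    .willRespondWith({
+      status: 200,
+      body: MatchersV3.like({ id: 1, name: 'My movie' }),
+    })
+
+  // each test runs against a local mock provider; the pact file is written on success
+  return provider.executeTest(async (mockServer) => {
+    const res = await fetch(`${mockServer.url}/movies/1`)
+    expect(res.status).toBe(200)
+  })
+})
+```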
+
+## Conclusion
+
+**Schema testing, Optic, and e2e tests** together create a formidable toolkit for enhancing API development in serverless architectures:
+
+1. **Enhanced Testing Workflow:** Pairing Optic with e2e tests, especially with tools like Cypress, offers a dynamic way to test APIs in event-driven systems. Schema testing adds another cost-effective & responsive layer to our testing toolbox.
+2. **Automated Schema Generation:** Using the AWS CLI and OpenAPI specs, API schemas can be initially generated.
+3. **Real-time Traffic Capturing with Optic:** The OpenAPI specifications are kept accurate through real-time traffic observation, ensuring a true representation of system interactions.
+4. **Effortless API Documentation Updates:** With Optic's traffic capture capabilities, not only is API behavior tracked, but documentation is also automatically updated. This means every time you run your e2e tests, you're also refreshing your API documentation, ensuring it remains relevant and up-to-date.
+5. **Simplified Integration & Collaboration:** Embedding Optic commands in `package.json` and using Optic Cloud for team-based API review streamlines collaboration. With CI integration, it enforces an API-first development approach that remains consistent with the defined schema.
+6. **Visual Oversight:** Optic Cloud's visual tools provide a clear, intuitive view of API changes, negating the need to delve into raw specification files.
+
+In essence, the synergistic effect of schema testing, Optic, and end-to-end testing tools like Cypress offers a comprehensive solution to API development challenges in serverless environments.
+
+
+================================================
+FILE: blog/Functional Programming Test Patterns with Cypress.mkd
+================================================
+# Functional Programming Test Patterns with Cypress
+
+The debate on Page Object vs module pattern is really just Inheritance vs Composition. Inheritance (PO) is great for describing what something is; the page has x, y, z on it. Composition (module pattern) is great for describing what something does. Which one do you think best fits component-based architecture, where components are the building blocks and get reused on pages? How about user flows?
+
+## No more Page Objects
+
+The Page Object design pattern has two benefits:
+1. It keeps all page element selectors in one place, separating test code from the system's locators.
+1. It standardizes how tests interact with the page, avoiding code duplication and easing maintenance.
+
+OO in JS is a little awkward. The introduction of `class` in ES6 helps, but classes, specifically the `this` keyword, can still surprise people used to Java because they work differently than in Java. [Here is a great blog from Kent C. Dodds which highlights this point](https://kentcdodds.com/blog/classes-complexity-and-functional-programming)
+
+## Enter Page Modules
+
+In Java land it's pretty common to find Page Objects which inherit from the Base Page. An example of this in JS would be:
+
+```JS
+import { BasePage } from './BasePage'
+
+class HomePage extends BasePage {
+  constructor() {
+    super();
+    this.mainElement = 'body > .banner';
+  }
+
+  //... More code
+}
+
+export const homePage = new HomePage();
+```
+
+With the move to FP we are going to lose not only Inheritance but the Class itself. Therefore we need to use `Modules` to arrange our code. Each `module` exports public functions that can be imported into other modules and used. 
+
+```js
+// HomePage Module - HomePage.js
+export function login(email, password){
+  //...
+}
+
+export function logout(){
+  //...
+}
+
+export function search(criterion){
+  // ...
+}
+```
+
+This module could then be imported into your tests or other modules and used as below.
+```js
+// HomePage Test - HomePageTest.js
+import * as homePage from './HomePage.js'
+
+describe('Home Page', ()=> {
+  it('User can login', ()=> {
+    cy.visit('/')
+    homePage.login('abc','123456')
+  })
+})
+
+```
+
+Or we could selectively import individual functions from a module.
+
+```js
+import {login} from './HomePage.js'
+describe('Home Page', ()=> {
+  it('User can login', ()=> {
+    cy.visit('/')
+    login('abc','123456')
+  })
+})
+
+```
+
+### What about Inheritance
+
+```java
+public class HomePage extends BasePage {
+
+}
+
+```
+A lot of times we come across test suites where Page Objects extend a `BasePage` or every test file extends a `BaseTest` class. The intention behind doing this is often code reuse. Most often the `BaseTest` class has methods related to login, logout, logging, etc. This is an anti-pattern. Bundling unrelated functionality into a parent class for the purpose of reuse is an abuse of Inheritance.
+
+Common functionality that is to be made available to multiple specs (my recommendation is 3+) could be added as Cypress `Custom commands`. Custom Commands are available to be used globally with the `cy.` prefix. E.g., we can add a method called `login` as a custom command as below.
+
+```js
+Cypress.Commands.add('login', (username, password) => {
+  cy.get('#username').type(username)
+  //...
+})
+```
+
+`Cypress.Commands.add` takes the name of the custom command as the first argument and a closure as the second argument. Cypress custom commands are chainable and automatically returned, even when there is no explicit return.
+
+Now we could use the custom command in any spec.
+
+```js
+describe('Login Page', ()=>{
+  it('User can login', ()=>{
+    cy.login('abc','123456')
+    // ...
+  })
+})
+
+```
+
+Functionality that is shared between only a few specs (my recommendation is 2-3 or fewer) should be added to utility modules.
+
+#### Favour composition over Inheritance
+
+Why? Watch [this video](https://www.youtube.com/embed/wfMtDGfHWpA).
+
+Consider the below code which uses Inheritance
+
+```js
+class Person {
+  constructor(firstName, lastName) {
+    this.firstName = firstName;
+    this.lastName = lastName;
+  }
+  getInfo(greetStr) {
+    return `${greetStr}, I am ${this.firstName} ${this.lastName}.`;
+  }
+}
+
+class Employee extends Person {
+  constructor(firstName, lastName, employeeId) {
+    super(firstName, lastName);
+    this.employeeId = employeeId;
+  }
+
+  getId(greetStr) {
+    return `${greetStr}, my employee id is ${this.employeeId}.`;
+  }
+}
+const employee = new Employee('John', 'Doe', 123);
+console.log(employee.getInfo('Hi')); // Hi, I am John Doe.
+console.log(employee.getId('Hello')); // Hello, my employee id is 123.
+```
+
+The same functionality could be achieved using Composition like so
+
+```js
+// We first define all the functions that the classes would have
+// e.g. getInfo() and getId()
+
+function getInfo(firstName, lastName, greetStr){
+  return `${greetStr}, I am ${firstName} ${lastName}.`
+}
+
+function getId(employeeId, greetStr){
+  return `${greetStr}, my employee id is ${employeeId}.`;
+}
+
+// Instead of a Person Class we create a function
+// which returns an Object which represents the person. 
+
+// This object is "Composed" of the bindings and functions available to us
+
+function CreatePerson(firstName, lastName){
+  return {
+    firstName: firstName,
+    lastName: lastName,
+    getInfo: (greetStr) => getInfo(firstName, lastName, greetStr)
+  }
+}
+
+function CreateEmployee(firstName, lastName, employeeId){
+  return {
+    employeeId: employeeId,
+    getId : (greetStr) => getId(employeeId, greetStr),
+    getInfo: (greetStr) => getInfo(firstName, lastName, greetStr)
+  };
+}
+
+// Notice that the objects returned by CreatePerson
+// and CreateEmployee functions are independent of each other
+// (i.e. not bound by a relation, aka inheritance)
+// Both the returned objects have a property on them
+// whose value happens to be the same function.
+
+let person = CreatePerson('Bla', 'Bla')
+let employee = CreateEmployee('John', 'Doe', 123)
+console.log( employee.getInfo('Hi')) // Hi, I am John Doe.
+console.log( employee.getId('Hello')) // Hello, my employee id is 123.
+```
+
+Functions which return objects are called `Factory functions`. Watch [this video](https://www.youtube.com/embed/ImwrezYhw4w) for a more convincing argument on using *Factory Functions* over classes.
+
+### Readability
+
+The main benefit of using Page Objects is that they encapsulate the complexity of the UI and locators, thus helping with reusability and making the tests more readable.
+
+> For these examples I am using the [Cypress TodoMVC Example Repo](https://github.com/cypress-io/cypress-example-todomvc) and refactoring a few tests.
+
+Compare the below tests
+```js
+describe('Todo Application', ()=> {
+
+  it('Can add a new Todo', ()=>{
+    cy.get('.new-todo')
+      .type('First Todo')
+      .type('{enter}')
+
+    cy.get('.todo-list li')
+      .eq(0)
+      .find('label')
+      .should('contain', 'First Todo')
+  })
+})
+
+```
+
+VS
+
+```js
+import {addTodo, getTodoName} from './TodoUtil'
+
+describe('Todo Application', ()=> {
+
+  it('Can add a new Todo', ()=>{
+    addTodo('First Todo')
+      .then(getTodoName)
+      .should('equal', 'First Todo')
+  })
+})
+
+```
+
+The second test requires less cognitive load to understand because it's declarative and doesn't make us read through the steps of how to add a new todo or get its name.
+
+The `addTodo` and `getTodoName` come from the `TodoUtil` module
+
+```js
+// TodoUtil.js
+export const addTodo = (name) => {
+  cy.get('.new-todo')
+    .type(name)
+    .type('{enter}')
+
+  return cy
+    .get('.todo-list')
+    .eq(0)
+}
+
+export const getTodoName = (todo) => {
+  return todo.find('label')
+}
+```
+
+While the first approach is fine for small and simple scenarios, as the scenarios become more complex or longer, a more declarative approach can be a lifesaver.
+
+```js
+// The scenario to test that we are able to update the newly created Todo will look like
+import {addTodo, getTodoName, updateTodo} from './TodoUtil'
+
+describe('Todo Application', ()=> {
+
+  const TODO_NAME = 'First todo'
+
+  it('Can update a newly created todo', ()=>{
+    addTodo(TODO_NAME)
+      .then(updateTodo(TODO_NAME + ' new'))
+      .then(getTodoName)
+      .should('equal', TODO_NAME + ' new')
+  })
+})
+
+```
+
+The new method `updateTodo` from `TodoUtil.js` looks like
+
+```js
+export const updateTodo = (name) => ($todo) => {
+  cy.wrap($todo).within(() => {
+    cy.get('label').dblclick()
+    cy.get('.edit').clear().type(`${name}{enter}`)
+  })
+
+  return cy.wrap($todo)
+}
+```
+
+## But I still love my Page Objects.
+
+Most common arguments against Page Objects are
+
+1. 
Page Objects introduce additional state in addition to the system state, which makes tests hard to understand.
+2. Using Page Objects means that all our tests are going to go through the Application's GUI.
+3. Page objects try to fit multiple cases into a uniform interface, falling back to conditional logic.
+
+While the above arguments are all true in my experience, the biggest problem with Page Objects arises from Selenium's recommendation that "methods should return Page Objects".
+
+[Read all recommendations here](https://github.com/SeleniumHQ/selenium/wiki/PageObjects)
+
+Let's try to look at some common situations we find ourselves in when using Page Objects and how to solve them.
+
+### Single Responsibility Principle is not met by Page Objects
+POs bind unrelated functionality together in one class. E.g., in the below code the `searchProduct()` functionality is not related to the `login` or `logout` actions.
+
+```java
+public class HomePage {
+  private final WebDriver webDriver;
+
+  public HomePage(WebDriver webDriver) {
+    this.webDriver = webDriver;
+  }
+
+  public SignUpPage signUp() {
+    webDriver.findElement(By.linkText("Sign up")).click();
+    return new SignUpPage(webDriver);
+  }
+
+  public void logOut() {
+    webDriver.findElement(By.linkText("Log out")).click();
+  }
+
+  public LoginPage logIn() {
+    webDriver.findElement(By.linkText("Log in")).click();
+    return new LoginPage(webDriver);
+  }
+
+  public ProductPage searchProduct(String product){
+    webDriver.findElement(By.linkText(product)).click();
+    return new ProductPage(webDriver);
+  }
+
+}
+
+```
+
+This means that our class does not follow the Single Responsibility Principle (SRP).
+
+> The **Single Responsibility Principle (SRP)** states that every module or class should have responsibility over a single part of the functionality provided by the software, and that responsibility should be entirely encapsulated by the class.
+
+Breaking Page Objects like the above into multiple smaller Page Objects does not help with `SRP` either.
+
+E.g., we could take the `Login` action outside the `HomePage` and create a new `LoginPage` object and use it like below.
+
+```Java
+LoginPage loginPage = new HomePage().navigateToLoginPage();
+loginPage.login("username", "password");
+```
+
+As the actions belong to 2 different pages, this code will repeat in all test cases that use login. The responsibility is not entirely encapsulated.
+
+One way of fixing this is to define our Class/Module not by the page but by the intent.
+
+```js
+// login.js
+
+export const loginAsCustomer = (name, password) => {
+}
+
+```
+The `loginAsCustomer` method can then work through both the Home and Login screens of the application to complete login as a single user action.
+
+> :pencil: If possible, prefer basing your modules on user intent rather than basing them strictly by Page.
+
+### Page Order != User Flows
+
+Another place where PO may complicate things is when user flows are not the same as the page order.
+
+Consider the example of a shopping website. Here the user can add an item to the cart either using the Product Page or using the search functionality on the Search page.
+
+From the Cart page the user may be taken to either the Home page or the Search page on clicking "continue to shop", depending on whether the last item was added using the Product Page or the Search Page. 
+
+The code for the `CartPage` class might now look something like this
+
+```java
+public class CartPage {
+  Page continueShopping(){
+    if(state) { // determines using which page the last item was added
+      return new SearchPage();
+    }
+    else {
+      return new HomePage();
+    }
+  }
+}
+```
+
+**Why is this a problem?**
+
+Not only is this code more complex to understand and forces us to maintain additional state, it also makes it harder to modify the `CartPage` if another user flow is introduced in the future. This violates the _Open/Closed Principle (OCP)._
+
+> **The open/closed principle (OCP)** states "software entities (classes, modules, functions, etc.) should be open for extension, but closed for modification"
+
+One way to remove the state logic from our `Cart` Module is by changing the `continueShopping` method to not return references to other Classes/Modules and limit it to only clicking the continue shopping link.
+
+```js
+// cart.js
+
+export const continueShopping = () => {
+  cy.get('#continue').click();
+}
+
+// Test.js
+
+it('user can add item to cart from home page post selecting "continue to shop"', () => {
+  //.... code to add product to the cart from Product Page
+  cartPage.continueShopping();
+  homePage.navigateToProductPage();
+  productPage.addItemToCart('item2');
+})
+
+it('user can add item to cart from search page post selecting "continue to shop"', () => {
+  //.... code to add product to the cart using Search
+  cartPage.continueShopping();
+  searchPage.addItemToCart('item');
+})
+```
+
+In this example our test builds user flows just by choosing the right order of calling loosely coupled steps. This means our individual modules do not have to maintain state, and the state-based logic was removed.
+
+### Loosely coupled steps
+
+Need another example of how loosely coupled steps reduce complexity? Consider the below typical `LoginPage` class.
+
+The business requirement is that on successful login the user is taken to the "Home Page", and on unsuccessful login the user stays on the "Login" page.
+
+```java
+class LoginPage {
+  HomePage validLogin(String userName, String password){...}
+  LoginPage invalidLogin(String userName, String password){...}
+}
+```
+
+Now let's introduce roles into the mix. An "Admin" on login is taken to the "Admin Dashboard" instead of the Home Page. So we now need to add another method to the LoginPage class and return an instance of the "Admin Dashboard" Page.
+
+```java
+class LoginPage {
+  HomePage validLogin(String userName, String password){...}
+  LoginPage invalidLogin(String userName, String password){...}
+
+  AdminDashboardPage adminValidLogin(String userName, String password){...}
+}
+```
+
+More roles will mean even more methods because there is a tight coupling between the pages and the return type.
+
+Let's fix this by not returning references to different pages from the login action.
+
+```js
+// login.js
+const login = (username, password) => {
+  cy.get('.username').type(username)
+  cy.get('.password').type(password)
+  cy.get('.loginButton').click()
+}
+
+export default login
+```
+
+Our test will now look like
+
+```js
+// Test.js
+it('User is taken to Home Page on valid login', ()=> {
+  login('abc', '12345')
+  cy.title().should('equal', 'Home Page');
+})
+
+it('Admin is taken to Admin Dashboard on valid login', ()=> {
+  login('admin', '12345')
+  cy.title().should('equal', 'Admin Dashboard');
+})
+
+```
+
+Hopefully you can see that preferring loosely coupled steps leads to us writing fewer lines of code with reduced complexity. 
+
+If this did not convince you, we recommend [this pattern](https://applitools.com/blog/page-objects-app-actions-cypress/) for using page objects in Cypress.
+
+### Addendum (2023)
+
+Since writing this article years ago, I have come to some other realizations.
+
+An abstraction has a cost in readability, and sometimes abstraction does more harm than good; selectors growing and being abstracted may be such a case.
+
+Using data attributes (data-cy + a custom selector command) or Testing Library, the need for selector abstractions is less and less; at work we never even worry about them at all.
+
+Some of those helpers are only used in one test file; in that sense they do not need to be imported from another file or be a command, and they don't even need to be abstracted unless the code is being repeated within the spec file.
+
+Optimizing imports, commands, etc. is critical for test startup speed (https://www.youtube.com/watch?v=9d7zDR3eyB8). This means you do not want to overuse commands unless they are being used in many, many places; you might prefer helpers if the code only has to be repeated in 2-3 places. The screenshot shows an example of how that might play out in a large app with 1000+ tests.
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/bowqrt6upqw3q6n3q3d5.png)
+
+
+================================================
+FILE: blog/Handling Pact Breaking Changes Dynamically in CICD.mkd
+================================================
+# Handling Pact Breaking Changes Dynamically in CI/CD
+
+When working with consumer-driven contract testing using [Pact](https://docs.pact.io/), ensuring compatibility between provider and consumer services can be challenging, especially when introducing breaking changes. Providers often need to make changes that might not be immediately compatible with consumers, and coordinating these changes can be a hassle. To streamline this process, we can leverage environment variables and GitHub Actions to dynamically handle breaking changes in Pact tests with minimal developer intervention.
+
+In this blog post, I'll walk you through a method to manage breaking changes in Pact verification tests, ensuring a smoother development workflow and reducing the friction often associated with such changes.
+
+You can find the final code at the repository https://github.com/muratkeremozcan/pact-js-example-provider. It is continuously battle-tested and improved; you may see mismatches between the post and the code.
+
+------
+
+## The Problem: Breaking Changes in Pact Verification
+
+By default, when verifying consumer contracts, we might test against multiple consumer versions:
+
+- **`matchingBranch`**: Verifies against feature branches that match the provider's branch, facilitating coordinated development.
+- **`mainBranch`**: Verifies against the consumer's `main` branch, which should be stable.
+- **`deployedOrReleased`**: Verifies against consumer versions that are currently deployed or released into production.
+
+However, when a provider introduces a breaking change, these default checks can cause unnecessary failures. Developers want to focus on verifying against the `matchingBranch`, where compatible changes are expected, without being bogged down by failures from the `mainBranch` or `deployedOrReleased` versions.
+
+------
+
+## The Solution: Managing Breaking Changes with Environment Variables
+
+To address this, we introduce an environment variable, `PACT_BREAKING_CHANGE`. 
When set to `true`, it modifies the verification process to focus solely on the `matchingBranch`, skipping the checks against `mainBranch` and `deployedOrReleased`. This allows developers to proceed with breaking changes without the CI pipeline failing due to incompatibilities with consumer versions that haven't yet been updated.
+
+### Implementation Details
+
+#### Verifier Options
+
+We adjust the verifier options to include or exclude certain consumer version selectors based on the `PACT_BREAKING_CHANGE` variable:
+
+```typescript
+const options: VerifierOptions = {
+  providerBaseUrl: `http://127.0.0.1:${port}`,
+  provider: 'MoviesAPI',
+  publishVerificationResult: true,
+  providerVersion,
+  providerVersionBranch: providerBranch,
+  pactBrokerUrl: process.env.PACT_BROKER_BASE_URL,
+  pactBrokerToken: process.env.PACT_BROKER_TOKEN,
+  stateHandlers,
+  beforeEach,
+  afterEach,
+  enablePending: PACT_BREAKING_CHANGE === 'true',
+};
+
+// Determine which consumer versions to verify against
+options.consumerVersionSelectors = [
+  { matchingBranch: true },
+  ...(includeMainAndDeployed ? [
+    { mainBranch: true },
+    { deployedOrReleased: true },
+  ] : []),
+  ...(includeAllPacts ? [{ all: true }] : []),
+];
+```
+
+- **`includeMainAndDeployed`**: Set to `false` when `PACT_BREAKING_CHANGE` is `'true'`, excluding `mainBranch` and `deployedOrReleased` from verification.
+- **`enablePending`**: Enables pending pacts when introducing breaking changes, allowing the provider to accept pending pacts without failing the build.
+- **`includeAllPacts`**: When set, includes all pacts in the verification process, useful for broader testing during breaking changes.
+
+#### Building Consumer Version Selectors
+
+We encapsulate the logic for determining which consumer versions to verify in a helper function:
+
+```typescript
+import type { ConsumerVersionSelector } from '@pact-foundation/pact-core';
+
+function buildConsumerVersionSelectors(
+  consumer: string | undefined,
+  includeMainAndDeployed: boolean,
+  includeAllPacts: boolean
+): ConsumerVersionSelector[] {
+  const baseSelector: Partial<ConsumerVersionSelector> = consumer ? { consumer } : {};
+
+  const selectors: ConsumerVersionSelector[] = [
+    { ...baseSelector, matchingBranch: true },
+    ...(includeMainAndDeployed ? [
+      { ...baseSelector, mainBranch: true },
+      { ...baseSelector, deployedOrReleased: true },
+    ] : []),
+    ...(includeAllPacts ? [{ ...baseSelector, all: true }] : []),
+  ];
+
+  return selectors;
+}
+```
+
+## Integrating with CI/CD
+
+To make this process seamless in CI, we use GitHub Actions to set the `PACT_BREAKING_CHANGE` environment variable based on the PR description. Developers can indicate a breaking change by including a checkbox in the PR description. This automates the process, eliminating the need for manual intervention. 
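+
+Wiring the helper to the environment variable then looks like the sketch below; the inline comments show what each mode yields (`PACT_CONSUMER` is from the author's repo, the other names are illustrative):
+
+```typescript
+const breaking = process.env.PACT_BREAKING_CHANGE === 'true';
+
+const selectors = buildConsumerVersionSelectors(
+  process.env.PACT_CONSUMER, // optionally scope verification to a single consumer
+  !breaking, // includeMainAndDeployed
+  false // includeAllPacts
+);
+
+// PACT_BREAKING_CHANGE=true  => [{ matchingBranch: true }]
+// PACT_BREAKING_CHANGE=false => [{ matchingBranch: true }, { mainBranch: true }, { deployedOrReleased: true }]
+```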
+ +### GitHub Actions Workflow + +Here's how we set up the workflow: + +```yml +name: Run contract tests + +on: + pull_request: + types: [opened, synchronize, reopened, edited] + push: + branches: + - main + +jobs: + contract-test: + runs-on: ubuntu-latest + env: + PACT_BROKER_BASE_URL: ${{ secrets.PACT_BROKER_BASE_URL }} + PACT_BROKER_TOKEN: ${{ secrets.PACT_BROKER_TOKEN }} + GITHUB_SHA: ${{ github.sha }} + GITHUB_REF_NAME: ${{ github.ref_name }} + DATABASE_URL: 'file:./dev.db' + PORT: 3001 + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref || github.ref_name }} + fetch-depth: 0 + + - name: Read Node version from .nvmrc + id: node_version + run: echo "NODE_VERSION=$(cat .nvmrc)" >> $GITHUB_ENV + + - uses: actions/setup-node@v4 + with: + node-version: ${{ env.NODE_VERSION }} + + # Set PACT_BREAKING_CHANGE based on PR description during pull_request events + - name: Set PACT_BREAKING_CHANGE based on PR description (PR event) + if: ${{ github.event_name == 'pull_request' }} + uses: actions/github-script@v7 + with: + script: | + const prBody = context.payload.pull_request.body || ''; + if (prBody.includes('[x] Pact breaking change')) { + core.exportVariable('PACT_BREAKING_CHANGE', 'true'); + console.log('PACT_BREAKING_CHANGE set to true based on PR description checkbox.'); + } else { + core.exportVariable('PACT_BREAKING_CHANGE', 'false'); + console.log('PACT_BREAKING_CHANGE remains false.'); + } + + # Set PACT_BREAKING_CHANGE based on merged PR description during push events + - name: Set PACT_BREAKING_CHANGE based on merged PR description (push to main) + if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} + uses: actions/github-script@v7 + with: + script: | + const commitSha = context.sha; + const { data: prs } = await github.rest.repos.listPullRequestsAssociatedWithCommit({ + owner: context.repo.owner, + repo: context.repo.repo, + commit_sha: commitSha, + }); + const mergedPr = prs.find(pr => pr.merged_at !== null && pr.merge_commit_sha === commitSha); + if (mergedPr) { + const prBody = mergedPr.body || ''; + if (prBody.includes('[x] Pact breaking change')) { + core.exportVariable('PACT_BREAKING_CHANGE', 'true'); + console.log('PACT_BREAKING_CHANGE set to true based on merged PR description checkbox.'); + } else { + core.exportVariable('PACT_BREAKING_CHANGE', 'false'); + console.log('PACT_BREAKING_CHANGE remains false.'); + } + } else { + core.exportVariable('PACT_BREAKING_CHANGE', 'false'); + console.log('No merged PR found for this commit. PACT_BREAKING_CHANGE remains false.'); + } + + - name: Install dependencies + run: npm ci + + - name: Run provider contract tests + run: | + echo "Running provider contract tests with PACT_BREAKING_CHANGE=$PACT_BREAKING_CHANGE" + npm run test:provider-ci + + - name: Can I deploy provider? + if: env.PACT_BREAKING_CHANGE == 'false' + run: npm run can:i:deploy:provider + + - name: Record provider deployment + if: github.ref == 'refs/heads/main' + run: npm run record:provider:deployment --env=dev +``` + +### PR Description Checkbox + +Developers can indicate a breaking change by including the following in their PR description: + +```markdown +### Pact Breaking Change + +- [x] Pact breaking change (check if this PR introduces a breaking change) +``` + +When the checkbox is checked, the `PACT_BREAKING_CHANGE` variable is set to `true` in the CI environment. 
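+As an aside, the detection itself is just a substring check on the PR body. If you wanted to unit test it outside of CI, a hypothetical extraction of that logic could look like this (helper name and placement are assumptions):
+
+```typescript
+// Hypothetical helper mirroring the github-script logic above:
+// true only when the breaking-change checkbox is ticked in the PR body.
+export const hasPactBreakingChange = (prBody?: string | null): boolean =>
+  (prBody ?? '').includes('[x] Pact breaking change')
+```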
+ +## Handling Verification Failures Due to Breaking Changes + +In scenarios where verification failures occur due to breaking changes, we can adjust our test code to handle these failures gracefully. Here's how we modify the test: + +```typescript +const PACT_BREAKING_CHANGE = process.env.PACT_BREAKING_CHANGE || 'false' +const GITHUB_BRANCH = process.env.GITHUB_BRANCH || 'local' + +describe('Pact Verification', () => { + const port = process.env.PORT || '3001' + const options = buildVerifierOptions({ + provider: 'MoviesAPI', + consumer: process.env.PACT_CONSUMER, // filter by the consumer, or run for all if no env var is provided + includeMainAndDeployed: PACT_BREAKING_CHANGE !== 'true', // if it is a breaking change, set the env var + enablePending: PACT_BREAKING_CHANGE === 'true', + port, + stateHandlers, + beforeEach: () => { + console.log('I run before each test coming from the consumer...') + return Promise.resolve() + }, + afterEach: () => { + console.log('I run after each test coming from the consumer...') + return Promise.resolve() + } + }) + const verifier = new Verifier(options) + + it('should validate the expectations of movie-consumer', async () => { + try { + const output = await verifier.verifyProvider() + console.log('Pact Verification Complete!') + console.log('Result:', output) + } catch (error) { + console.error('Pact Verification Failed:', error) + + if (PACT_BREAKING_CHANGE === 'true' && GITHUB_BRANCH === 'main') { + console.log( + 'Ignoring Pact verification failures due to breaking change on main branch.' + ) + } else { + throw error // Re-throw the error to fail the test + } + } + }) +}) +``` + +- **Error Handling**: We wrap the verification in a try-catch block. If verification fails and `PACT_BREAKING_CHANGE` is `'true'` on the `main` branch, we log a message and prevent the test from failing the CI pipeline. +- **Local Development**: When running tests locally, `PACT_BREAKING_CHANGE` defaults to `'false'`, ensuring that tests fail on verification failures, prompting developers to address issues promptly. + +## Workflow Summary + +This approach allows us to manage breaking changes effectively by: + +- **Automating Environment Variable Setting**: Using GitHub Actions to set `PACT_BREAKING_CHANGE` based on the PR description reduces manual steps. +- **Adjusting Verification Behavior**: Modifying the consumer version selectors dynamically based on `PACT_BREAKING_CHANGE` ensures that we only verify relevant consumer versions during breaking changes. +- **Handling Test Failures Gracefully**: By adjusting the test code to conditionally handle verification failures, we prevent unnecessary CI failures while maintaining test integrity. + +## Breaking Change Flows + +### Consumer Flow + +For consumers adapting to breaking changes: + +```bash +# 1. Update the consumer tests +npm run test:consumer # Execute the updated tests +npm run publish:pact # Publish the updated pact +npm run can:i:deploy:consumer # Check if it's safe to deploy +# Only on main +npm run record:consumer:deployment --env=dev # Record the deployment +``` + +### Provider Flow + +For providers introducing breaking changes: + +```bash +# 1. Create a branch with the breaking change +PACT_BREAKING_CHANGE=true npm run test:provider-ci # Run the provider tests with the breaking change flag +# Note: The can:i:deploy:provider step is skipped because we're introducing a breaking change +# 2. 
Merge to main
+```
+
+## Conclusion
+
+By introducing this environment-driven mechanism, we now have a flexible way to manage breaking changes in Pact tests. This approach allows us to:
+
+- Maintain confidence in our provider contracts while rolling out changes incrementally.
+- Simplify the developer experience by dynamically handling the changes via an env var and a checkbox in the PR description.
+
+Moreover, the addition of the `buildConsumerVersionSelectors` function significantly DRYs up the code by centralizing the logic for determining which consumer versions to verify. By encapsulating this logic, it not only makes the code more maintainable but also provides a cleaner developer experience when managing consumer filtering and selector behavior.
+
+This approach can be applied to any project using Pact and provides a smooth, automated way to handle breaking changes without manual intervention.
+
+
+================================================
+FILE: blog/Improve Cypress e2e test latency by a factor of 20!!.mkd
+================================================
+# Improve Cypress e2e test latency by a factor of 20!!
+
+It has been almost 2 years since my friend Gleb Bahmutov published the blog [Fast Cypress spec bundling using ESBuild](https://glebbahmutov.com/blog/fast-esbuild/) and the npm module [cypress-esbuild-preprocessor](https://github.com/bahmutov/cypress-esbuild-preprocessor). He reported incredible gains in test latency when using esbuild as opposed to the Cypress built-in preprocessor that uses Webpack under the hood. In layman's terms, test latency is the time it takes to bundle & start a test: the time we see **"Your Tests Are Starting..."** before the execution.
+
+Looking at [sourcegraph.com](https://sourcegraph.com/) or [GitHub code search](https://cs.github.com/) for the search string `from '@bahmutov/cypress-esbuild-preprocessor'`, at the time of writing we only find a handful of open source repos taking advantage of esbuild with Cypress. We thought we should spread the good word and report at scale on the cost savings in engineering and CI feedback time.
+
+Any blog post is lackluster without working code, so here is a [PR from scratch](https://github.com/muratkeremozcan/cypress-crud-api-test/pull/229/files) adding esbuild to a repository with Cypress. You can find the final code on the main branch of [the repository we will use in this example](https://github.com/muratkeremozcan/cypress-crud-api-test). Other examples can be found at [tour-of-heroes-react-cypress-ts](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts) as well as a [VueJS app](https://github.com/muratkeremozcan/appsyncmasterclass-frontend). The framework and the bundler the framework uses are irrelevant; any repo can take advantage of [cypress-esbuild-preprocessor](https://github.com/bahmutov/cypress-esbuild-preprocessor) for e2e tests.
+
+> Check out the cross-linked YouTube video at https://www.youtube.com/watch?v=diB6-jikHvk
+
+> The esbuild preprocessor only applies to Cypress e2e tests. Our component tests use the same bundler our framework is using. We are still looking into practical custom bundler recipes (maybe your app uses webpack but you want to use vite in the component tests), and we will update this post if there are any possible improvements to component test latency.
+
+
+- [TL, DR;](#tl-dr)
+- [Long version](#long-version)
+  - [Optional prerequisite: optimize cypress config for plugins, tasks, commands, e2e, ct](#optional-prerequisite-optimize-cypress-config-for-plugins-tasks-commands-e2e-ct)
+  - [Step 1: Add the packages](#step-1-add-the-packages)
+  - [Step 2: Isolate the preprocessor task in its own file \& import into the config file](#step-2-isolate-the-preprocessor-task-in-its-own-file--import-into-the-config-file)
+  - [Step 3: If there are any compile related issues, wrap them in `cy.task()`](#step-3-if-there-are-any-compile-related-issues-wrap-them-in-cytask)
+- [Local feedback duration](#local-feedback-duration)
+- [CI feedback duration and cost savings](#ci-feedback-duration-and-cost-savings)
+- [Wrap up](#wrap-up)
+
+## TL, DR;
+
+* Add the packages
+
+```bash
+yarn add -D @bahmutov/cypress-esbuild-preprocessor esbuild @esbuild-plugins/node-globals-polyfill @esbuild-plugins/node-modules-polyfill
+```
+
+> The esbuild polyfills may not be necessary in simpler repos, but where they are needed, their absence will cause cryptic errors. You can toggle them later to see if they are needed.
+
+* Isolate the preprocessor task in its own file: [TS example](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/cypress/support/esbuild-preprocessor.ts), [JS example](https://github.com/muratkeremozcan/appsyncmasterclass-frontend/blob/main/cypress/support/esbuild-preprocessor.js). Import it into the config file(s): [TS example](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/cypress.config.ts#L22), [JS example](https://github.com/muratkeremozcan/appsyncmasterclass-frontend/blob/main/cypress/config/local.config.js#L20).
+
+> This will make it seamless to toggle the esbuild preprocessor at any point in the future, making it easy to isolate webpack vs esbuild compile issues, and to opt out of the workaround later if Cypress makes esbuild the default e2e bundler.
+
+* If there are any compile related issues, possibly from polyfills not having support for packages intended for Node.js usage, such as [fs or crypto](https://github.com/remorses/esbuild-plugins/blob/d9f6601a24dc4e0470046eda8c772e6523c52b96/node-modules-polyfill/src/polyfills.ts#L142), wrap them in [cy.task](https://docs.cypress.io/api/commands/task#docusaurus_skipToContent_fallback) so that the code executes in the Node.js context instead of the browser.
+
+> [Here is an externally reproduced blocker and the workaround to it with `cy.task`](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/pull/219/files#diff-f18e5de0df19ff702ed389b87fbaab9ecfa62544389e805f73ad5ffa8d2764e0R12), about `jsonwebtoken` and crypto. `jwt.sign` from `jsonwebtoken` causes [a compile issue](https://github.com/cypress-io/cypress/issues/25533#issuecomment-1434520618), therefore we wrap it in `cy.task`. We will go through another example below so you can see the error and practice solving it with `cy.task`.
+
+## Long version
+
+### Optional prerequisite: optimize cypress config for plugins, tasks, commands, e2e, ct
+
+This optimization will help speed up our test warmup time at scale, and further simplify our plugin and task configurations. The process is described elaborately in [this video](https://www.youtube.com/watch?v=9d7zDR3eyB8) and [the PR](https://github.com/muratkeremozcan/react-cypress-ts-vite-template/pull/55), and the final code is shown in two simple template examples: [CRA-repo](https://github.com/muratkeremozcan/react-cypress-ts-template), [Vite-repo](https://github.com/muratkeremozcan/react-cypress-ts-vite-template). This is the way we wish Cypress came out of the box.
+
+Here are the main takeaways:
+
+- `support/commands.ts`, `e2e.ts`, `component.ts`/`tsx` must exist, or they will get created on `cypress open`.
+
+  - `e2e.ts` runs before e2e tests.
+  - `component.ts` runs before component tests.
+  - `commands.ts` is imported in the `e2e.ts` and `component.ts` files, therefore it runs before any kind of test.
+  - Put commands applicable to both e2e and CT in `commands.ts`.
+  - Put e2e-only commands in `e2e.ts`, ct-only commands in `component.ts/tsx`.
+
+- Prefer to import plugins in spec files as opposed to importing them in one of the above files. Only import in the above 3 files if they must be included in every test. I.e. if the plugin must apply to all e2e, import it in `e2e.ts`; if it must apply to all CT, import it in `component.ts`. If it must be everywhere, import it in `commands.ts`.
+- Some plugins also have to be included under the `setupNodeEvents` function in the `cypress.config` file(s), for example [cypress-data-session](https://github.com/bahmutov/cypress-data-session#v10) needs this to use the [shareAcrossSpecs](https://github.com/bahmutov/cypress-data-session#shareacrossspecs) option. Isolate all such plugins in one file; [TS example](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/cypress/support/plugins.ts).
+- Similar to the previous bullet point, tasks can also be isolated under one file as in this [example](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/cypress/support/tasks.ts#L8). We can enable the tasks with a [one-liner](https://github.com/muratkeremozcan/tour-of-heroes-react-cypress-ts/blob/main/cypress.config.ts#L23), which is particularly useful when we have multiple config files, for example when we have a config per deployment.
+- Large imports impact bundle time negatively; for example prefer named imports vs global imports, and prefer the `en` locale for `fakerJs` vs every locale if you only need English.
+
+### Step 1: Add the packages
+
+We will be using a sample repo with only Cypress, [cypress-crud-api-test](https://github.com/muratkeremozcan/cypress-crud-api-test), that has the above prerequisite fulfilled. This just makes the esbuild preprocessor easier to bring into the project, but it is not a requirement.
+
+Clone the repo `https://github.com/muratkeremozcan/cypress-crud-api-test` and check out the branch `before-esbuild` to start from scratch. You can find the [final PR here](https://github.com/muratkeremozcan/cypress-crud-api-test/pull/229/files).
+
+`yarn add -D @bahmutov/cypress-esbuild-preprocessor esbuild @esbuild-plugins/node-globals-polyfill @esbuild-plugins/node-modules-polyfill`
+
+### Step 2: Isolate the preprocessor task in its own file & import into the config file
+
+Copy this code to `cypress/support/esbuild-preprocessor.ts`:
+
+```typescript
+// ./cypress/support/esbuild-preprocessor.ts
+
+import { NodeGlobalsPolyfillPlugin } from "@esbuild-plugins/node-globals-polyfill";
+import { NodeModulesPolyfillPlugin } from "@esbuild-plugins/node-modules-polyfill";
+const createBundler = require('@bahmutov/cypress-esbuild-preprocessor');
+
+export default function esbuildPreprocessor(on: Cypress.PluginEvents) {
+  on(
+    "file:preprocessor",
+    createBundler({
+      plugins: [
+        NodeModulesPolyfillPlugin(),
+        NodeGlobalsPolyfillPlugin({
+          process: true,
+          buffer: true,
+        }),
+      ],
+    })
+  );
+}
+```
+
+Import the task in the config file.
+
+> We can comment out the line any time to opt out of esbuild.
+
+```typescript
+// ./cypress.config.ts
+
+import { defineConfig } from "cypress";
+import plugins from "./cypress/support/plugins";
+import tasks from "./cypress/support/tasks";
+import esbuildPreprocessor from "./cypress/support/esbuild-preprocessor"; // new
+
+export default defineConfig({
+  viewportHeight: 1280,
+  viewportWidth: 1280,
+  projectId: "4q6j7j",
+
+  e2e: {
+    setupNodeEvents(on, config) {
+      esbuildPreprocessor(on); // new
+      tasks(on);
+      return plugins(on, config);
+    },
+    baseUrl: "https://2afo7guwib.execute-api.us-east-1.amazonaws.com/latest",
+  },
+});
+```
+
+### Step 3: If there are any compile related issues, wrap them in `cy.task()`
+
+At this point we are done, because in this repo we do not have any compile issues. We are already wrapping the Node.js native package [`jsonwebtoken`](https://github.com/muratkeremozcan/cypress-crud-api-test/blob/main/scripts/cypress-token.ts) in [`cy.task`](https://github.com/muratkeremozcan/cypress-crud-api-test/blob/main/cypress/support/tasks.ts).
+
+Let's suppose we were not doing that, and reproduce a compile issue you may run into.
+
+Create a test file `compile-error.cy.ts`:
+
+```typescript
+// ./cypress/e2e/compile-error.cy.ts
+
+import jwt from "jsonwebtoken"; // use version 8.5.1
+
+// The jwt.sign method expects the payload as the first argument,
+// the secret key as the second argument,
+// and options (such as expiration time) as the third argument
+const newToken = () =>
+  jwt.sign(
+    {
+      email: "c",
+      firstName: "b",
+      lastName: "c",
+      accountId: "123",
+      scope: "orders:order:create orders:order:delete orders:order:update",
+    },
+    "TEST",
+    {
+      expiresIn: "10m",
+      subject: "123",
+    }
+  );
+
+it("fails", () => {
+  console.log(newToken());
+});
+```
+
+Execute the test, and we get a cryptic compile error:
+
+![compile-error](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/wr6dosve74eue1u99bde.png)
+
+Revert to the webpack preprocessor by disabling the `esbuildPreprocessor`:
+
+```typescript
+import { defineConfig } from "cypress";
+import plugins from "./cypress/support/plugins";
+import tasks from "./cypress/support/tasks";
+// import esbuildPreprocessor from './cypress/support/esbuild-preprocessor'
+
+export default defineConfig({
+  viewportHeight: 1280,
+  viewportWidth: 1280,
+  projectId: "4q6j7j",
+
+  e2e: {
+    setupNodeEvents(on, config) {
+      // esbuildPreprocessor(on) // DISABLED
+      tasks(on);
+      return plugins(on, config);
+    },
+    baseUrl: "https://2afo7guwib.execute-api.us-east-1.amazonaws.com/latest",
+  },
+});
+```
+
+We see that the test takes a few seconds to start(!) but it compiles. We can even see the token value in the console.
+
+Re-enable the `esbuildPreprocessor`, and let's work around the issue by wrapping the Node.js native code in `cy.task`.
+
+Create a new file `cypress/support/newToken.ts`:
+
+```typescript
+// ./cypress/support/newToken.ts
+
+import jwt from "jsonwebtoken";
+
+const newToken = () =>
+  jwt.sign(
+    {
+      email: "c",
+      firstName: "b",
+      lastName: "c",
+      accountId: "123",
+      scope: "orders:order:create orders:order:delete orders:order:update",
+    },
+    "TEST",
+    {
+      expiresIn: "10m",
+      subject: "123",
+    }
+  );
+
+export default newToken;
+```
+
+Add the task to `cypress/support/tasks.ts`:
+
+```typescript
+import log from "./log";
+import newToken from "./newToken"; // the new task
+import * as token from "../../scripts/cypress-token";
+
+export default function tasks(on: Cypress.PluginEvents) {
+  on("task", { log });
+
+  on("task", token);
+
+  on("task", { newToken }); // the new task
+}
+```
+
+Use `cy.task` in the test, and we are green.
+
+```typescript
+// ./cypress/e2e/compile-error.cy.ts
+it("fails NOT!", () => {
+  cy.task("newToken").then(console.log);
+});
+```
+
+![no-error](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/meauaiobg0qy1vaoo5jl.png)
+
+This approach has worked really well in multiple external as well as internal projects at Extend. Let's look at some results at scale.
+
+## Local feedback duration
+
+We demoed some esbuild results on a small project in [this video](https://www.youtube.com/watch?v=Hc_3oLpayOY). At scale, in real-world applications, the numbers are even more impressive. Here are some results for local testing with 3 internal applications at Extend. They use lots of plugins and reach over 2 million (it block) executions per year according to Cypress Cloud.
+
+```markdown
+|       | plugin optimization | esbuild-preprocessor | test latency improvement        |
+| ----- | ------------------- | -------------------- | ------------------------------- |
+| App A | none                | yes                  | 20sec -> 2 sec, 10x improvement |
+| App B | yes                 | none                 | 20sec -> 10 sec, 2x improvement |
+| App C | yes                 | yes                  | 20sec -> 1 sec, 20x improvement |
+```
+
+The 8 minute video [Improve Cypress e2e test latency by a factor of 20!!](https://www.youtube.com/watch?v=diB6-jikHvk) demonstrates the results in action.
+
+Esbuild alone gave us a 10x test latency improvement. The cost was minimal: just the changes in [the sample PR here](https://github.com/muratkeremozcan/cypress-crud-api-test/pull/229/files).
+
+Performing the plugin import optimization (described in [this video](https://www.youtube.com/watch?v=9d7zDR3eyB8)) gave us a 2x improvement, albeit at the cost of hundreds, sometimes thousands, of changed lines of code.
+
+Opinion: if you are starting fresh, or if you do not have too many tests, do both optimizations. If you have many tests and the esbuild optimization is satisfactory, skip the plugin optimization.
+
+## CI feedback duration and cost savings
+
+Mind that Cypress Cloud only reports on test execution duration, which does not include test latency; "Your Tests Are Starting...". We have to look at CI execution results to see the gain. Any improvement on Cypress Cloud reported test duration is a bonus.
+
+The following CI results are only for the esbuild preprocessor; in this app we already had the plugins and file imports optimized.
+
+In the before state, we have 14 parallel machines, each taking around 12.5 minutes:
+
+![CI-before](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/x1autgxh3nctpr641mxm.png)
+
+After the esbuild preprocessor improvement, we are saving around 2 minutes per machine, which is a ~15% improvement in execution time. It also reflects in CI minutes: ~22 minutes less in this case.
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/qnck6amn2mwu1a8bxbab.png)
+
+Here is the before view of the test suite in Cypress Cloud. The duration was 6:09. Mind that the graph looks open on the right side because of component test machines starting later.
+
+![Cy-cloud-before](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/uq6a29ggr035dm0d7cf1.png)
+
+Here is the Cypress Cloud view after the esbuild preprocessor improvements. Surprisingly, test execution speed also came down to 4:28. This means esbuild also affected the test duration, by about 20%. The view is at a different scale because the component tests were parallelized and finished faster in this run, but we can notice the reduced gap between the green blocks, which are the test spec files. They start faster back to back.
+
+![cy-cloud-after](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/sga88eiwfme2fxi4ozpj.png)
+
+We should analyze the results together with Cypress Cloud engineers; perhaps our assumptions are not entirely accurate, though a conservative estimate would be that per CI run we are saving at least 20% feedback time and cost in CI minutes.
+
+If we look at GitHub workflow runs in the past year in one of the projects, even with very conservative numbers, we can be saving an immense amount of time every year for engineers waiting for their results. Suppose 100k e2e runs happen every year, each saving 2 minutes of wait time for the engineer, and ~20ish CI minutes. That's over 100 days of engineering time saved per year (100,000 × 2 minutes ≈ 139 days), and over 1000 days of CI minutes (100,000 × 20 minutes ≈ 1389 days).
+
+![workflow-runs](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/8yoj7ia4g58fxat57v11.png)
+
+## Wrap up
+
+The esbuild preprocessor is easy to implement for current Cypress e2e test suites, giving incredible local and CI time & cost savings.
+
+The plugin and file import tune-up is recommended for new projects, or if the cost of the refactor is feasible.
+
+We really want Cypress to make esbuild the norm everywhere. Give your thumbs up to the open feature request https://github.com/cypress-io/cypress/issues/25533.
+
+Many thanks to Gleb Bahmutov, Lachlan Miller, and all the Cypress engineers making the tool better and more performant.
+
+
+================================================
+FILE: blog/Page Objects vs. Functional Helpers.mkd
+================================================
+# Page Objects vs. Functional Helpers
+
+A while ago, I published [Functional Programming Test Patterns with Cypress](https://dev.to/muratkeremozcan/functional-test-patterns-with-cypress-27ed), where I went in-depth on **why page objects are unnecessary in modern test automation**. Back then, I didn’t realize just how **ahead of its time** that take was—since many people still **insist on using page objects today**.
+
+This post is a **simpler, updated version** of that argument.
+
+---
+
+### **🚀 Why Functional Helpers > Page Objects?**
+
+The **Page Object Model (POM)** follows **inheritance**, while **functional helpers follow composition**.
+
+But modern web apps are built with **component-based architecture**, where **components** are the real building blocks—not pages.
+
+❓ **If components compose and pages are just collections of them, does it really make sense to abstract pages with classes?** Or does it introduce **unnecessary duplication and over-abstraction**?
+
+In modern testing frameworks like **Playwright** and **Cypress**, strict **Page Object Model (POM)** is often **overkill**, especially when:
+
+✅ You’re using **data selectors (`data-qa`, `data-cy`)** for stable locators.
+✅ The tools already offer **powerful built-in utilities** for UI interactions.
+✅ POM introduces **extra complexity that makes debugging harder**.
+
+---
+
+## **❌ Why Page Objects No Longer Make Sense**
+
+**🔙 Why POM Needed Classes in the Past**
+
+It’s worth acknowledging that POM wasn’t originally a bad idea—in the Selenium/WebDriver days, classes were necessary to manage workflows across different pages because test frameworks lacked built-in ways to handle:
+• State → A base page maintained shared properties like `driver` (in Selenium) or `page` (in Playwright).
+• Inheritance → Page classes extended a `BasePage` to inherit common navigation, helper methods, or interactions.
+• Encapsulation → Pages wrapped UI interactions to abstract implementation details and expose only relevant actions.
+
+At the time, this structure solved real problems—but modern frameworks already provide solutions for these things, making POM mostly redundant.
+
+For example, Playwright already manages state with `context.newPage()`, provides a clean API for interactions, and supports direct functional composition without forcing abstraction layers.
+
+📌 POM was useful when test frameworks were low-level, but today? The problems it solved either no longer exist or are solved better without it.
+
+### **1️⃣ Unnecessary Abstraction**
+
+- POM adds an extra **layer** that often **doesn’t provide tangible value**.
+- Modern test frameworks are already **powerful enough** without it.
+
+### **2️⃣ Base Page Inheritance is Overkill**
+
+- Having a `BasePage` class with generic methods (`click()`, `fill()`) **just to wrap Playwright’s API** (or Cypress) makes no sense.
+- Playwright (or Cy) **already** has `page.locator()`, `page.click()`, `page.fill()`, etc.
+
+### **3️⃣ Harder Debugging**
+
+- With **POM**, if a test fails, you **have to jump between multiple files** to figure out what went wrong.
+- With **direct helper functions**, you **see exactly what’s happening**.
+ +--- + +## **🔴 Traditional Page Object Model (POM)** + +🚨 **Problems with POM:** +❌ **Unnecessary complexity** → Extra class & inheritance +❌ **Harder debugging** → Need to jump between files +❌ **Wrapping Playwright’s own API for no reason** + +🔹 **Example (`LoginPage.js` - POM Approach)** + +```js +class LoginPage { + constructor(page) { + this.page = page; + this.usernameField = page.locator('[data-testid="username"]'); + this.passwordField = page.locator('[data-testid="password"]'); + this.loginButton = page.locator('[data-testid="login-button"]'); + } + + async login(username, password) { + await this.usernameField.fill(username); + await this.passwordField.fill(password); + await this.loginButton.click(); + } +} + +export default LoginPage; +``` + +🔹 **Usage in a Test** + +```js +import { test, expect } from "@playwright/test"; +import LoginPage from "./LoginPage.js"; + +test("User can log in", async ({ page }) => { + const loginPage = new LoginPage(page); + await loginPage.login("testUser", "password123"); + + await expect(page.locator('[data-testid="welcome-message"]')).toHaveText( + "Welcome, testUser" + ); +}); +``` + +--- + +## **✅ Functional Helper Approach (Better)** + +### **📌 Why is this better?** + +✅ **No extra class** → Directly use Playwright API +✅ **No unnecessary `this.page` assignments** +✅ **Much easier to maintain & debug** + +🔹 **Example (`loginHelpers.js` - Functional Helper Approach)** + +```js +export async function login(page, username, password) { + await page.fill('[data-testid="username"]', username); + await page.fill('[data-testid="password"]', password); + await page.click('[data-testid="login-button"]'); +} +``` + +🔹 **Usage in a Test** + +```js +import { test, expect } from "@playwright/test"; +import { login } from "./loginHelpers.js"; + +test("User can log in", async ({ page }) => { + await login(page, "testUser", "password123"); + + await expect(page.locator('[data-testid="welcome-message"]')).toHaveText( + "Welcome, testUser" + ); +}); +``` + +--- + +## **🔥 Final Thoughts** + +Helper functions are **simpler**, **faster to debug**, and **scale better** in component-driven apps. + +💡 **POM was useful in Selenium/WebDriver days, but today? Just use functions.** + +🔥 **What do you think?** Are you still using POM? Have you already switched to functional helpers? +💬 **Drop a comment below**—I’d love to hear your take on this! + +--- + +## Addendum + +Thanks everyone for their contributions! + +One thing I identified is that Component in testing isn’t used like in UI frameworks. + +For frontend devs, a component is a reusable UI unit with state, props, and lifecycle hooks. + +For QA, a component is often just a logical grouping of elements & interactions within a larger page. + +🔹 ex: A checkout form in testing might represent the entire checkout UI, while in React, it might be broken into FormField, Button, AddressInput. + +🔹 In POM, subcomponents are often instantiated as properties within a page object, mirroring the UI structure but without true reusability. + +While I understand this approach, I don’t fully agree with the terminology. + +I advocate for UI-component-driven testing, as I discuss in my book: https://muratkerem.gitbook.io/cctdd. + +With tools like PW Component Testing, Cy Component Testing & Vitest with UI, we can now test at a lower level, reducing the need for full-page interactions. 
+
+Though still uncommon in QA, this shift solves many POM complexities:
+
+✅ test components in isolation before e2e
+✅ many cases covered at the UI component level
+✅ smaller tests = quicker execution
+
+We only move up the testing pyramid when we must: routing, user flows, backend calls.
+
+---
+
+I see that even the strongest advocates of POM now acknowledge its role has shifted—it’s mainly used to organize and store locators. But with AI-assisted test debugging advancing rapidly, it’s time to rethink this approach entirely.
+
+Watch this [Playwright demo](https://www.youtube.com/live/NcSk9fOGEac) (~13 min mark) by [Debbie O'Brien](https://www.linkedin.com/in/debbie-obrien/) & [Simon Knott](https://www.linkedin.com/in/simon-knott/).
+
+They showcase an experimental Playwright config that outputs the DOM in a structured way, similar to [aria snapshots](https://playwright.dev/docs/aria-snapshots#aria-snapshots).
+
+This creates a clean, machine-readable format (like YML) that represents the DOM in a concise way. The AI then analyzes the DOM snapshot, diagnoses the failure, and suggests stable ARIA selectors.
+
+What does this mean for QA?
+
+Instead of manually managing locators in a POM abstraction, we should adopt ARIA selectors as the standard for DOM interaction.
+
+1. ARIA selectors provide a shared understanding between screen readers, test tools like Playwright, and AI.
+
+2. Debugging can be AI-powered—rather than relying on a static POM file, AI can analyze the ARIA selector tree and suggest better selectors dynamically.
+
+3. The DOM becomes the source of truth, not a manually maintained POM abstraction.
+
+If the argument is still **“store all selectors in a POM,”** I disagree. The future of test automation is **ARIA-driven**, where the **source code and AI-powered ARIA snapshots** become the **true source of truth**—not a manually curated **selectors file**.
+
+---
+
+This post took off, didn’t it?
+
+The conversation in the comments has revealed perspectives I initially overlooked. One key takeaway is that many teams still use non-JS/TS languages (Java, C#) for UI testing, meaning they don’t even have the option of functional helpers—they are locked into POM and class-based structures by default.
+
+I didn’t consider this because, for me, the core unwritten rule is that UI tests should be written in the same language as the source code. If the app is built in JavaScript/TypeScript, it makes sense to test it in JS/TS.
+
+For teams transitioning from non-JS/TS to modern test frameworks, I now see how this post could feel completely foreign. In that case, the first step isn’t functional helpers vs. POM—it’s reevaluating whether Java/C# are the right tools for UI testing at all.
+
+
+================================================
+FILE: blog/Solving Cross-Execution Issues in Pact Testing with Kafka and Message Queues.mkd
+================================================
+# Solving Cross-Execution Issues in Pact Testing with Kafka and Message Queues
+
+In our integration tests, Pact, a consumer-driven contract testing tool, has been instrumental in verifying interactions between services. However, when integrating Kafka and message queues, we encountered a perplexing issue where tests were cross-executing between different provider-consumer pairs. In this blog post, we'll delve into the problem we faced, why it was critical to resolve, and how we ultimately solved it.
+
+You can find the solution in [this PR](https://github.com/muratkeremozcan/pact-js-example-provider/pull/113).
+
+## The Problem: Cross-Execution of Tests Between Provider-Consumer Pairs
+
+We had two repositories representing two distinct provider-consumer pairs:
+
+[Provider (MoviesAPI)](https://github.com/muratkeremozcan/pact-js-example-provider)
+
+[Consumer (WebConsumer)](https://github.com/muratkeremozcan/pact-js-example-consumer)
+
+The two repos have two unique contracts between them:
+
+1. **MoviesAPI ↔ WebConsumer** (HTTP-based interaction)
+2. **MoviesAPI-event-producer ↔ WebConsumer-event-consumer** (Kafka message-based interaction)
+
+Each pair had its own Pact contracts and verification tests. Locally, everything worked flawlessly, and in CI PRs as well.
+
+However, when the consumer triggered a webhook in the provider repository (as part of the CI/CD pipeline), we encountered [a strange issue](https://github.com/muratkeremozcan/pact-js-example-provider/actions/runs/11354888840/job/31583091638):
+
+- The Kafka test (`provider-kafka.pacttest.ts`), intended for the `MoviesAPI-event-producer` and `WebConsumer-event-consumer`, was inadvertently executing tests meant for the HTTP-based pair (`MoviesAPI` and `WebConsumer`).
+- This cross-execution led to test failures and confusion, especially since the tests were running in the context of the wrong provider-consumer pair.
+
+### Root Cause Analysis
+
+The crux of the problem was the handling of the `PACT_PAYLOAD_URL` environment variable:
+
+The `PACT_PAYLOAD_URL` was being set globally, causing both test suites to pick it up, regardless of whether it was relevant to them.
+
+This is the way things have to be: during a webhook trigger, which is caused by a `repository_dispatch`, we want an HTTP request from the consumer repo to the provider repo instructing GitHub to start the [webhook action](https://github.com/muratkeremozcan/pact-js-example-provider/blob/main/.github/workflows/webhook.yml).
+
+A `repository_dispatch` in GitHub is basically an HTTP request to your GitHub project instructing GitHub to start any action or webhook. In our example, a `repository_dispatch` with the event type `contract_requiring_verification_published` will trigger the workflow.
+
+In our local tests and in PRs, `PACT_BROKER_BASE_URL` is used, and during a webhook trigger it is replaced with `PACT_PAYLOAD_URL`. This is because we want to verify the newly published contract from the consumer repo PR that caused the trigger.
+
+> Recall the consumer and provider flow.
+>
+> The key is that, when there are multiple repos, the provider has to run `test:provider-ci` `(#3)` after the consumer runs `publish:pact` `(#2)` but before the consumer can run `can:i:deploy:consumer` `(#4)`. The trigger to run `test:provider-ci` `(#3)` has to happen automatically; webhooks handle this.
+>
+> ```bash
+> # Consumer
+> npm run test:consumer # (1)
+> npm run publish:pact # (2)
+> npm run can:i:deploy:consumer # (4)
+> # only on main
+> npm run record:consumer:deployment --env=dev # (5) change the env param as needed
+>
+> # Provider
+> npm run test:provider-ci # (3) triggered by webhooks
+> npm run can:i:deploy:provider # (4)
+> # only on main
+> npm run record:provider:deployment --env=dev # (5) change the env param as needed
+> ```
+
+## Why Was the Problem Worth Solving
+
+Ensuring that each test suite only executes its intended tests is crucial for several reasons:
+
+1. **Accuracy**: Tests should only verify the contracts relevant to their specific provider-consumer pair.
+2. **Reliability**: Cross-execution can lead to false negatives, causing CI/CD pipelines to fail unnecessarily.
+3. **Scalability**: As more provider-consumer pairs are added, the problem would compound, leading to more significant issues down the line.
+
+By resolving this issue, we aimed to improve the reliability of our test suites, streamline our CI/CD processes, and set a foundation for scalable contract testing.
+
+## How We Solved It
+
+To address the issue, we undertook a systematic approach:
+
+### 1. Refactoring the URL Handling Logic
+
+We focused on the [`handlePactBrokerUrlAndSelectors`](https://github.com/muratkeremozcan/pact-js-example-provider/pull/113/files#diff-3fe9c15580c38cccec13f1adf95cbb093658c395f41d5fe86f140136ff1cff9eR18) function, which was responsible for configuring the verifier options based on the presence of a `PACT_PAYLOAD_URL`.
+
+#### Original Issues:
+
+- **Complexity**: The function had a high cyclomatic complexity, making it hard to read and maintain.
+- **Global Variable Usage**: It relied on globally set environment variables, leading to unintended side effects.
+- **Lack of Conditional Handling**: There was no check to ensure that the `PACT_PAYLOAD_URL` matched the expected provider and consumer before using it.
+
+#### Refactored Solution:
+
+We broke down the `handlePactBrokerUrlAndSelectors` function into smaller, focused functions:
+
+- [`parseProviderAndConsumerFromUrl`](https://github.com/muratkeremozcan/pact-js-example-provider/pull/113/files#diff-3fe9c15580c38cccec13f1adf95cbb093658c395f41d5fe86f140136ff1cff9eR96): Extracts the provider and consumer names from the `PACT_PAYLOAD_URL`.
+- [`processPactPayloadUrl`](https://github.com/muratkeremozcan/pact-js-example-provider/pull/113/files#diff-3fe9c15580c38cccec13f1adf95cbb093658c395f41d5fe86f140136ff1cff9eR53): Determines whether the `PACT_PAYLOAD_URL` should be used based on whether it matches the expected provider and consumer.
+- [`usePactPayloadUrl`](https://github.com/muratkeremozcan/pact-js-example-provider/pull/113/files#diff-3fe9c15580c38cccec13f1adf95cbb093658c395f41d5fe86f140136ff1cff9eR119): Configures the verifier options to use the `PACT_PAYLOAD_URL` for verification.
+- [`usePactBrokerUrlAndSelectors`](https://github.com/muratkeremozcan/pact-js-example-provider/pull/113/files#diff-3fe9c15580c38cccec13f1adf95cbb093658c395f41d5fe86f140136ff1cff9eR141): Configures the verifier options to use the Pact Broker URL and appropriate consumer version selectors.
+
+By modularizing the code, we reduced complexity and made it easier to understand and maintain.
+
+### 2. Implementing Conditional Logic
+
+We introduced logic to ensure that a test suite only uses the `PACT_PAYLOAD_URL` if it matches its specific provider and consumer:
+
+```typescript
+if (providerMatches && consumerMatches) {
+  usePactPayloadUrl(pactPayloadUrl, options);
+  return true; // Indicate that the Pact payload URL was used
+} else {
+  console.log(
+    `PACT_PAYLOAD_URL does not match the provider (${options.provider}) and consumer (${consumer || 'all'}), ignoring it`
+  );
+}
+```
+
+This check prevents a test suite from inadvertently using a `PACT_PAYLOAD_URL` intended for a different provider-consumer pair.
+
+### 3. Ensuring Proper Fallback
+
+If the `PACT_PAYLOAD_URL` is not provided or doesn't match, the verifier options are configured to use the Pact Broker URL and consumer version selectors:
+
+```typescript
+// If pactPayloadUrl is not provided or doesn't match, use the Pact Broker URL and selectors
+usePactBrokerUrlAndSelectors(
+  pactBrokerUrl,
+  consumer,
+  includeMainAndDeployed,
+  options
+);
+```
+
+This ensures that the test suite can still run and verify contracts appropriately, even in the absence of a matching `PACT_PAYLOAD_URL`.
+
+### 4. Updating Test Files
+
+In our test files (`provider-contract.pacttest.ts` and `provider-kafka.pacttest.ts`), we made sure to specify the correct `provider` and `consumer` when building the verifier options. This information is crucial for the `processPactPayloadUrl` function to correctly determine whether to use the `PACT_PAYLOAD_URL`.
+
+```typescript
+// in provider-contract.pacttest.ts:
+const options = buildVerifierOptions({
+  provider: 'MoviesAPI',
+  consumer: 'WebConsumer',
+  // ... other options
+})
+
+// in provider-kafka.pacttest.ts:
+const options = buildMessageVerifierOptions({
+  provider: 'MoviesAPI-event-producer',
+  consumer: 'WebConsumer-event-consumer',
+})
+```
+
+### 5. Verifying the Solution
+
+Before the fix: trigger a push from the consumer repo, and we had a [CI failure at the provider webhook job](https://github.com/muratkeremozcan/pact-js-example-provider/actions/runs/11354888840/job/31583091638).
+
+After the fix: trigger a push from the consumer repo again, and there is [no more CI failure](https://github.com/muratkeremozcan/pact-js-example-provider/actions/runs/11368869071/job/31624971740).
+
+## Conclusion
+
+By refactoring our code and introducing conditional logic to handle the `PACT_PAYLOAD_URL` appropriately, we resolved the cross-execution issue in our Pact tests involving Kafka and message queues. This solution not only fixed the immediate problem but also enhanced the maintainability and scalability of our testing framework.
+
+**Key Takeaways:**
+
+- **Modular Code Improves Maintainability**: Breaking down complex functions into smaller, focused ones makes the codebase easier to understand and work with.
+- **Conditional Logic Prevents Cross-Execution**: Implementing checks to ensure that only the intended tests are executed prevents cross-execution issues.
+- **Testing Various Scenarios**: Verifying the solution under different conditions ensures robustness and reliability.
+
+**Final Thoughts:**
+
+Addressing this issue was crucial for maintaining the integrity of our testing process. As microservices architectures continue to grow in complexity, having reliable and maintainable testing practices becomes ever more important. We hope that sharing our experience will help others facing similar challenges in their testing journeys.
+
+
+================================================
+FILE: blog/Testing Email-Based Authentication Systems with Cypress, Mailosaur and cypress-data-session.mkd
+================================================
+# Testing Email-Based Authentication Systems with Cypress, Mailosaur and cypress-data-session
+
+These days our inboxes frequently buzz with verification codes, organization invites, or one-time links, each constituting a variant of authentication security. How can we ensure these email processes are robust? In this post we will cover test strategies for email authentication, using an open source application ([link to the repo](https://github.com/muratkeremozcan/prod-ready-serverless)) as well as real-life examples of how there could be variants to the same strategy. We will also touch upon how to reduce redundant consumption of such emails using [`cypress-data-session`](https://github.com/bahmutov/cypress-data-session).
+
+TOC:
+
+- [Receiving authentication codes - AWS Cognito example](#receiving-authentication-codes---aws-cognito-example)
+- [Gleb's email-authentication-code example at Mercari](#glebs-email-authentication-code-example-at-mercari)
+- [User invite at Extend \& passwordless-login example](#user-invite-at-extend--passwordless-login-example)
+- [Reducing redundant email consumption with `cypress-data-session`](#reducing-redundant-email-consumption-with-cypress-data-session)
+- [Sharing the email between it blocks](#sharing-the-email-between-it-blocks)
+- [Using one email per machine](#using-one-email-per-machine)
+- [Details about the data-session logic (specific to repo example)](#details-about-the-data-session-logic-specific-to-repo-example)
+- [Magic links with `cypress-data-session`](#magic-links-with-cypress-data-session)
+- [Wrap-up](#wrap-up)
+
+## Receiving authentication codes - AWS Cognito example
+
+Our [sample app](https://github.com/muratkeremozcan/prod-ready-serverless) is from Yan Cui's highly acclaimed [Production-Ready Serverless](https://productionreadyserverless.com/) workshop. Yan guides us through building an event-driven serverless app in Node.js with AWS API Gateway, Lambda, Cognito, EventBridge, DDB, and server-side rendering; the app uses AWS Cognito for authentication.
+
+This application showcases an exemplary user registration flow, where the user we create receives an email from AWS Cognito containing a one-time code. This code then grants the user access to the application. Subsequent sign-ins only require an email and a password. Below are the visual steps that should be familiar to all users.
+
+![img](https://files.cdn.thinkific.com/file_uploads/179095/images/85e/d53/0b1/mod09-002.png)
+
+![img](https://files.cdn.thinkific.com/file_uploads/179095/images/9fb/92c/4d6/mod09-003.png)
+
+![img](https://files.cdn.thinkific.com/file_uploads/179095/images/0c8/e1c/b10/mod09-004.png)
+
+![img](https://files.cdn.thinkific.com/file_uploads/179095/images/f8b/e8b/7fb/mod09-005.png)
+
+![img](https://files.cdn.thinkific.com/file_uploads/179095/images/1bb/d94/410/mod09-006.png)
+
+Let us take a look at the automated test at [sign-up-new-user](https://github.com/muratkeremozcan/prod-ready-serverless/blob/main/cypress/e2e/front-end/sign-up-new-user.cy.js).
+
+We use a utility to randomize a user and visit the app. [The example](https://github.com/muratkeremozcan/prod-ready-serverless/blob/main/cypress/support/generate-random-user.js#L3) uses the Chance library, but a FakerJs version is also provided. The only noteworthy item here is the randomized password. If the randomized password is not compliant, we might get 400 errors.
+
+```js
+import {getConfirmationCode} from '../../support/e2e'
+import {generateRandomUser} from '../../support/generate-random-user'
+
+describe('sign up a new user', () => {
+  it('should register the new user and log in', () => {
+    const {firstName, lastName, userName, email, password} = generateRandomUser(
+      Cypress.env('MAILOSAUR_SERVERID'),
+    )
+
+    cy.visit('/')
+
+    // ... the rest of the test
+  })
+})
+```
+
+In the next section we drive the UI to fill in the form and register the user. It is important to check the backend response here, as the UI is not sophisticated enough to show invalid-password or user-already-exists errors. In the real world, we would verify both the backend response and the UI, and possibly test the error/edge cases by stubbing the network via `cy.intercept`, preferably in lower-level component tests.
+
+```js
+
+  cy.intercept('POST', 'https://cognito-idp*').as('cognito')
+  cy.contains('Register').click()
+  cy.get('#reg-dialog-form').should('be.visible')
+  cy.get('#first-name').type(firstName, {delay: 0})
+  cy.get('#last-name').type(lastName, {delay: 0})
+  cy.get('#email').type(email, {delay: 0})
+  cy.get('#username').type(userName, {delay: 0})
+  cy.get('#password').type(password, {delay: 0})
+  cy.contains('button', 'Create an account').click()
+  cy.wait('@cognito').its('response.statusCode').should('equal', 200)
+
+```
+
+In the next section we check the email with a helper function `getConfirmationCode`, which uses [Mailosaur](https://mailosaur.com/)'s Cypress commands (details later), extract the verification code from the email, and continue driving the user until the user is signed in.
+
+```js
+getConfirmationCode(email).then(code => {
+  cy.get('#verification-code').type(code, {delay: 0})
+  cy.contains('button', 'Confirm registration').click()
+  cy.wait('@cognito')
+  cy.contains('You are now registered!').should('be.visible')
+  cy.contains('button', /ok/i).click()
+
+  cy.contains('Sign in').click()
+  cy.get('#sign-in-username').type(userName, {delay: 0})
+  cy.get('#sign-in-password').type(password, {delay: 0})
+  cy.contains('button', 'Sign in').click()
+  cy.wait('@cognito')
+
+  cy.contains('Sign out')
+})
+```
+
+The interesting part here is receiving the email and extracting the confirmation code from the email via regex.
+Receiving the email is near-instant and effortless via [Mailosaur](https://mailosaur.com/). We accomplish that with the command from the [cypress-mailosaur](https://github.com/mailosaur/cypress-mailosaur) plugin: `cy.mailosaurGetMessage(serverId, {sentTo: userEmail})`.
+
+Remember that the email is just a string with the confirmation code. We can extract the code using a regular expression.
+
+![img](https://files.cdn.thinkific.com/file_uploads/179095/images/9fb/92c/4d6/mod09-003.png)
+
+```js
+const parseConfirmationCode = str => {
+  const regex = /Your confirmation code is (\w+)/
+  const match = str.match(regex)
+
+  return match ? match[1] : null
+}
+
+export const getConfirmationCode = userEmail => {
+  return cy
+    .mailosaurGetMessage(Cypress.env('MAILOSAUR_SERVERID'), {
+      sentTo: userEmail,
+    })
+    .its('html.body')
+    .then(parseConfirmationCode)
+}
+```
+
+### Addendum: [Mailosaur](https://mailosaur.com/) makes parsing confirmation codes effortless!
+
+We found out about this later, but when looking at the `html.codes` property, we see that Mailosaur already extracts the codes for us. That is so convenient, because the regex may be different between authentication providers. The previous code becomes so much easier without having to worry about the regex.
+
+```js
+export const getConfirmationCode = userEmail => {
+  return cy
+    .mailosaurGetMessage(Cypress.env('MAILOSAUR_SERVERID'), {
+      sentTo: userEmail,
+    })
+    .its('html.codes.0.value')
+}
+```
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/rr33zt9whje59trkdpoj.png)
+
+## Gleb's email-authentication-code example at Mercari
+
+In the AWS Cognito example we only needed the verification code so that we can enter it in the UI.
+
+In my friend [Gleb Bahmutov's blog](https://glebbahmutov.com/blog/minimize-mailosaur-use/), the case study at Mercari is a similar variant: receiving a confirmation code and performing further testing on the email. We randomize a user, fill the form, get the email, and display the email in our test runner, which is a very neat trick because it allows us to visualize the email as we are executing the test.
+
+```js
+beforeEach(() => {
+  const userName = 'Joe Bravo'
+  const serverId = Cypress.env('MAILOSAUR_SERVER_ID')
+  const randomId = Cypress._.random(1e6)
+  const userEmail = `user-${randomId}@${serverId}.mailosaur.net`
+
+  // fill the form...
+
+  // get the email
+  cy.mailosaurGetMessage(serverId, {
+    sentTo: userEmail,
+  })
+    .then(console.log)
+    .its('html.body')
+    // store the HTML under an alias
+    .as('email')
+})
+
+beforeEach(function () {
+  cy.document({ log: false }).invoke({ log: false }, 'write', this.email)
+})
+```
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/yn6nugrt07xlrhvqekfk.png)
+
+## User invite at [Extend](https://www.extend.com/) & passwordless-login example
+
+At our company Extend, in one of our applications we have a user being invited by an admin to an organization. Simply put, the user receives an email, then follows the link where they create an account and continue to sign on to the app & the org.
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/vobq496okbgvf3fgv55v.png)
+
+In the test, the interesting part is extracting the href and visiting it. The browser's built-in DOMParser provides the ability to parse XML or HTML source code.
+
+```ts
+/**
+ * Extracts the href value from an HTML string.
+ *
+ * @param {string} htmlString - The HTML string to parse.
+ * @returns {string|null} The href value, if it exists, otherwise null.
+ */
+const extractHref = (htmlString: string): string | null => {
+  const parser = new DOMParser()
+  const doc = parser.parseFromString(htmlString, 'text/html')
+  const link = doc.querySelector('#reset-password-link')
+  return link ? (link as HTMLAnchorElement).href : null
+}
+```
+
+The `htmlString` is simply the `html.body` we receive in the email. The function chain yields the link that we visit.
+
+```ts
+// as the admin, fill the form with the user's info
+
+// the user receives the email
+cy.mailosaurGetMessage(Cypress.env('MAILOSAUR_SERVERID'), {
+  sentTo: email,
+})
+  .its('html.body')
+  .then(extractHref)
+  .should('exist')
+  .then(cy.visit)
+
+// the user continues creating the account
+```
+
+In a very similar fashion, in the case of a passwordless login, we could receive a magic link in the email, extract it, and visit it. We fathom that even the test code would be the same.
+
+```ts
+// we randomize a user, they receive a magic link
+
+const extractHref = (htmlString: string): string | null => {
+  const parser = new DOMParser()
+  const doc = parser.parseFromString(htmlString, 'text/html')
+  const link = doc.querySelector('#reset-password-link')
+  return link ? (link as HTMLAnchorElement).href : null
+}
+
+// we check the email, extract the link and visit it
+cy.mailosaurGetMessage(Cypress.env('MAILOSAUR_SERVERID'), {
+  sentTo: email,
+})
+  .its('html.body')
+  .then(extractHref)
+  .should('exist')
+  .then(cy.visit)
+
+// we are logged in to the app
+```
+
+### Addendum: Mailosaur makes parsing links effortless!
+
+Similar to not having to parse the regex, we later found out that we do not have to parse the links either. After all, confirmation codes and links are widespread, and Mailosaur already provides this data in the email response. Such convenience!
After all, confirmation codes and links are widespread, and Mailosaur already provides this data in the email response. Such convenience!
+
+```ts
+cy.mailosaurGetMessage(Cypress.env('MAILOSAUR_SERVERID'), {
+  sentTo: email,
+})
+  .its('html.links.0.href')
+  .should('exist')
+  .then(cy.visit)
+```
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/vayhol1af0sadmebig43.png)
+
+## Reducing redundant email consumption with [`cypress-data-session`](https://github.com/bahmutov/cypress-data-session)
+
+In the sample repo [prod-ready-serverless](https://github.com/muratkeremozcan/prod-ready-serverless), we are using AWS Cognito, which has a [soft limit of 50 emails per day](https://docs.aws.amazon.com/cognito/latest/developerguide/limits.html). Not only are there limits that are not easy to increase, but there is also a cost to Cognito. Similarly at Extend, we have limits with Okta. In one of the UI apps we have 80 e2e tests and ~500 it blocks, and these can fully execute on a pull request. We may have dozens of commits per day. If every test created a new user via email, email-based authentication could not scale even with a generous Mailosaur pricing band; we would be blocked by our auth provider long before reaching the email limit. Gleb Bahmutov's `cypress-data-session` plugin is the solution.
+
+## Sharing the email between it blocks
+
+In Gleb's example, we see the data session being utilized across the it blocks in a spec, to avoid receiving a new email for each it block. The random user is created in the beginning, the form is filled, and the email is received only once. The return value from the `setup` function is the `html.body`, which is auto-aliased to the session name "email". The DOM is populated with the email in each test.
+
+```js
+beforeEach(() => {
+  cy.dataSession({
+    name: 'email',
+    setup() {
+      const userName = 'Joe Bravo'
+      const serverId = Cypress.env('MAILOSAUR_SERVER_ID')
+      const randomId = Cypress._.random(1e6)
+      const userEmail = `user-${randomId}@${serverId}.mailosaur.net`
+
+      // fill the form...
+
+      cy.mailosaurGetMessage(serverId, {
+        sentTo: userEmail,
+      })
+        .its('html.body')
+    },
+    shareAcrossSpecs: true, // the email is reused between the tests
+  })
+})
+
+beforeEach(function () {
+  cy.document({ log: false }).invoke({ log: false }, 'write', this.email)
+})
+
+it('shows the code by itself', () => ...)
+it('has the confirmation code link', () => ...)
+it('has the working code', () => ...)
+```
+
+![Reusing the same email in all tests](https://glebbahmutov.com/blog/images/minimize-mailosaur-use/m3.png)
+
+In the `prod-ready-serverless` application (we will share the details of the data-session configuration in the [`registerAndSignIn`](https://github.com/muratkeremozcan/prod-ready-serverless/blob/main/cypress/support/e2e.js#L720) command later), we can consider how a test would look if we used one email per file. The first test in the suite would go through the complete flow of filling the form, receiving the one-time code, and using it to sign in. The second test would only sign in. The spec would consume 1 email, similar to the previous example.
+
+```js
+// ./cypress/e2e/front-end/place-order.cy.js
+const {generateRandomUser} = require('../../support/generate-random-user')
+
+describe('place an order', () => {
+  // randomize a user
+  const {fullName, userName, email, password} = generateRandomUser(
+    Cypress.env('MAILOSAUR_SERVERID'),
+  )
+
+  beforeEach(() => {
+    cy.visit('/')
+
+    cy.registerAndSignIn({
+      fullName,
+      userName,
+      email,
+      password,
+    })
+  })
+
+  // the first test registers, receives the email and signs in
+  it('should place an order', () => {
+    cy.on('window:alert', cy.stub().as('alert'))
+    cy.intercept('POST', '**/orders').as('placeOrder')
+
+    cy.get('#restaurantsUl > :nth-child(1)').click()
+    cy.wait('@placeOrder').its('response.statusCode').should('eq', 200)
+    cy.get('@alert').should('be.calledOnce')
+  })
+
+  // the 2nd test only signs in
+  it('should do something else', () => {
+    cy.log('we are logged in with the same user')
+  })
+})
+
+```
+
+![Image description](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/lul7r7cgqs0eppbusza4.png)
+
+## Using one email per machine
+
+50 emails per day with Cognito is not a very comfortable limit. In our repo we want to further reduce email consumption and share the random user, and therefore the email, between all the tests that execute on that machine.
+
+The way to accomplish this is to create the random user in the [`cypress.config`](https://github.com/muratkeremozcan/prod-ready-serverless/blob/main/cypress.config.js) file, which executes once per `cy:run` or `cy:open`. We can assign the values as environment variables and use them as arguments when calling the `registerAndSignIn` command, ensuring the same values are used not only between the it blocks but between the spec files as well.
+
+```js
+const {fullName, userName, email, password} =
+  generateRandomUser(MAILOSAUR_SERVERID)
+
+module.exports = defineConfig({
+  // ...
+  env: {
+    fullName,
+    userName,
+    email,
+    password,
+  },
+  e2e: {
+    // ...
+  },
+})
+```
+
+We get the same effect in the single test file from before.
+
+```js
+// ./cypress/e2e/front-end/place-order.cy.js
+
+describe('place an order', () => {
+  beforeEach(() => {
+    cy.visit('/')
+
+    cy.registerAndSignIn({
+      fullName: Cypress.env('fullName'),
+      userName: Cypress.env('userName'),
+      email: Cypress.env('email'),
+      password: Cypress.env('password'),
+    })
+  })
+
+  it('should place an order', () => {..})
+
+  it('should do something else', () => {..})
+})
+
+```
+
+The key distinction is that the other spec files utilize the same user and email as well.
+
+```js
+// cypress/e2e/front-end/sign-up-test-user.cy.js
+describe('sign in with test user', () => {
+  it('should register & log in with the test user', () => {
+    cy.visit('/')
+
+    cy.registerAndSignIn({
+      fullName: Cypress.env('fullName'),
+      userName: Cypress.env('userName'),
+      email: Cypress.env('email'),
+      password: Cypress.env('password'),
+    })
+  })
+})
+```
+
+The idea of sharing the test user between different spec files applies to magic-link/passwordless login scenarios as well. Mirroring the above, we would randomize the user & assign the values as environment variables upon launching Cypress, then in the specs we would use the environment variables for the command arguments.
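+
+As a rough sketch (with `generateRandomUser` as before, and a hypothetical `magicLinkLogin` command standing in for the receive-email, extract-link, visit flow shown earlier), it could look like this:
+
+```js
+// cypress.config.js: a minimal sketch, assuming MAILOSAUR_SERVERID is available in the environment
+const {defineConfig} = require('cypress')
+const {generateRandomUser} = require('./cypress/support/generate-random-user')
+
+// executes once per cy:run / cy:open, so all specs share one user and one email
+const {fullName, userName, email, password} = generateRandomUser(
+  process.env.MAILOSAUR_SERVERID,
+)
+
+module.exports = defineConfig({
+  env: {fullName, userName, email, password},
+  e2e: {
+    // ...
+  },
+})
+```
+
+```js
+// any spec file: magicLinkLogin is a hypothetical command
+// wrapping the receive-email, extract-link, visit flow
+cy.magicLinkLogin(Cypress.env('email'))
+```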
+
+## Details about the data-session logic (specific to repo example)
+
+One thing is certain: with email-based authentication, we cannot utilize the built-in [`cy.session`](https://docs.cypress.io/api/commands/session#__docusaurus_skipToContent_fallback) because the email portion of the flow requires more control over the logic than simply not repeating a UI login. `cypress-data-session` can be applied to login scenarios that `cy.session` would satisfy, but not the other way around. For a comparison of cypress-data-session vs cy.session, take a look at [this repo](https://github.com/muratkeremozcan/appsyncmasterclass-frontend/blob/main/cypress/support/e2e.js#L68) and [this video](https://www.youtube.com/watch?v=NT-Zjj0fQMQ).
+
+The way to configure the data-session logic will be different in every app. In Gleb's example, the data session was focused on the email: receiving one email, rendering it in the DOM, and performing assertions on it. In the [`prod-ready-serverless`](https://github.com/muratkeremozcan/prod-ready-serverless/blob/main/cypress.config.js) example we have to satisfy two concerns:
+
+- If it's a new user, we have to go through the registration flow (fill form, receive one-time code, log in with the code)
+- If the user already exists, only sign in
+
+Let us start breaking apart the functions and build up to the final `registerAndSignIn` command.
+
+We have a function that simply drives the UI to fill the form for a new user.
+
+```js
+// first part of registration
+const fillRegistrationForm = ({fullName, userName, email, password}) => {}
+```
+
+We have a function that gets the confirmation code, uses it once in the UI, and yields out the confirmation code.
+
+```js
+// second part of registration
+const confirmRegistration = email =>
+  getConfirmationCode(email).then(confirmationCode => {
+    cy.intercept('POST', 'https://cognito-idp*').as('cognito')
+    cy.get('#verification-code').type(confirmationCode, {delay: 0})
+    cy.contains('button', 'Confirm registration').click()
+    cy.wait('@cognito')
+    cy.contains('You are now registered!').should('be.visible')
+    cy.contains('button', /ok/i).click()
+    return cy.wrap(confirmationCode)
+  })
+```
+
+We have a function that combines the above two parts for the full registration.
+
+```js
+// the registration flow; the above 2 parts
+const register = ({fullName, userName, email, password}) => {
+  fillRegistrationForm({fullName, userName, email, password})
+  return confirmRegistration(email)
+}
+```
+
+Another function drives the UI for sign-in.
+
+```js
+// nothing interesting, just driving the UI
+const signIn = ({userName, password}) => {}
+```
+
+The final command makes clear why we had to separate `register` into two parts.
+
+Initially we want the full flow of filling out the form, getting the confirmation, and signing in; `init()` does the form fill and receives the code, and `recreate()` signs in.
+
+In subsequent tests, we only want to check that the email & verification code exist, and sign in; `setup()` checks the email & verification code, `recreate()` signs in.
+
+```js
+const registerAndSignIn = ({fullName, userName, email, password}) =>
+  cy.dataSession({
+    // unique name of the data session will be the email address.
+    // With any new email address, the data session will be recreated
+    name: email,
+    // initially, we do the full registration flow
+    init: () => register({fullName, userName, email, password}),
+    // subsequent tests start from setup, just checking the email
+    setup: () => confirmRegistration(email),
+    // recreate runs always, either after init or setup
+    recreate: () => signIn({userName, password}),
+    shareAcrossSpecs: true,
+  })
+Cypress.Commands.add('registerAndSignIn', registerAndSignIn)
+```
+
+## Magic links with `cypress-data-session`
+
+How could the above example look for a magic link scenario, where we do not want to keep receiving an email?
+
+After the setup step where we `visitEmailLink`, we need some logged-in state to be preserved for subsequent tests. This state could be local storage, session storage, and/or cookies; whatever we need to stay logged in after clicking the magic link.
+
+For instance, in [this example](https://github.com/muratkeremozcan/appsyncmasterclass-frontend/blob/main/cypress/support/e2e.js#L65) our setup step logs in and returns the localStorage. That return value is fed in as the argument to the recreate step, and the recreate step rebuilds the localStorage.
+
+```js
+// preserving local storage example from
+// https://github.com/muratkeremozcan/appsyncmasterclass-frontend/blob/main/cypress/support/e2e.js#L65
+
+Cypress.Commands.add('progLogin', (username, password) => {
+  cy.then(() => Auth.signIn(username, password)).then(cognitoUser => {
+    const idToken = cognitoUser.signInUserSession.idToken.jwtToken
+    const accessToken = cognitoUser.signInUserSession.accessToken.jwtToken
+    const makeKey = name =>
+      `CognitoIdentityServiceProvider.${cognitoUser.pool.clientId}.${cognitoUser.username}.${name}`
+    cy.setLocalStorage(makeKey('accessToken'), accessToken)
+    cy.setLocalStorage(makeKey('idToken'), idToken)
+    cy.setLocalStorage(
+      `CognitoIdentityServiceProvider.${cognitoUser.pool.clientId}.LastAuthUser`,
+      cognitoUser.username,
+    )
+  })
+  cy.saveLocalStorage()
+  // IMPORTANT: preserving the localStorage state
+  // this could be session storage, or cookies
+  return cy.visit('/home').then(() => JSON.parse(JSON.stringify(localStorage)))
+})
+
+Cypress.Commands.add('dataSessionLogin', (email, password) => {
+  return cy.dataSession({
+    name: email,
+    setup: () => cy.progLogin(email, password),
+    validate: validateLocalStorage,
+    // the return of the setup step is yielded as an arg to recreate
+    // and we rebuild the localStorage
+    recreate: ls => {
+      for (const key in ls) {
+        localStorage[key] = ls[key]
+      }
+      cy.visit('/home')
+      return cy.contains('Home', {timeout: 10000})
+    },
+    cacheAcrossSpecs: true,
+  })
+})
+```
+
+Here's how the magic link scenario could work:
+
+```ts
+const visitEmailLink = (email) => {
+  // we randomize a user, they receive a magic link
+  const extractHref = (htmlString: string): string | null => {
+    const parser = new DOMParser()
+    const doc = parser.parseFromString(htmlString, 'text/html')
+    const link = doc.querySelector('#reset-password-link')
+    return link ? (link as HTMLAnchorElement).href : null
+  }
+
+  // we check the email, extract the link and visit it
+  cy.mailosaurGetMessage(Cypress.env('MAILOSAUR_SERVERID'), {
+    sentTo: email,
+  })
+    .its('html.body')
+    .then(extractHref)
+    .should('exist')
+    .then(cy.visit)
+
+  // IMPORTANT: preserve some state here, and return that
+  // could be localStorage, sessionStorage, and/or cookies
+  return cy.then(() => JSON.parse(JSON.stringify(localStorage)))
+}
+
+cy.dataSession({
+  // we can again make the unique email the session name
+  name: email,
+  // something that runs the first time,
+  // causes an email to be sent
+  init: () => {..},
+  // extract the magic link and visit it
+  // in this function, make sure to preserve some state
+  // localStorage, sessionStorage, and/or cookies
+  setup: () => visitEmailLink(email),
+  // the return value of the setup gets fed to recreate as an arg
+  recreate: ls => {
+    // rebuild the preserved state before visiting
+    for (const key in ls) {
+      localStorage[key] = ls[key]
+    }
+    cy.visit('/')
+  }
+})
+```
+
+## Wrap-up
+
+We examined the use of Cypress, a powerful testing tool, and [Mailosaur](https://mailosaur.com/), a service designed specifically for testing emails, as a potent combination for testing email-based authentication.
+
+We delved into methods to extract required data from emails, either by using regular expressions or by directly pulling href values from the email's HTML content. These techniques provide the flexibility to extract almost any information that might be needed from an email for testing purposes.
+
+Lastly, we looked at how to use `cypress-data-session` to minimize redundant email consumption. This plugin not only makes tests more efficient by eliminating unnecessary email retrievals, but also enables sharing of email content between the tests in a spec, or even across all tests running on the same machine.
+
+Testing is never an easy task, but with these tools and strategies, even complex systems like email-based authentication can be effectively and efficiently validated.
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/test design techniques.docx
+================================================
+[Binary file]
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/Training itself/Homework 1/Horizon Cloud presentation - Murat .pptx
+================================================
+[Binary file]
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/Training itself/Homework 1/Horizon Cloud presentation - Murat .url
+================================================
+[{000214A0-0000-0000-C000-000000000046}]
+Prop3=19,11
+[InternetShortcut]
+IDList=
+URL=https://prezi.com/2pnqmdhcdoab/cloud-overall/
+IconFile=https://prezi-a.akamaihd.net/landingdjango-versioned/702-1e8ba438a8f205f33717e862419bef34c734fefd/common/img/favicon.ico?v=2
+IconIndex=1
+
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/Training itself/Homework 1/Marketplace Input.docx
+================================================
+[Binary file]
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/Training itself/Homework 1/Role - 5 Differences.docx
+================================================
+[Binary file]
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/Training itself/Homework 3/Horizon Cloud auto pen testing.pptx
+================================================
+[Binary file]
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/Training itself/Homework 3/Horizon Cloud BOIC Test Automation.pptx
+================================================
+[Binary file]
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/Training itself/phase 1 slides/TeA_WS1-3-1a_RBT-Worksheet_A1.xls
+================================================
+[Binary file]
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/Training itself/testarchitectnotes/README.md
+================================================
+This repository is a summary of the notes from the Siemens Test Architect certification class.
+You may download the zip or clone the repository for local access.
+
+
+### Where does Test Architect Fit?
+![Process Quality Manager, Test manager, Test Architect](./slides/triumvirate.jpg)
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/Training itself/testarchitectnotes/TeA_notes.docx
+================================================
+[Binary file]
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/Training itself/testarchitectnotes/Business_Understanding/README.md
+================================================
+### The agile process in 4 steps
+* Find out where you are
+* Take a small step towards your goal
+* Adjust your understanding based on what you learned
+* Repeat
+
+### W-Model for Test & Quality - based on the System Development Process Model
+![W-Model](../slides/W_model.jpg)
+
+### Uncertainty Metrics
+Uncertainty can have opportunities, so long as it does not become a problem.
+
+![](../slides/problem_vs_uncertainty.jpg)
+
+### SIPOC
+Suppliers, inputs, process, outputs, and customers.
+Used by a team to identify all relevant elements of a process improvement project before work begins.
+
+![SIPOC_1](../slides/SIPOC_1.jpg)
+![SIPOC_2](../slides/SIPOC_2.jpg)
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/Training itself/testarchitectnotes/Requirements_Engineering/README.md
+================================================
+# REQUIREMENTS ENGINEERING
+
+## Sentence Template for System Requirements
+Given When Then
+
+![Sentence Template for System Requirements](../slides/sentenceTemplateSystemRequirements.jpg)
+
+## Characteristics of Good Requirements
+
+![Characteristics of Good Requirements](../slides/goodRequirements.jpg)
+
+## Requirements Traceability
+![Requirements Traceability](../slides/requirementsTraceability.jpg)
+
+Tracing Usage:
+
+| Analysis | Description | Process |
+|---|---|---|
+| Derivation | *Why is this here?* | cost-benefit analysis |
+| Impact | *What if this changed?* | change management |
+| Coverage | *Have I covered all reqs?* | management report |
+
+## NFRs / Quality Attributes
+
+[Software Quality Characteristics pdf](./softwareQualityCharacteristics.pdf)
+
+![ISO IEC 25010 Product Quality Model](../slides/ISO_IEC_25010.jpg)
+
+### Step 1: Utility Tree
+![Utility Tree](../slides/utilityTree.jpg)
+
+### Step 2: Scenario Description Template / Workflows
+To reduce ambiguity and increase testability, each measurable scenario in the Utility Tree gets described with this template.
+
+![Scenario Description Template](../slides/scenarioDescriptionTemplate.jpg)
+
+### Step 3: Refine Scenarios
+![Scenario Refinement Table](../slides/scenarioRefinement.jpg)
+
+### Step 4: Implement Design Strategies to support the NFRs
+... and use design patterns when needed...
+
+![Design Strategy Template](../slides/designStrategy.jpg)
+
+![Design Strategy Example](../slides/designStrategyExample.jpg)
+![Design Pattern Example](../slides/designPatternExample.jpg)
+
+## Testing NFRs
+
+![Testing NFRs](../slides/testingNFRs.jpg)
+![NFR Testing Approaches](../slides/NFRtestingApproaches.jpg)
+
+### KANO Model
+For the Product Manager, KANO is a tool to find the right mix between the different feature types.
+For the Test Architect, KANO helps to identify risks.
+
+![KANO Model](../slides/KANO_model.jpg)
+
+## Configuration Management
+Source Control, configurations, environment...
+
+**DevOps**: continuous integration, deployment, testing...
+
+![Attributes of Configuration Management](../slides/configManagement.jpg)
+
+## Change Request Management
+**Change Request**: any issue that cannot go in a patch or point rev and has to go into the product asap.
+
+**Change Management**: because requirements are incomplete, erroneous, ambiguous.
+
+**Defect Management**: because tests reveal defects that need correction.
+
+Tool -> Feature -> trace & Test
+
+**CCB**:
+![CCB](../slides/CCB.jpg)
+
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/Training itself/testarchitectnotes/slides/RBT-Worksheet.xls
+================================================
+[Binary file]
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/Training itself/testarchitectnotes/Social_Capability/README.md
+================================================
+# SOCIAL CAPABILITY
+
+## Active Listening
+
+Do not give solutions!
+
+![Active Listening](../slides/activeListening.jpg)
+
+## Feedback
+![I see I feel I wish](../slides/feedback.jpg)
+
+You can use feedback for blind spots.
+
+![Johari Window](../slides/johari-window.jpg)
+
+## Stakeholders
+Whose buy-in do you need?
+
+![Stakeholder analysis](../slides/stakeholderAnalysis.jpg)
+
+## Situational Leadership
+
+![Knowing Willing Able Allowed](../slides/knowingWillingAbleAllowed.jpg)
+
+![Situational Leadership](../slides/situationalLeadership.jpg)
+
+## Conflict Management
+
+![Getting Past No](../slides/gettingPastNo.jpg)
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/Training itself/testarchitectnotes/Test_Architecture/README.md
+================================================
+# SYSTEMATIC ARCHITECTURE
+
+What is architecture?
+* Abstraction of reality
+* Focus on fundamental & critical concepts
+* Rationales / Why
+
+Architecture Design = Creative freedom - Forces: *(func./non-func. reqs + org./biz./processes)*
+
+# Architecture Design - step by step
+
+![Architecture Design](../slides/ArchitectureDesign.jpg)
+
+## Link between TeA and SWA
+
+![TeA Link to SWA](../slides/TeAlinkSWA.jpg)
+
+## 1) SWA: Requirements Elicitation - TeA: Identify ASRs
+
+### Use Case Packages
+Each use case is a requirement.
+
+![Use Case Packages](../slides/UseCasePackages.jpg)
+
+### Quality Scenarios
+* [Requirements Engineering](../Requirements_Engineering/README.md): Given When Then
+* NFRs
+* Risk based evaluation: [Risk Based Testing](../Testing_&_Quality/README.md)
+
+![Sentence Template for System Requirements](../slides/sentenceTemplateSystemRequirements.jpg)
+
+![risk profile](../slides/riskProfile.jpg)
+
+![Quality Scenarios](../slides/QualityScenarios.jpg)
+
+### Architecturally Significant Requirements (ASRs)
+
+![ASRs](../slides/ASRs.jpg)
+
+## 2) SWA: Domain Model - TeA: "Companion" Domain Model
+This is not a class diagram, but the **Problem Domain**. **It is a conceptual model that incorporates behavior and data**.
+
+Question: what is the relation between a Domain Model and a requirement?
+ > There can be long discussions on requirements – features – use cases – epics – domain models – context diagrams – package diagrams – component diagrams – etc. These are different perspectives/views/notations used to specify something with different intent, purpose, scope, level of detail.
+
+ > * *Requirement* : capability or condition needed by a stakeholder
+ > * *Domain Model* : conceptual model that incorporates behavior and data
+ > * *Context diagram* : focuses on what is in/out of scope, boundary between system and environment
+
+
+![Domain Model](../slides/DomainModel.jpg)
+
+Your Test System is the "companion" Domain Model; you have to cover all the other elements:
+* Human user interaction (simulated or real)
+* Communication with other actors (external systems, …)
+* Identify capabilities of these elements and design test cases accordingly
+
+![Test System](../slides/TestSystem.jpg)
+
+
+## 3) SWA: Domain Model Dynamics - TeA: Test System's Model Dynamics
+Activity & State Diagrams
+
+![Domain Model Dynamics](../slides/DomainModelDynamics.jpg)
+
+## 4) SWA: Determine Scope Boundaries - TeA: Test System's Scope & Boundaries
+
+### Context Diagram
+For the product/system, clearly define the boundaries:
+**What is IN and what is OUT of scope, the boundary between system and environment.**
+
+> Context diagram may look like a use case package diagram but has a different intent:
+> * context diagram: describes what is in/out of scope, the so-called “context”, the interaction between the system and the environment around it.
+> * use case package diagram: shows packages and relationships between them, but can be more an “internal only” view.
+
+### Dynamics on Context level
+Sequence Diagrams
+
+![Context Diagram & Dynamics](../slides/ContextDiagram&Dynamics.jpg)
+
+
+## 5) SWA: Conceptual Draft - TeA: Test System's Conceptual Draft
+Relations between components.
+
+![Conceptual Draft](../slides/ConceptualDraft.jpg)
+
+## 6+) SWA: Structure the baseline Architecture & Introduce Deployment Views - TeA: Refine test architecture & Define test deployment architecture
+* Walking Skeleton: most important & highest risk
+* Incrementally add use case scenarios
+
+# Architectural Views & Documentation
+
+What should be documented?
+* Context & boundaries
+* UML views of the architecture itself
+* Design rationale
+* How the architecture addresses FRs & NFRs & cross-cutting concerns
+
+![Architectural Documentation Stakeholders](../slides/architectureDocStakeholders.jpg)
+
+Architectural Views: Kruchten, Zachman...
+
+4+1 View explained:
+* User view: all possible scenarios the user expects from the SUT
+* Functional aspects:
+  * Logical View: how the functionality use cases are modeled. *Component diagrams*
+  * Implementation/Development View: how the functionality is implemented (source code, libs, executables etc.). *Class diagrams*
+* Non-functional aspects:
+  * Process View: how the artifacts will be executed in terms of concurrency, scalability, synchronization. *Sequence, Activity, State diagrams*
+  * Deployment View: maps software artifacts to hardware entities and shows the distribution of functionality. *Deployment diagrams*. One of the 4+1 views by Kruchten, sometimes also called the physical view; see [https://en.wikipedia.org/wiki/Deployment_diagram]
+![4 + 1 view Kruchten](../slides/krutchen.jpg)
+
+[Siemens SW Architecture Doc Template](https://wiki.ct.siemens.de/display/MemberSSA/Tools+and+Templates)
+
+![Siemens SW Architecture Doc Template](../slides/architectureDocTemplate.jpg)
+
+## Test Architecture Documentation
+Driven by the Architecture documentation; you must understand & review it as the TeA.
+
+[ISO/IEC/IEEE 29119-3: Test documentation Overview](https://standards.ieee.org/standard/29119-3-2013.html)
+* Part 1: Concepts and Definitions
+* Part 2: Test Process
+* Part 3: **Test Documentation**
+* Part 4: Test Techniques
+
+**Test Documentation**
+* **Organizational test process**
+  * Test strategy *(test levels, test goals, who does what)*
+* **Test management processes**
+  * Test plan
+  * Test status
+  * Test completion report
+
+  *(Test Exit Criteria: test coverage, test progress, defects)*
+* **Dynamic test processes**
+  * Test Designs
+  * Test case/specs
+  * Test data
+  * Test environment
+  * Test execution log
+  * Defects
+
+Test Plan Template
+
+![Test Plan Template](../slides/testPlanTemplate.jpg)
+
+# Architecture Quality & Reviews
+
+Criteria for good architecture (recall NFRs and Quality Characteristics):
+* Reliability
+* Maintainability
+* Scalability
+* Performance
+* Security
+
+![Architecture Quality Reviews](../slides/architectureQualityReviews.jpg)
+
+**Qualitative Review Phase**
+* Prep: clarify review goals / *reviewers*
+* Collect: interviews with stakeholders, docs, source / *reviewers & stakeholders*
+* Elaborate / *reviewers*
+  * SWOT analysis: Strengths, Weaknesses, Opportunities, Threats
+  * Dealing with Weaknesses
+  * ATAM (SWOT alternative): Architecture Tradeoff Analysis Method
+* Consolidation: final report / *reviewers*
+* Presentation to stakeholders / *reviewers*
+* Workshop (optional) / *reviewers & stakeholders*
+
+![Qualitative review toolbox](../slides/qualitativeReviewToolbox.JPG)
+
+![Error classification](../slides/errorClassification.JPG)
+
+![Architecture Quality Reviews](../slides/qualitytativeReviews.jpg)
+
+![Overview Qualitative Architecture Reviews](../slides/overviewQualitativeArchitectureReviews.JPG)
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/Training itself/testarchitectnotes/Testing_&_Quality/README.md
+================================================
+# TESTING & QUALITY
+
+# Value of Testing
+### What is Testing?: investigation of the SUT to provide **information** that results in **improvements**.
+### The 5 Dimensions of Testing
+
+>Coverage: an assessment of the thoroughness or completeness of testing with respect to our test model - *Paul Gerrard*
+
+![5 dimensions of Testing](../slides/dimensions.jpg)
+
+### ROI of Testing
+![ROI of testing](../slides/roi_of_testing.jpg)
+
+### Cost of Quality
+![Cost_of_quality](../slides/Cost_of_quality.jpg)
+
+# Test Strategy
+
+![testStrategy](../slides/testStrategy.jpg)
+
+Testing serves a purpose (*test mission*) that has goals (*test policy*)
+and requires a map (*test strategy*).
+
+![testPolicy](../slides/testPolicy.jpg)
+
+### Test levels – V model with architecture testing
+![V model with architecture](../slides/v_model_with_architecture_testing.jpg)
+
+# Risk Based Testing
+Risk Profile
+
+![risk profile](../slides/riskProfile.jpg)
+
+Risk Based Testing Worksheet. You can download the worksheet as xls.
+![RBT Worksheet](../slides/RiskBasedTesting.jpg)
+
+
+Relations in the RBT worksheet
+![RBT summary](../slides/RiskBasedTestingSUMMARY.jpg)
+
+# Design for Testability
+
+Goal: Controllable, Observable, Reliable: *Intrinsic Testability*. [More On Heuristics of Testability](http://www.satisfice.com/tools/testability.pdf)
+
+Why: reduce the cost of testing, diagnosis, maintenance.
+
+Who: system, software and test architects
+
+How: TDD, loose coupling, inversion of control, SOLID, follow the best practices of [clean code & architecture](https://clean-code-developer.com/weitere-infos/solid/).
+
+![Testability](../slides/testability.jpg)
+
+# Test Exit Criteria
+![Test Exit Criteria](../slides/testExitCriteria.jpg)
+
+# Performance Testing and Scalability
+![Performance Testing](../slides/performanceTesting.jpg)
+
+At a certain load, the response time sky-rockets.
+
+![Scalability Testing](../slides/scalabilityTesting.jpg)
+
+# TDD
+From requirements down to the unit test level.
+The most effective way of specifying something is to describe how you would test it.
+
+# Test Design Techniques
+[Test Design Techniques pdf](./testDesignTechniques.pdf)
+* **Black-box**: requirements based, workflow, statistical/Markov, equivalence class & boundary value, state-based, combinatorial, model based
+* **Gray-box**: interfaces between components, services, systems
+* **White-box**: statement, branch, path: cyclomatic complexity *(Edges - Nodes + 2 = independent paths)*
+* **Fault-based**: exploratory, fuzzing, mutation. [Data Type Attacks and Web Tests pdf](./dataTypeAttacks.pdf)
+* **Regression**: Risk Based Testing, testing firewall (re-test parts influenced by changes)
+
+![Test Design Techniques](../slides/testDesignTechniques.jpg)
+
+## [Test Automation Patterns website](http://testautomationpatterns.wikispaces.com)
+[Test Automation Design Patterns paper](http://testautomationpatterns.wikispaces.com/)
+
+## Test Environment
+**Test environment**: test rig
+
+**Test infrastructure**: test rig + tools + office network etc.
+
+**Test suite architecture**: test levels
+
+![Microservice Test Architecture](../slides/microserviceArchitecture.jpg)
+
+
+# Internal Quality
+Negative effects:
+* Slows development with unplanned activities
+* Rising cost of maintenance, new features, change
+* Rising cost of regression testing, system testing for hotfixes
+* Rising cost of onboarding
+* Complex & risky integration
+
+![Internal vs External Quality](../slides/internal&ExternalQuality.jpg)
+
+
+## Technical Debt
+Lack of internal quality results in technical debt.
+
+![Technical Debt](../slides/technicalDebt.jpg)
+
+## Measuring and Driving Internal Quality
+To measure internal quality:
+* Static code analysis, linters etc.
+* Req. trace
+* On-boarding feedback
+* Visualize with tools, reviews
+* Test gap analysis
+* Automated document analysis
+
+![DSM](../slides/DSM.jpg)
+
+To drive internal quality, you must monetize it:
+
+![monetize](../slides/monetize.jpg)
+
+## Test Code & Architecture Quality Management
+
+Test Code Quality at different levels:
+
+| Level | Artifact | Quality practice |
+| --- | --- | --- |
+| Micro | code | tools |
+| Macro | hacky code | review |
+| Architecture | UML | review, some tools for architecture analysis |
+
+![Test Code Quality](../slides/STCE.jpg)
+
+
+================================================
+FILE: Test_architect_content/Test_architect_course/Training itself/testarchitectnotes/Testing_&_Quality/RBT-Worksheet.xls
+================================================
+[Binary file]
+
diff --git a/src/modules/bmm/testarch/trace-requirements.md b/src/modules/bmm/testarch/trace-requirements.md
deleted file mode 100644
index 1a4fae3e..00000000
--- a/src/modules/bmm/testarch/trace-requirements.md
+++ /dev/null
@@ -1,38 +0,0 @@
-
-
-# Requirements Traceability v2.0 (Slim)
-
-```xml
-
-
- Set command_key="*trace"
- Load {project-root}/bmad/bmm/testarch/tea-commands.csv and read the matching row
- Load {project-root}/bmad/bmm/testarch/tea-knowledge.md emphasising assertions guidance
- Use CSV columns preflight, flow_cues, deliverables, halt_rules, notes, knowledge_tags
- Split pipe-delimited values into actionable lists
- Focus on mapping reality: reference actual files, describe coverage gaps, recommend next steps
-
-
-
- Validate prerequisites; halt per halt_rules if unmet
-
-
- Follow flow_cues to map acceptance criteria to implemented tests
- Leverage knowledge heuristics to highlight assertion quality and duplication risks
-
-
- Create traceability report described in deliverables
- Summarize critical gaps and recommendations
-
-
-
- Apply halt_rules from the CSV row
-
-
- Reference notes column for additional emphasis
-
-
- Coverage matrix and narrative summary
-
-
-```
diff --git a/src/modules/bmm/workflows/testarch/README.md b/src/modules/bmm/workflows/testarch/README.md
new file mode 100644
index 00000000..51be299d
--- /dev/null
+++ b/src/modules/bmm/workflows/testarch/README.md
@@ -0,0 +1,21 @@
+# Test Architect Workflows
+
+This directory houses the per-command workflows used by the Test Architect agent (`tea`). Each workflow wraps the standalone instructions that used to live under `testarch/` so they can run through the standard BMAD workflow runner.
+
+## Available workflows
+
+- `framework` – scaffolds Playwright/Cypress harnesses.
+- `atdd` – generates failing acceptance tests before coding.
+- `automate` – expands regression coverage after implementation.
+- `ci` – bootstraps CI/CD pipelines aligned with TEA practices.
+- `test-design` – combines risk assessment and coverage planning.
+- `trace` – maps requirements to implemented automated tests.
+- `nfr-assess` – evaluates non-functional requirements.
+- `gate` – records the release decision in the gate file.
+
+Each subdirectory contains:
+
+- `instructions.md` – the slim workflow instructions.
+- `workflow.yaml` – metadata consumed by the BMAD workflow runner.
+
+The TEA agent now invokes these workflows via `run-workflow` rather than executing instruction files directly.
diff --git a/src/modules/bmm/workflows/testarch/atdd/instructions.md b/src/modules/bmm/workflows/testarch/atdd/instructions.md new file mode 100644 index 00000000..c5d0355a --- /dev/null +++ b/src/modules/bmm/workflows/testarch/atdd/instructions.md @@ -0,0 +1,43 @@ + + +# Acceptance TDD v3.0 + +```xml + + + Preflight requirements: + - Story is approved with clear acceptance criteria. + - Development sandbox/environment is ready. + - Framework scaffolding exists (run `*framework` if missing). + + + + Confirm each requirement above; halt if any are missing. + + + Clarify acceptance criteria and affected systems. + Select appropriate test level (E2E/API/Component). + Create failing tests using Given-When-Then with network interception before navigation. + Build data factories and fixture stubs for required entities. + Outline mocks/fixtures infrastructure the dev team must provide. + Generate component tests for critical UI logic. + Compile an implementation checklist mapping each test to code work. + Share failing tests and checklist with the dev agent, maintaining red → green → refactor loop. + + + Output failing acceptance test files, component test stubs, fixture/mocks skeleton, implementation checklist, and data-testid requirements. + + + + If acceptance criteria are ambiguous or the framework is missing, halt and request clarification/set up. + + + Reference `{project-root}/bmad/bmm/testarch/tea-knowledge.md` for heuristics that shape this guidance. + Start red; one assertion per test; keep setup visible (no hidden shared state). + Remind devs to run tests before writing production code; update checklist as tests turn green. + + + Failing acceptance/component test suite plus implementation checklist. + + +``` diff --git a/src/modules/bmm/workflows/testarch/atdd/workflow.yaml b/src/modules/bmm/workflows/testarch/atdd/workflow.yaml new file mode 100644 index 00000000..ce6f930d --- /dev/null +++ b/src/modules/bmm/workflows/testarch/atdd/workflow.yaml @@ -0,0 +1,25 @@ +# Test Architect workflow: atdd +name: testarch-atdd +description: "Generate failing acceptance tests before implementation." +author: "BMad" + +config_source: "{project-root}/bmad/bmm/config.yaml" +output_folder: "{config_source}:output_folder" +user_name: "{config_source}:user_name" +communication_language: "{config_source}:communication_language" +date: system-generated + +installed_path: "{project-root}/bmad/bmm/workflows/testarch/atdd" +instructions: "{installed_path}/instructions.md" + +template: false + +tags: + - qa + - atdd + - test-architect + +execution_hints: + interactive: false + autonomous: true + iterative: true diff --git a/src/modules/bmm/workflows/testarch/automate/instructions.md b/src/modules/bmm/workflows/testarch/automate/instructions.md new file mode 100644 index 00000000..cd1aae95 --- /dev/null +++ b/src/modules/bmm/workflows/testarch/automate/instructions.md @@ -0,0 +1,43 @@ + + +# Automation Expansion v3.0 + +```xml + + + Preflight requirements: + - Acceptance criteria are satisfied. + - Code builds locally without errors. + - Framework scaffolding is configured. + + + + Verify all requirements above; halt if any fail. + + + Review story source/diff to confirm automation targets. + Review quality heuristics from `{project-root}/bmad/bmm/testarch/tea-knowledge.md` before proposing additions. + Ensure fixture architecture exists (Playwright `mergeTests`, Cypress commands); add apiRequest/network/auth/log fixtures if missing. 
+ Map acceptance criteria using `{project-root}/bmad/bmm/testarch/test-levels-framework.md` and avoid duplicate coverage. + Assign priorities using `{project-root}/bmad/bmm/testarch/test-priorities-matrix.md`. + Generate unit/integration/E2E specs (naming `feature-name.spec.ts`) covering happy, negative, and edge paths. + Enforce deterministic waits, self-cleaning factories, and execution under 1.5 minutes per test. + Run the suite, capture Definition of Done results, and update package.json scripts plus README instructions. + + + Create new/enhanced spec files grouped by level, supporting fixtures/helpers, data factory utilities, updated scripts/README notes, and a DoD summary highlighting remaining gaps. + + + + If the automation target is unclear or the framework is missing, halt and request clarification/setup. + + + Never create page objects; keep tests under 300 lines and stateless. + Forbid hard waits/conditional flow; co-locate tests near source. + Flag flaky patterns immediately. + + + Prioritized automation suite updates and DoD summary ready for gating. + + +``` diff --git a/src/modules/bmm/workflows/testarch/automate/workflow.yaml b/src/modules/bmm/workflows/testarch/automate/workflow.yaml new file mode 100644 index 00000000..88995c66 --- /dev/null +++ b/src/modules/bmm/workflows/testarch/automate/workflow.yaml @@ -0,0 +1,25 @@ +# Test Architect workflow: automate +name: testarch-automate +description: "Expand automation coverage after implementation." +author: "BMad" + +config_source: "{project-root}/bmad/bmm/config.yaml" +output_folder: "{config_source}:output_folder" +user_name: "{config_source}:user_name" +communication_language: "{config_source}:communication_language" +date: system-generated + +installed_path: "{project-root}/bmad/bmm/workflows/testarch/automate" +instructions: "{installed_path}/instructions.md" + +template: false + +tags: + - qa + - automation + - test-architect + +execution_hints: + interactive: false + autonomous: true + iterative: true diff --git a/src/modules/bmm/workflows/testarch/ci/instructions.md b/src/modules/bmm/workflows/testarch/ci/instructions.md new file mode 100644 index 00000000..1826485c --- /dev/null +++ b/src/modules/bmm/workflows/testarch/ci/instructions.md @@ -0,0 +1,43 @@ + + +# CI/CD Enablement v3.0 + +```xml + + + Preflight requirements: + - Git repository is initialized. + - Local test suite passes. + - Team agrees on target environments. + - Access to CI platform settings/secrets is available. + + + + Confirm all items above; halt if prerequisites are unmet. + + + Detect CI platform (default GitHub Actions; ask about GitLab/CircleCI/etc.). + Scaffold workflow (e.g., `.github/workflows/test.yml`) with appropriate triggers and caching (Node version from `.nvmrc`, browsers). + Stage jobs sequentially (lint → unit → component → e2e) with matrix parallelization (shard by file, not test). + Add selective execution script(s) for affected tests plus burn-in job rerunning changed specs 3x to catch flakiness. + Attach artifacts on failure (traces/videos/HAR) and configure retries/backoff/concurrency controls. + Document required secrets/environment variables and wire Slack/email notifications; provide local mirror script. + + + Produce workflow file(s), helper scripts (`test-changed`, burn-in), README/ci.md updates, secrets checklist, and any dashboard/badge configuration. + + + + If git repo is absent, tests fail, or CI platform is unspecified, halt and request setup. 
+ + + Reference `{project-root}/bmad/bmm/testarch/tea-knowledge.md` for heuristics that shape this guidance. + Target ~20× speedups via parallel shards and caching; keep jobs under 10 minutes. + Use `wait-on-timeout` ≈120s for app startup; ensure local `npm test` mirrors CI run. + Mention alternative platform paths when not on GitHub. + + + CI pipeline configuration and guidance ready for team adoption. + + +``` diff --git a/src/modules/bmm/workflows/testarch/ci/workflow.yaml b/src/modules/bmm/workflows/testarch/ci/workflow.yaml new file mode 100644 index 00000000..38ebb4f2 --- /dev/null +++ b/src/modules/bmm/workflows/testarch/ci/workflow.yaml @@ -0,0 +1,25 @@ +# Test Architect workflow: ci +name: testarch-ci +description: "Scaffold or update the CI/CD quality pipeline." +author: "BMad" + +config_source: "{project-root}/bmad/bmm/config.yaml" +output_folder: "{config_source}:output_folder" +user_name: "{config_source}:user_name" +communication_language: "{config_source}:communication_language" +date: system-generated + +installed_path: "{project-root}/bmad/bmm/workflows/testarch/ci" +instructions: "{installed_path}/instructions.md" + +template: false + +tags: + - qa + - ci-cd + - test-architect + +execution_hints: + interactive: false + autonomous: true + iterative: true diff --git a/src/modules/bmm/workflows/testarch/framework/instructions.md b/src/modules/bmm/workflows/testarch/framework/instructions.md new file mode 100644 index 00000000..6935ddeb --- /dev/null +++ b/src/modules/bmm/workflows/testarch/framework/instructions.md @@ -0,0 +1,43 @@ + + +# Test Framework Setup v3.0 + +```xml + + + Preflight requirements: + - Confirm `package.json` exists. + - Verify no modern E2E harness is already configured. + - Have architectural/stack context available. + + + + Validate each preflight requirement; stop immediately if any fail. + + + Identify framework stack from `package.json` (React/Vue/Angular/Next.js) and bundler (Vite/Webpack/Rollup/esbuild). + Select Playwright for large/perf-critical repos, Cypress for small DX-first teams. + Create folders `{framework}/tests/`, `{framework}/support/fixtures/`, `{framework}/support/helpers/`. + Configure timeouts (action 15s, navigation 30s, test 60s) and reporters (HTML + JUnit). + Generate `.env.example` with `TEST_ENV`, `BASE_URL`, `API_URL` plus `.nvmrc`. + Implement pure function → fixture → `mergeTests` pattern and faker-based data factories. + Enable failure-only screenshots/videos and document setup in README. + + + Produce Playwright/Cypress scaffold (config + support tree), `.env.example`, `.nvmrc`, seed tests, and README instructions. + + + + If prerequisites fail or an existing harness is detected, halt and notify the user. + + + Reference `{project-root}/bmad/bmm/testarch/tea-knowledge.md` for heuristics that shape this guidance. + Playwright: take advantage of worker parallelism, trace viewer, multi-language support. + Cypress: avoid when dependent API chains are heavy; consider component testing (Vitest/Cypress CT). + Contract testing: suggest Pact for microservices; always recommend data-cy/data-testid selectors. + + + Scaffolded framework assets and summary of what was created. 
+ + +``` diff --git a/src/modules/bmm/workflows/testarch/framework/workflow.yaml b/src/modules/bmm/workflows/testarch/framework/workflow.yaml new file mode 100644 index 00000000..d2517cce --- /dev/null +++ b/src/modules/bmm/workflows/testarch/framework/workflow.yaml @@ -0,0 +1,25 @@ +# Test Architect workflow: framework +name: testarch-framework +description: "Initialize or refresh the test framework harness." +author: "BMad" + +config_source: "{project-root}/bmad/bmm/config.yaml" +output_folder: "{config_source}:output_folder" +user_name: "{config_source}:user_name" +communication_language: "{config_source}:communication_language" +date: system-generated + +installed_path: "{project-root}/bmad/bmm/workflows/testarch/framework" +instructions: "{installed_path}/instructions.md" + +template: false + +tags: + - qa + - setup + - test-architect + +execution_hints: + interactive: false + autonomous: true + iterative: true diff --git a/src/modules/bmm/workflows/testarch/gate/instructions.md b/src/modules/bmm/workflows/testarch/gate/instructions.md new file mode 100644 index 00000000..dabbc6f6 --- /dev/null +++ b/src/modules/bmm/workflows/testarch/gate/instructions.md @@ -0,0 +1,39 @@ + + +# Quality Gate v3.0 + +```xml + + + Preflight requirements: + - Latest assessments (risk/test design, trace, automation, NFR) are available. + - Team has consensus on fixes/mitigations. + + + + Gather required assessments and confirm consensus; halt if information is stale or missing. + + + Assemble story metadata (id, title, links) for the gate file. + Apply deterministic rules: PASS (all critical issues resolved), CONCERNS (minor residual risk), FAIL (critical blockers), WAIVED (business-approved waiver). + Document rationale, residual risks, owners, due dates, and waiver details where applicable. + + + Update gate YAML with schema fields (story info, status, rationale, waiver, top issues, risk summary, recommendations, NFR validation, history). + Provide summary message for the team highlighting decision and next steps. + + + + If reviews are incomplete or risk data is outdated, halt and request the necessary reruns. + + + Reference `{project-root}/bmad/bmm/testarch/tea-knowledge.md` for heuristics that shape this guidance. + FAIL whenever unresolved P0 risks/tests or security issues remain. + CONCERNS when mitigations are planned but residual risk exists; WAIVED requires reason, approver, and expiry. + Maintain audit trail in the history section. + + + Gate YAML entry and communication summary documenting the decision. + + +``` diff --git a/src/modules/bmm/workflows/testarch/gate/workflow.yaml b/src/modules/bmm/workflows/testarch/gate/workflow.yaml new file mode 100644 index 00000000..97bf8bba --- /dev/null +++ b/src/modules/bmm/workflows/testarch/gate/workflow.yaml @@ -0,0 +1,25 @@ +# Test Architect workflow: gate +name: testarch-gate +description: "Record the quality gate decision for the story." 
+author: "BMad" + +config_source: "{project-root}/bmad/bmm/config.yaml" +output_folder: "{config_source}:output_folder" +user_name: "{config_source}:user_name" +communication_language: "{config_source}:communication_language" +date: system-generated + +installed_path: "{project-root}/bmad/bmm/workflows/testarch/gate" +instructions: "{installed_path}/instructions.md" + +template: false + +tags: + - qa + - gate + - test-architect + +execution_hints: + interactive: false + autonomous: true + iterative: true diff --git a/src/modules/bmm/workflows/testarch/nfr-assess/instructions.md b/src/modules/bmm/workflows/testarch/nfr-assess/instructions.md new file mode 100644 index 00000000..70cd12e8 --- /dev/null +++ b/src/modules/bmm/workflows/testarch/nfr-assess/instructions.md @@ -0,0 +1,39 @@ + + +# NFR Assessment v3.0 + +```xml + + + Preflight requirements: + - Implementation is deployed locally or accessible for evaluation. + - Non-functional goals/SLAs are defined or discoverable. + + + + Confirm prerequisites; halt if targets are unknown and cannot be clarified. + + + Identify which NFRs to assess (default: Security, Performance, Reliability, Maintainability). + Gather thresholds from story/architecture/technical preferences; mark unknown targets. + Inspect evidence (tests, telemetry, logs) for each NFR and classify status using deterministic PASS/CONCERNS/FAIL rules. + List quick wins and recommended actions for any concerns/failures. + + + Produce NFR assessment markdown summarizing evidence, status, and actions; update gate YAML block with NFR findings; compile checklist of evidence gaps and owners. + + + + If NFR targets are undefined and cannot be obtained, halt and request definition. + + + Reference `{project-root}/bmad/bmm/testarch/tea-knowledge.md` for heuristics that shape this guidance. + Unknown thresholds default to CONCERNS—never guess. + Ensure every NFR has evidence or call it out explicitly. + Suggest monitoring hooks and fail-fast mechanisms when gaps exist. + + + NFR assessment report with actionable follow-ups and gate snippet. + + +``` diff --git a/src/modules/bmm/workflows/testarch/nfr-assess/workflow.yaml b/src/modules/bmm/workflows/testarch/nfr-assess/workflow.yaml new file mode 100644 index 00000000..baaea8e7 --- /dev/null +++ b/src/modules/bmm/workflows/testarch/nfr-assess/workflow.yaml @@ -0,0 +1,25 @@ +# Test Architect workflow: nfr-assess +name: testarch-nfr +description: "Assess non-functional requirements before release." +author: "BMad" + +config_source: "{project-root}/bmad/bmm/config.yaml" +output_folder: "{config_source}:output_folder" +user_name: "{config_source}:user_name" +communication_language: "{config_source}:communication_language" +date: system-generated + +installed_path: "{project-root}/bmad/bmm/workflows/testarch/nfr-assess" +instructions: "{installed_path}/instructions.md" + +template: false + +tags: + - qa + - nfr + - test-architect + +execution_hints: + interactive: false + autonomous: true + iterative: true diff --git a/src/modules/bmm/workflows/testarch/test-design/instructions.md b/src/modules/bmm/workflows/testarch/test-design/instructions.md new file mode 100644 index 00000000..ee659674 --- /dev/null +++ b/src/modules/bmm/workflows/testarch/test-design/instructions.md @@ -0,0 +1,43 @@ + + +# Risk & Test Design v3.1 + +```xml + + + Preflight requirements: + - Story markdown, acceptance criteria, PRD/architecture context are available. + + + + Confirm inputs; halt if any are missing or unclear. 
+ + + Consult `{project-root}/bmad/bmm/testarch/tea-knowledge.md` for the latest risk heuristics before scoring. + Filter requirements to isolate genuine risks; review PRD/architecture/story for unresolved gaps. + Classify risks across TECH, SEC, PERF, DATA, BUS, OPS; request clarification when evidence is missing. + Score probability (1 unlikely, 2 possible, 3 likely) and impact (1 minor, 2 degraded, 3 critical); compute totals and highlight scores ≥6. + Plan mitigations with owners, timelines, and update residual risk expectations. + + + Break acceptance criteria into atomic scenarios tied to mitigations. + Choose test levels using `{project-root}/bmad/bmm/testarch/test-levels-framework.md` and avoid duplicate coverage (prefer lower levels when possible). + Assign priorities using `{project-root}/bmad/bmm/testarch/test-priorities-matrix.md`; outline data/tooling prerequisites and execution order. + + + Create risk assessment markdown (category/probability/impact/score) with mitigation matrix and gate snippet totals. + Produce coverage matrix (requirement/level/priority/mitigation) plus recommended execution order. + + + + If story data or criteria are missing, halt and request them. + + + Category definitions: TECH=architecture flaws; SEC=missing controls; PERF=SLA risk; DATA=loss/corruption; BUS=user/business harm; OPS=deployment/run failures. + Rely on evidence, not speculation; tie scenarios back to mitigations; keep scenarios independent and maintainable. + + + Unified risk assessment and coverage strategy ready for implementation. + + +``` diff --git a/src/modules/bmm/workflows/testarch/test-design/workflow.yaml b/src/modules/bmm/workflows/testarch/test-design/workflow.yaml new file mode 100644 index 00000000..4d1b8346 --- /dev/null +++ b/src/modules/bmm/workflows/testarch/test-design/workflow.yaml @@ -0,0 +1,25 @@ +# Test Architect workflow: test-design +name: testarch-plan +description: "Plan risk mitigation and test coverage before development." +author: "BMad" + +config_source: "{project-root}/bmad/bmm/config.yaml" +output_folder: "{config_source}:output_folder" +user_name: "{config_source}:user_name" +communication_language: "{config_source}:communication_language" +date: system-generated + +installed_path: "{project-root}/bmad/bmm/workflows/testarch/test-design" +instructions: "{installed_path}/instructions.md" + +template: false + +tags: + - qa + - planning + - test-architect + +execution_hints: + interactive: false + autonomous: true + iterative: true diff --git a/src/modules/bmm/workflows/testarch/trace/instructions.md b/src/modules/bmm/workflows/testarch/trace/instructions.md new file mode 100644 index 00000000..aa97d8c8 --- /dev/null +++ b/src/modules/bmm/workflows/testarch/trace/instructions.md @@ -0,0 +1,39 @@ + + +# Requirements Traceability v3.0 + +```xml + + + Preflight requirements: + - Story has implemented tests (or acknowledge gaps). + - Access to source code and specifications is available. + + + + Confirm prerequisites; halt if tests or specs are unavailable. + + + Gather acceptance criteria and implemented tests. + Map each criterion to concrete tests (file + describe/it) using Given-When-Then narrative. + Classify coverage status as FULL, PARTIAL, NONE, UNIT-ONLY, or INTEGRATION-ONLY. + Flag severity based on priority (P0 gaps are critical) and recommend additional tests or refactors. + Build gate YAML coverage summary reflecting totals and gaps. 
+ + + Generate traceability report under `docs/qa/assessments`, a coverage matrix per criterion, and gate YAML snippet capturing totals/gaps. + + + + If story lacks implemented tests, pause and advise running `*atdd` or writing tests before tracing. + + + Reference `{project-root}/bmad/bmm/testarch/tea-knowledge.md` for heuristics that shape this guidance. + Coverage definitions: FULL=all scenarios validated, PARTIAL=some coverage, NONE=no validation, UNIT-ONLY=missing higher-level validation, INTEGRATION-ONLY=lacks lower-level confidence. + Ensure assertions stay explicit and avoid duplicate coverage. + + + Traceability matrix and gate snippet ready for review. + + +``` diff --git a/src/modules/bmm/workflows/testarch/trace/workflow.yaml b/src/modules/bmm/workflows/testarch/trace/workflow.yaml new file mode 100644 index 00000000..95758c43 --- /dev/null +++ b/src/modules/bmm/workflows/testarch/trace/workflow.yaml @@ -0,0 +1,25 @@ +# Test Architect workflow: trace +name: testarch-trace +description: "Trace requirements to implemented automated tests." +author: "BMad" + +config_source: "{project-root}/bmad/bmm/config.yaml" +output_folder: "{config_source}:output_folder" +user_name: "{config_source}:user_name" +communication_language: "{config_source}:communication_language" +date: system-generated + +installed_path: "{project-root}/bmad/bmm/workflows/testarch/trace" +instructions: "{installed_path}/instructions.md" + +template: false + +tags: + - qa + - traceability + - test-architect + +execution_hints: + interactive: false + autonomous: true + iterative: true