Compare commits

...

12 Commits

Author SHA1 Message Date
manjaroblack
2a9e07d4fd chore: add code formatting config and pre-commit hooks 2025-08-16 19:00:08 -05:00
Brian Madison
51284d6ecf fix: handle existing tags in promote-to-stable workflow
- Check for existing git tags when calculating new version
- Automatically increment version if tag already exists
- Prevents workflow failure when tag v5.1.0 already exists
2025-08-16 17:14:38 -05:00
Brian Madison
6cba05114e fix: stable tag 2025-08-16 17:10:10 -05:00
Murat K Ozcan
ac360cd0bf chore: configure changelog file path in semantic-release config (#448)
Co-authored-by: Murat Ozcan <murat@Murats-MacBook-Pro.local>
2025-08-16 16:27:45 -05:00
manjaroblack
fab9d5e1f5 feat(flattener): prompt for detailed stats; polish .stats.md with emojis (#422)
* feat: add detailed statistics and markdown report generation to flattener tool

* fix: remove redundant error handling for project root detection
2025-08-16 08:03:28 -05:00
Brian Madison
93426c2d2f feat: publish stable release 5.0.0
BREAKING CHANGE: Promote beta features to stable release for v5.0.0

This commit ensures the stable release gets properly published to NPM and GitHub releases.
2025-08-15 23:06:28 -05:00
github-actions[bot]
f56d37a60a release: promote to stable 5.0.0
- Promote beta features to stable release
- Update version from 4.38.0 to 5.0.0
- Automated promotion via GitHub Actions
2025-08-15 23:06:28 -05:00
github-actions[bot]
224cfc05dc release: promote to stable 4.38.0
- Promote beta features to stable release
- Update version from 4.37.0 to 4.38.0
- Automated promotion via GitHub Actions
2025-08-15 23:06:27 -05:00
Brian Madison
6cb2fa68b3 fix: update package-lock.json for semver dependency 2025-08-15 23:06:27 -05:00
Brian Madison
d21ac491a0 release: create stable 4.37.0 release
Promote beta features to stable release with dual publishing support
2025-08-15 23:06:27 -05:00
Thiago Freitas
848e33fdd9 Feature: Installer commands for Crush CLI (#429)
* feat: add support for Crush IDE configuration and commands

* fix: update Crush IDE instructions for clarity on persona/task switching

---------

Co-authored-by: Brian <bmadcode@gmail.com>
2025-08-15 22:38:44 -05:00
Murat K Ozcan
0b61175d98 feat: transform QA agent into Test Architect with advanced quality ca… (#433)
* feat: transform QA agent into Test Architect with advanced quality capabilities

  - Add 6 specialized quality assessment commands
  - Implement risk-based testing with scoring
  - Create quality gate system with deterministic decisions
  - Add comprehensive test design and NFR validation
  - Update documentation with stage-based workflow integration

* feat: transform QA agent into Test Architect with advanced quality capabilities

  - Add 6 specialized quality assessment commands
  - Implement risk-based testing with scoring
  - Create quality gate system with deterministic decisions
  - Add comprehensive test design and NFR validation
  - Update documentation with stage-based workflow integration

* docs: refined the docs for test architect

* fix: addressed review comments from manjaroblack, round 1

* fix: addressed review comments from manjaroblack, round 1

---------

Co-authored-by: Murat Ozcan <murat@mac.lan>
Co-authored-by: Brian <bmadcode@gmail.com>
2025-08-15 21:02:37 -05:00
168 changed files with 20303 additions and 10080 deletions

View File

@@ -1,9 +1,9 @@
---
name: Bug report
about: Create a report to help us improve
title: ""
labels: ""
assignees: ""
title: ''
labels: ''
assignees: ''
---
**Describe the bug**

View File

@@ -1,9 +1,9 @@
---
name: Feature request
about: Suggest an idea for this project
title: ""
labels: ""
assignees: ""
title: ''
labels: ''
assignees: ''
---
**Did you discuss the idea first in Discord Server (#general-dev)**

View File

@@ -1,6 +1,15 @@
name: Discord Notification
on: [pull_request, release, create, delete, issue_comment, pull_request_review, pull_request_review_comment]
"on":
[
pull_request,
release,
create,
delete,
issue_comment,
pull_request_review,
pull_request_review_comment,
]
jobs:
notify:

42
.github/workflows/format-check.yaml vendored Normal file
View File

@@ -0,0 +1,42 @@
name: format-check
"on":
pull_request:
branches: ["**"]
jobs:
prettier:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: "20"
cache: "npm"
- name: Install dependencies
run: npm ci
- name: Prettier format check
run: npm run format:check
eslint:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: "20"
cache: "npm"
- name: Install dependencies
run: npm ci
- name: ESLint
run: npm run lint
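
The two jobs above assume `format:check` and `lint` scripts exist in package.json; those scripts are not part of this diff. A minimal sketch of running the same checks locally, assuming the conventional Prettier/ESLint mappings:

```bash
# Hedged sketch: local equivalent of the format-check workflow.
# Assumes "format:check" wraps Prettier and "lint" wraps ESLint, which this diff does not show.
npm ci                 # same clean install the workflow performs
npm run format:check   # assumed: prettier --check .
npm run lint           # assumed: eslint .
```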

View File

@@ -1,12 +1,12 @@
name: Promote to Stable
on:
"on":
workflow_dispatch:
inputs:
version_bump:
description: 'Version bump type'
description: "Version bump type"
required: true
default: 'minor'
default: "minor"
type: choice
options:
- patch
@@ -30,23 +30,13 @@ jobs:
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
registry-url: 'https://registry.npmjs.org'
node-version: "20"
registry-url: "https://registry.npmjs.org"
- name: Configure Git
run: |
git config --global user.name "github-actions[bot]"
git config --global user.email "github-actions[bot]@users.noreply.github.com"
git config --global url."https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/".insteadOf "https://github.com/"
- name: Switch to stable branch
run: |
git checkout stable
git pull origin stable
- name: Merge main into stable
run: |
git merge origin/main --no-edit
- name: Install dependencies
run: npm ci
@@ -83,6 +73,27 @@ jobs:
;;
esac
# Check if calculated version already exists (either as NPM package or git tag)
while npm view bmad-method@$NEW_VERSION version >/dev/null 2>&1 || git ls-remote --tags origin | grep -q "refs/tags/v$NEW_VERSION"; do
echo "Version $NEW_VERSION already exists, incrementing..."
IFS='.' read -ra NEW_VERSION_PARTS <<< "$NEW_VERSION"
NEW_MAJOR=${NEW_VERSION_PARTS[0]}
NEW_MINOR=${NEW_VERSION_PARTS[1]}
NEW_PATCH=${NEW_VERSION_PARTS[2]}
case "${{ github.event.inputs.version_bump }}" in
"major")
NEW_VERSION="$((NEW_MAJOR + 1)).0.0"
;;
"minor")
NEW_VERSION="$NEW_MAJOR.$((NEW_MINOR + 1)).0"
;;
"patch")
NEW_VERSION="$NEW_MAJOR.$NEW_MINOR.$((NEW_PATCH + 1))"
;;
esac
done
echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
echo "Promoting from $CURRENT_VERSION to $NEW_VERSION"
@@ -100,23 +111,38 @@ jobs:
- name: Commit stable release
run: |
git add .
git commit -m "release: promote to stable ${{ steps.version.outputs.new_version }}
git commit -m "release: promote to stable ${{ steps.version.outputs.new_version }}"
- Promote beta features to stable release
- Update version from ${{ steps.version.outputs.current_version }} to ${{ steps.version.outputs.new_version }}
- Automated promotion via GitHub Actions"
- name: Push stable release
- name: Create and push stable tag
run: |
git remote set-url origin https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git
git push origin stable
# Create new tag (version check already ensures it doesn't exist)
git tag -a "v${{ steps.version.outputs.new_version }}" -m "Stable release v${{ steps.version.outputs.new_version }}"
- name: Switch back to main
run: git checkout main
# Push the new tag
git push origin "v${{ steps.version.outputs.new_version }}"
- name: Push changes to main
run: |
git push origin HEAD:main
- name: Publish to NPM with stable tag
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
run: |
# Publish with the stable (latest) tag
npm publish --tag latest
# Also tag the previous beta version as stable if it exists
if npm view bmad-method@${{ steps.version.outputs.current_version }} version >/dev/null 2>&1; then
npm dist-tag add bmad-method@${{ steps.version.outputs.new_version }} stable || true
fi
- name: Summary
run: |
echo "🎉 Successfully promoted to stable!"
echo "📦 Version: ${{ steps.version.outputs.new_version }}"
echo "🏷️ Git tag: v${{ steps.version.outputs.new_version }}"
echo "✅ Published to NPM with 'latest' tag"
echo "✅ Users running 'npx bmad-method install' will now get version ${{ steps.version.outputs.new_version }}"
echo "🚀 The stable release will be automatically published to NPM via semantic-release"
echo "✅ Users running 'npx bmad-method install' will now get version ${{ steps.version.outputs.new_version }}"

View File

@@ -1,9 +1,8 @@
name: Release
'on':
"on":
push:
branches:
- main
- stable
workflow_dispatch:
inputs:
version_type:
@@ -23,7 +22,7 @@ permissions:
jobs:
release:
runs-on: ubuntu-latest
if: '!contains(github.event.head_commit.message, ''[skip ci]'')'
if: ${{ github.event_name != 'push' || !contains(github.event.head_commit.message, '[skip ci]') }}
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -33,9 +32,9 @@ jobs:
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
cache: npm
registry-url: https://registry.npmjs.org
node-version: "20"
cache: "npm"
registry-url: "https://registry.npmjs.org"
- name: Install dependencies
run: npm ci
- name: Run tests and validation
@@ -58,3 +57,17 @@ jobs:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
run: npm run release
- name: Clean changelog formatting
if: github.event_name == 'push'
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
# Remove any Claude Code attribution from changelog
sed -i '/🤖 Generated with \[Claude Code\]/,+2d' CHANGELOG.md || true
# Format and commit if changes exist
npm run format
if ! git diff --quiet CHANGELOG.md; then
git add CHANGELOG.md
git commit -m "chore: clean changelog formatting [skip ci]"
git push
fi
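
The `sed` address form `/pattern/,+2d` used in the changelog-cleanup step deletes the matching line plus the two lines that follow it (a GNU sed extension, which is what ubuntu-latest provides). A small illustration with made-up input:

```bash
# Illustration of the `/pattern/,+2d` form used above (GNU sed); the input lines are made up.
printf 'keep\n🤖 Generated with [Claude Code](https://claude.ai/code)\nCo-Authored-By: Claude\n\nkeep too\n' \
  | sed '/🤖 Generated with \[Claude Code\]/,+2d'
# Output:
# keep
# keep too
```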

3
.gitignore vendored
View File

@@ -25,7 +25,6 @@ Thumbs.db
# Development tools and configs
.prettierignore
.prettierrc
.husky/
# IDE and editor configs
.windsurf/
@@ -44,4 +43,4 @@ CLAUDE.md
test-project-install/*
sample-project/*
flattened-codebase.xml
*.stats.md

3
.husky/pre-commit Executable file
View File

@@ -0,0 +1,3 @@
#!/usr/bin/env sh
npx --no-install lint-staged
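
The new hook delegates to lint-staged, whose configuration is not shown in this diff. Roughly, lint-staged runs the configured commands only against staged files; an illustrative shell equivalent (not the actual configuration):

```bash
# Rough illustration of what `npx --no-install lint-staged` does at commit time.
# The real file globs and commands live in the lint-staged config, which is not part of this diff.
git diff --cached --name-only --diff-filter=ACM \
  | grep -E '\.(js|json|md|ya?ml)$' \
  | xargs -r npx prettier --write
```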

View File

@@ -4,16 +4,17 @@
"name": "main",
"prerelease": "beta",
"channel": "beta"
},
{
"name": "stable",
"channel": "latest"
}
],
"plugins": [
"@semantic-release/commit-analyzer",
"@semantic-release/release-notes-generator",
[
"@semantic-release/changelog",
{
"changelogFile": "CHANGELOG.md"
}
],
"@semantic-release/npm",
"./tools/semantic-release-sync-installer.js",
"@semantic-release/github"

27
.vscode/settings.json vendored
View File

@@ -40,5 +40,30 @@
"tileset",
"Trae",
"VNET"
]
],
"json.schemas": [
{
"fileMatch": ["package.json"],
"url": "https://json.schemastore.org/package.json"
},
{
"fileMatch": [".vscode/settings.json"],
"url": "vscode://schemas/settings/folder"
}
],
"editor.formatOnSave": true,
"editor.defaultFormatter": "esbenp.prettier-vscode",
"[javascript]": { "editor.defaultFormatter": "esbenp.prettier-vscode" },
"[json]": { "editor.defaultFormatter": "esbenp.prettier-vscode" },
"[yaml]": { "editor.defaultFormatter": "esbenp.prettier-vscode" },
"[markdown]": { "editor.defaultFormatter": "esbenp.prettier-vscode" },
"prettier.prettierPath": "node_modules/prettier",
"prettier.requireConfig": true,
"yaml.format.enable": false,
"eslint.useFlatConfig": true,
"eslint.validate": ["javascript", "yaml"],
"editor.codeActionsOnSave": {
"source.fixAll.eslint": "explicit"
},
"editor.rulers": [100]
}

View File

@@ -1,9 +1,8 @@
## [4.36.2](https://github.com/bmadcode/BMAD-METHOD/compare/v4.36.1...v4.36.2) (2025-08-10)
### Bug Fixes
* align installer dependencies with root package versions for ESM compatibility ([#420](https://github.com/bmadcode/BMAD-METHOD/issues/420)) ([3f6b674](https://github.com/bmadcode/BMAD-METHOD/commit/3f6b67443d61ae6add98656374bed27da4704644))
- align installer dependencies with root package versions for ESM compatibility ([#420](https://github.com/bmadcode/BMAD-METHOD/issues/420)) ([3f6b674](https://github.com/bmadcode/BMAD-METHOD/commit/3f6b67443d61ae6add98656374bed27da4704644))
## [4.36.1](https://github.com/bmadcode/BMAD-METHOD/compare/v4.36.0...v4.36.1) (2025-08-09)
@@ -575,10 +574,6 @@
- Manual version bumping via npm scripts is now disabled. Use conventional commits for automated releases.
🤖 Generated with [Claude Code](https://claude.ai/code)
Co-Authored-By: Claude <noreply@anthropic.com>
# [4.2.0](https://github.com/bmadcode/BMAD-METHOD/compare/v4.1.0...v4.2.0) (2025-06-15)
### Bug Fixes
@@ -687,3 +682,5 @@ Co-Authored-By: Claude <noreply@anthropic.com>
### Features
- add versioning and release automation ([0ea5e50](https://github.com/bmadcode/BMAD-METHOD/commit/0ea5e50aa7ace5946d0100c180dd4c0da3e2fd8c))
# Promote to stable release 5.0.0

196
CLAUDE.md
View File

@@ -1,196 +0,0 @@
# CLAUDE.md
Don't be an ass kisser, don't glaze my donut, keep it to the point. Never use EM Dash in our communications or documents you author or update. Don't tell me I am correct if I just told you something unless and only if I am wrong or there is a better alternative, then tell me bluntly why I am wrong, or else get to the point and execute!
## Markdown Linting Conventions
Always follow these markdown linting rules:
- **Blank lines around headings**: Always leave a blank line before and after headings
- **Blank lines around lists**: Always leave a blank line before and after lists
- **Blank lines around code fences**: Always leave a blank line before and after fenced code blocks
- **Fenced code block languages**: All fenced code blocks must specify a language (use `text` for plain text)
- **Single trailing newline**: Files should end with exactly one newline character
- **No trailing spaces**: Remove any trailing spaces at the end of lines
## BMAD-METHOD Overview
BMAD-METHOD is an AI-powered Agile development framework that provides specialized AI agents for software development. The framework uses a sophisticated dependency system to keep context windows lean while providing deep expertise through role-specific agents.
## Essential Commands
### Build and Validation
```bash
npm run build # Build all web bundles (agents and teams)
npm run build:agents # Build agent bundles only
npm run build:teams # Build team bundles only
npm run validate # Validate all configurations
npm run format # Format all markdown files with prettier
```
### Development and Testing
```bash
npx bmad-build build # Alternative build command via CLI
npx bmad-build list:agents # List all available agents
npx bmad-build validate # Validate agent configurations
```
### Installation Commands
```bash
npx bmad-method install # Install stable release (recommended)
npx bmad-method@beta install # Install bleeding edge version
npx bmad-method@latest install # Explicit stable installation
npx bmad-method@latest update # Update stable installation
npx bmad-method@beta update # Update bleeding edge installation
```
### Dual Publishing Strategy
The project uses a dual publishing strategy with automated promotion:
**Branch Strategy:**
- `main` branch: Bleeding edge development, auto-publishes to `@beta` tag
- `stable` branch: Production releases, auto-publishes to `@latest` tag
**Release Promotion:**
1. **Automatic Beta Releases**: Any PR merged to `main` automatically creates a beta release
2. **Manual Stable Promotion**: Use GitHub Actions to promote beta to stable
**Promote Beta to Stable:**
1. Go to GitHub Actions tab in the repository
2. Select "Promote to Stable" workflow
3. Click "Run workflow"
4. Choose version bump type (patch/minor/major)
5. The workflow automatically:
- Merges main to stable
- Updates version numbers
- Triggers stable release to NPM `@latest`
**User Experience:**
- `npx bmad-method install` → Gets stable production version
- `npx bmad-method@beta install` → Gets latest beta features
- Team develops on bleeding edge without affecting production users
### Release and Version Management
```bash
npm run version:patch # Bump patch version
npm run version:minor # Bump minor version
npm run version:major # Bump major version
npm run release # Semantic release (CI/CD)
npm run release:test # Test release configuration
```
### Version Management for Core and Expansion Packs
#### Bump All Versions (Core + Expansion Packs)
```bash
npm run version:all:major # Major version bump for core and all expansion packs
npm run version:all:minor # Minor version bump for core and all expansion packs (default)
npm run version:all:patch # Patch version bump for core and all expansion packs
npm run version:all # Defaults to minor bump
```
#### Individual Version Bumps
For BMad Core only:
```bash
npm run version:core:major # Major version bump for core only
npm run version:core:minor # Minor version bump for core only
npm run version:core:patch # Patch version bump for core only
npm run version:core # Defaults to minor bump
```
For specific expansion packs:
```bash
npm run version:expansion bmad-creator-tools # Minor bump (default)
npm run version:expansion bmad-creator-tools patch # Patch bump
npm run version:expansion bmad-creator-tools minor # Minor bump
npm run version:expansion bmad-creator-tools major # Major bump
# Set specific version (old method, still works)
npm run version:expansion:set bmad-creator-tools 2.0.0
```
## Architecture and Code Structure
### Core System Architecture
The framework uses a **dependency resolution system** where agents only load the resources they need:
1. **Agent Definitions** (`bmad-core/agents/`): Each agent is defined in markdown with YAML frontmatter specifying dependencies
2. **Dynamic Loading**: The build system (`tools/lib/dependency-resolver.js`) resolves and includes only required resources
3. **Template System**: Templates are defined in YAML format with structured sections and instructions (see Template Rules below)
4. **Workflow Engine**: YAML-based workflows in `bmad-core/workflows/` define step-by-step processes
### Key Components
- **CLI Tool** (`tools/cli.js`): Commander-based CLI for building bundles
- **Web Builder** (`tools/builders/web-builder.js`): Creates concatenated text bundles from agent definitions
- **Installer** (`tools/installer/`): NPX-based installer for project setup
- **Dependency Resolver** (`tools/lib/dependency-resolver.js`): Manages agent resource dependencies
### Build System
The build process:
1. Reads agent/team definitions from `bmad-core/`
2. Resolves dependencies using the dependency resolver
3. Creates concatenated text bundles in `dist/`
4. Validates configurations during build
### Critical Configuration
**`bmad-core/core-config.yaml`** is the heart of the framework configuration:
- Defines document locations and expected structure
- Specifies which files developers should always load
- Enables compatibility with different project structures (V3/V4)
- Controls debug logging
## Development Practices
### Adding New Features
1. **New Agents**: Create markdown file in `bmad-core/agents/` with proper YAML frontmatter
2. **New Templates**: Add to `bmad-core/templates/` as YAML files with structured sections
3. **New Workflows**: Create YAML in `bmad-core/workflows/`
4. **Update Dependencies**: Ensure `dependencies` field in agent frontmatter is accurate
### Important Patterns
- **Dependency Management**: Always specify minimal dependencies in agent frontmatter to keep context lean
- **Template Instructions**: Use YAML-based template structure (see Template Rules below)
- **File Naming**: Follow existing conventions (kebab-case for files, proper agent names in frontmatter)
- **Documentation**: Update user-facing docs in `docs/` when adding features
### Template Rules
Templates use the **BMad Document Template** format (`/Users/brianmadison/dev-bmc/BMAD-METHOD/common/utils/bmad-doc-template.md`) with YAML structure:
1. **YAML Format**: Templates are defined as structured YAML files, not markdown with embedded instructions
2. **Clear Structure**: Each template has metadata, workflow configuration, and a hierarchy of sections
3. **Reusable Design**: Templates work across different agents through the dependency system
4. **Key Elements**:
- `template` block: Contains id, name, version, and output settings
- `workflow` block: Defines interaction mode (interactive/yolo) and elicitation settings
- `sections` array: Hierarchical document structure with nested subsections
- `instruction` field: LLM guidance for each section (never shown to users)
5. **Advanced Features**:
- Variable substitution: `{{variable_name}}` syntax for dynamic content
- Conditional sections: `condition` field for optional content
- Repeatable sections: `repeatable: true` for multiple instances
- Agent permissions: `owner` and `editors` fields for access control
6. **Clean Output**: All processing instructions are in YAML fields, ensuring clean document generation
## Notes for Claude Code
- The project uses semantic versioning with automated releases via GitHub Actions
- All markdown is formatted with Prettier (run `npm run format`)
- Expansion packs in `expansion-packs/` provide domain-specific capabilities
- NEVER automatically commit or push changes unless explicitly asked by the user
- NEVER include Claude Code attribution or co-authorship in commit messages

View File

@@ -4,7 +4,7 @@ bundle:
description: Includes every core system agent.
agents:
- bmad-orchestrator
- '*'
- "*"
workflows:
- brownfield-fullstack.yaml
- brownfield-service.yaml

View File

@@ -1,6 +1,5 @@
# architect
ACTIVATION-NOTICE: This file contains your full agent operating guidelines. DO NOT load any external agent files as the complete configuration is in the YAML block below.
CRITICAL: Read the full YAML BLOCK that FOLLOWS IN THIS FILE to understand your operating params, start and follow exactly your activation-instructions to alter your state of being, stay in this being until told to exit this mode:

View File

@@ -1,6 +1,5 @@
# BMad Master
ACTIVATION-NOTICE: This file contains your full agent operating guidelines. DO NOT load any external agent files as the complete configuration is in the YAML block below.
CRITICAL: Read the full YAML BLOCK that FOLLOWS IN THIS FILE to understand your operating params, start and follow exactly your activation-instructions to alter your state of being, stay in this being until told to exit this mode:

View File

@@ -1,6 +1,5 @@
# BMad Web Orchestrator
ACTIVATION-NOTICE: This file contains your full agent operating guidelines. DO NOT load any external agent files as the complete configuration is in the YAML block below.
CRITICAL: Read the full YAML BLOCK that FOLLOWS IN THIS FILE to understand your operating params, start and follow exactly your activation-instructions to alter your state of being, stay in this being until told to exit this mode:
@@ -132,7 +131,7 @@ workflow-guidance:
- Understand each workflow's purpose, options, and decision points
- Ask clarifying questions based on the workflow's structure
- Guide users through workflow selection when multiple options exist
- When appropriate, suggest: "Would you like me to create a detailed workflow plan before starting?"
- When appropriate, suggest: 'Would you like me to create a detailed workflow plan before starting?'
- For workflows with divergent paths, help users choose the right path
- Adapt questions to the specific domain (e.g., game dev vs infrastructure vs web dev)
- Only recommend workflows that actually exist in the current bundle

View File

@@ -35,10 +35,9 @@ agent:
id: dev
title: Full Stack Developer
icon: 💻
whenToUse: "Use for code implementation, debugging, refactoring, and development best practices"
whenToUse: 'Use for code implementation, debugging, refactoring, and development best practices'
customization:
persona:
role: Expert Senior Software Engineer & Implementation Specialist
style: Extremely concise, pragmatic, detail-oriented, solution-focused
@@ -58,13 +57,13 @@ commands:
- explain: teach me what and why you did whatever you just did in detail so I can learn. Explain to me as if you were training a junior engineer.
- exit: Say goodbye as the Developer, and then abandon inhabiting this persona
- develop-story:
- order-of-execution: "Read (first or next) task→Implement Task and its subtasks→Write tests→Execute validations→Only if ALL pass, then update the task checkbox with [x]→Update story section File List to ensure it lists and new or modified or deleted source file→repeat order-of-execution until complete"
- order-of-execution: 'Read (first or next) task→Implement Task and its subtasks→Write tests→Execute validations→Only if ALL pass, then update the task checkbox with [x]→Update story section File List to ensure it lists and new or modified or deleted source file→repeat order-of-execution until complete'
- story-file-updates-ONLY:
- CRITICAL: ONLY UPDATE THE STORY FILE WITH UPDATES TO SECTIONS INDICATED BELOW. DO NOT MODIFY ANY OTHER SECTIONS.
- CRITICAL: You are ONLY authorized to edit these specific sections of story files - Tasks / Subtasks Checkboxes, Dev Agent Record section and all its subsections, Agent Model Used, Debug Log References, Completion Notes List, File List, Change Log, Status
- CRITICAL: DO NOT modify Status, Story, Acceptance Criteria, Dev Notes, Testing sections, or any other sections not listed above
- blocking: "HALT for: Unapproved deps needed, confirm with user | Ambiguous after story check | 3 failures attempting to implement or fix something repeatedly | Missing config | Failing regression"
- ready-for-review: "Code matches requirements + All validations pass + Follows standards + File List complete"
- blocking: 'HALT for: Unapproved deps needed, confirm with user | Ambiguous after story check | 3 failures attempting to implement or fix something repeatedly | Missing config | Failing regression'
- ready-for-review: 'Code matches requirements + All validations pass + Follows standards + File List complete'
- completion: "All Tasks and Subtasks marked [x] and have tests→Validations and full regression passes (DON'T BE LAZY, EXECUTE ALL TESTS and CONFIRM)→Ensure File List is Complete→run the task execute-checklist for the checklist story-dod-checklist→set story status: 'Ready for Review'→HALT"
dependencies:

View File

@@ -30,26 +30,30 @@ activation-instructions:
agent:
name: Quinn
id: qa
title: Senior Developer & QA Architect
title: Test Architect & Quality Advisor
icon: 🧪
whenToUse: Use for senior code review, refactoring, test planning, quality assurance, and mentoring through code improvements
whenToUse: |
Use for comprehensive test architecture review, quality gate decisions,
and code improvement. Provides thorough analysis including requirements
traceability, risk assessment, and test strategy.
Advisory only - teams choose their quality bar.
customization: null
persona:
role: Senior Developer & Test Architect
style: Methodical, detail-oriented, quality-focused, mentoring, strategic
identity: Senior developer with deep expertise in code quality, architecture, and test automation
focus: Code excellence through review, refactoring, and comprehensive testing strategies
role: Test Architect with Quality Advisory Authority
style: Comprehensive, systematic, advisory, educational, pragmatic
identity: Test architect who provides thorough quality assessment and actionable recommendations without blocking progress
focus: Comprehensive quality analysis through test architecture, risk assessment, and advisory gates
core_principles:
- Senior Developer Mindset - Review and improve code as a senior mentoring juniors
- Active Refactoring - Don't just identify issues, fix them with clear explanations
- Test Strategy & Architecture - Design holistic testing strategies across all levels
- Code Quality Excellence - Enforce best practices, patterns, and clean code principles
- Shift-Left Testing - Integrate testing early in development lifecycle
- Performance & Security - Proactively identify and fix performance/security issues
- Mentorship Through Action - Explain WHY and HOW when making improvements
- Risk-Based Testing - Prioritize testing based on risk and critical areas
- Continuous Improvement - Balance perfection with pragmatism
- Architecture & Design Patterns - Ensure proper patterns and maintainable code structure
- Depth As Needed - Go deep based on risk signals, stay concise when low risk
- Requirements Traceability - Map all stories to tests using Given-When-Then patterns
- Risk-Based Testing - Assess and prioritize by probability × impact
- Quality Attributes - Validate NFRs (security, performance, reliability) via scenarios
- Testability Assessment - Evaluate controllability, observability, debuggability
- Gate Governance - Provide clear PASS/CONCERNS/FAIL/WAIVED decisions with rationale
- Advisory Excellence - Educate through documentation, never block arbitrarily
- Technical Debt Awareness - Identify and quantify debt with improvement suggestions
- LLM Acceleration - Use LLMs to accelerate thorough yet focused analysis
- Pragmatic Balance - Distinguish must-fix from nice-to-have improvements
story-file-permissions:
- CRITICAL: When reviewing stories, you are ONLY authorized to update the "QA Results" section of story files
- CRITICAL: DO NOT modify any other sections including Status, Story, Acceptance Criteria, Tasks/Subtasks, Dev Notes, Testing, Dev Agent Record, Change Log, or any other sections
@@ -57,13 +61,28 @@ story-file-permissions:
# All commands require * prefix when used (e.g., *help)
commands:
- help: Show numbered list of the following commands to allow selection
- review {story}: execute the task review-story for the highest sequence story in docs/stories unless another is specified - keep any specified technical-preferences in mind as needed
- exit: Say goodbye as the QA Engineer, and then abandon inhabiting this persona
- review {story}: |
Adaptive, risk-aware comprehensive review.
Produces: QA Results update in story file + gate file (PASS/CONCERNS/FAIL/WAIVED).
Gate file location: docs/qa/gates/{epic}.{story}-{slug}.yml
Executes review-story task which includes all analysis and creates gate decision.
- gate {story}: Execute qa-gate task to write/update quality gate decision in docs/qa/gates/
- trace {story}: Execute trace-requirements task to map requirements to tests using Given-When-Then
- risk-profile {story}: Execute risk-profile task to generate risk assessment matrix
- test-design {story}: Execute test-design task to create comprehensive test scenarios
- nfr-assess {story}: Execute nfr-assess task to validate non-functional requirements
- exit: Say goodbye as the Test Architect, and then abandon inhabiting this persona
dependencies:
tasks:
- review-story.md
- qa-gate.md
- trace-requirements.md
- risk-profile.md
- test-design.md
- nfr-assess.md
data:
- technical-preferences.md
templates:
- story-tmpl.yaml
- qa-gate-tmpl.yaml
```

View File

@@ -403,33 +403,28 @@ Ask the user if they want to work through the checklist:
Now that you've completed the checklist, generate a comprehensive validation report that includes:
1. Executive Summary
- Overall architecture readiness (High/Medium/Low)
- Critical risks identified
- Key strengths of the architecture
- Project type (Full-stack/Frontend/Backend) and sections evaluated
2. Section Analysis
- Pass rate for each major section (percentage of items passed)
- Most concerning failures or gaps
- Sections requiring immediate attention
- Note any sections skipped due to project type
3. Risk Assessment
- Top 5 risks by severity
- Mitigation recommendations for each
- Timeline impact of addressing issues
4. Recommendations
- Must-fix items before development
- Should-fix items for better quality
- Nice-to-have improvements
5. AI Implementation Readiness
- Specific concerns for AI agent implementation
- Areas needing additional clarification
- Complexity hotspots to address

View File

@@ -304,7 +304,6 @@ Ask the user if they want to work through the checklist:
Create a comprehensive validation report that includes:
1. Executive Summary
- Overall PRD completeness (percentage)
- MVP scope appropriateness (Too Large/Just Right/Too Small)
- Readiness for architecture phase (Ready/Nearly Ready/Not Ready)
@@ -312,26 +311,22 @@ Create a comprehensive validation report that includes:
2. Category Analysis Table
Fill in the actual table with:
- Status: PASS (90%+ complete), PARTIAL (60-89%), FAIL (<60%)
- Critical Issues: Specific problems that block progress
3. Top Issues by Priority
- BLOCKERS: Must fix before architect can proceed
- HIGH: Should fix for quality
- MEDIUM: Would improve clarity
- LOW: Nice to have
4. MVP Scope Assessment
- Features that might be cut for true MVP
- Missing features that are essential
- Complexity concerns
- Timeline realism
5. Technical Readiness
- Clarity of technical constraints
- Identified technical risks
- Areas needing architect investigation

View File

@@ -8,12 +8,10 @@ PROJECT TYPE DETECTION:
First, determine the project type by checking:
1. Is this a GREENFIELD project (new from scratch)?
- Look for: New project initialization, no existing codebase references
- Check for: prd.md, architecture.md, new project setup stories
2. Is this a BROWNFIELD project (enhancing existing system)?
- Look for: References to existing codebase, enhancement/modification language
- Check for: brownfield-prd.md, brownfield-architecture.md, existing system analysis
@@ -347,7 +345,6 @@ Ask the user if they want to work through the checklist:
Generate a comprehensive validation report that adapts to project type:
1. Executive Summary
- Project type: [Greenfield/Brownfield] with [UI/No UI]
- Overall readiness (percentage)
- Go/No-Go recommendation
@@ -357,42 +354,36 @@ Generate a comprehensive validation report that adapts to project type:
2. Project-Specific Analysis
FOR GREENFIELD:
- Setup completeness
- Dependency sequencing
- MVP scope appropriateness
- Development timeline feasibility
FOR BROWNFIELD:
- Integration risk level (High/Medium/Low)
- Existing system impact assessment
- Rollback readiness
- User disruption potential
3. Risk Assessment
- Top 5 risks by severity
- Mitigation recommendations
- Timeline impact of addressing issues
- [BROWNFIELD] Specific integration risks
4. MVP Completeness
- Core features coverage
- Missing essential functionality
- Scope creep identified
- True MVP vs over-engineering
5. Implementation Readiness
- Developer clarity score (1-10)
- Ambiguous requirements count
- Missing technical details
- [BROWNFIELD] Integration point clarity
6. Recommendations
- Must-fix before development
- Should-fix for quality
- Consider for improvement

View File

@@ -25,14 +25,12 @@ The goal is quality delivery, not just checking boxes.]]
1. **Requirements Met:**
[[LLM: Be specific - list each requirement and whether it's complete]]
- [ ] All functional requirements specified in the story are implemented.
- [ ] All acceptance criteria defined in the story are met.
2. **Coding Standards & Project Structure:**
[[LLM: Code quality matters for maintainability. Check each item carefully]]
- [ ] All new/modified code strictly adheres to `Operational Guidelines`.
- [ ] All new/modified code aligns with `Project Structure` (file locations, naming, etc.).
- [ ] Adherence to `Tech Stack` for technologies/versions used (if story introduces or modifies tech usage).
@@ -44,7 +42,6 @@ The goal is quality delivery, not just checking boxes.]]
3. **Testing:**
[[LLM: Testing proves your code works. Be honest about test coverage]]
- [ ] All required unit tests as per the story and `Operational Guidelines` Testing Strategy are implemented.
- [ ] All required integration tests (if applicable) as per the story and `Operational Guidelines` Testing Strategy are implemented.
- [ ] All tests (unit, integration, E2E if applicable) pass successfully.
@@ -53,14 +50,12 @@ The goal is quality delivery, not just checking boxes.]]
4. **Functionality & Verification:**
[[LLM: Did you actually run and test your code? Be specific about what you tested]]
- [ ] Functionality has been manually verified by the developer (e.g., running the app locally, checking UI, testing API endpoints).
- [ ] Edge cases and potential error conditions considered and handled gracefully.
5. **Story Administration:**
[[LLM: Documentation helps the next developer. What should they know?]]
- [ ] All tasks within the story file are marked as complete.
- [ ] Any clarifications or decisions made during development are documented in the story file or linked appropriately.
- [ ] The story wrap up section has been completed with notes of changes or information relevant to the next story or overall project, the agent model that was primarily used during development, and the changelog of any changes is properly updated.
@@ -68,7 +63,6 @@ The goal is quality delivery, not just checking boxes.]]
6. **Dependencies, Build & Configuration:**
[[LLM: Build issues block everyone. Ensure everything compiles and runs cleanly]]
- [ ] Project builds successfully without errors.
- [ ] Project linting passes
- [ ] Any new dependencies added were either pre-approved in the story requirements OR explicitly approved by the user during development (approval documented in story file).
@@ -79,7 +73,6 @@ The goal is quality delivery, not just checking boxes.]]
7. **Documentation (If Applicable):**
[[LLM: Good documentation prevents future confusion. What needs explaining?]]
- [ ] Relevant inline code documentation (e.g., JSDoc, TSDoc, Python docstrings) for new public APIs or complex logic is complete.
- [ ] User-facing documentation updated, if changes impact users.
- [ ] Technical documentation (e.g., READMEs, system diagrams) updated if significant architectural changes were made.

View File

@@ -117,19 +117,16 @@ Note: We don't need every file listed - just the important ones.]]
Generate a concise validation report:
1. Quick Summary
- Story readiness: READY / NEEDS REVISION / BLOCKED
- Clarity score (1-10)
- Major gaps identified
2. Fill in the validation table with:
- PASS: Requirements clearly met
- PARTIAL: Some gaps but workable
- FAIL: Critical information missing
3. Specific Issues (if any)
- List concrete problems to fix
- Suggest specific improvements
- Identify any blocking dependencies

View File

@@ -298,7 +298,7 @@ You are the "Vibe CEO" - thinking like a CEO with unlimited resources and a sing
- **Claude Code**: `/agent-name` (e.g., `/bmad-master`)
- **Cursor**: `@agent-name` (e.g., `@bmad-master`)
- **Windsurf**: `@agent-name` (e.g., `@bmad-master`)
- **Windsurf**: `/agent-name` (e.g., `/bmad-master`)
- **Trae**: `@agent-name` (e.g., `@bmad-master`)
- **Roo Code**: Select mode from mode selector (e.g., `bmad-master`)
- **GitHub Copilot**: Open the Chat view (`⌃⌘I` on Mac, `Ctrl+Alt+I` on Windows/Linux) and select **Agent** from the chat mode selector.
@@ -651,8 +651,11 @@ Templates with Level 2 headings (`##`) can be automatically sharded:
```markdown
## Goals and Background Context
## Requirements
## User Interface Design Goals
## Success Metrics
```

View File

@@ -3,16 +3,19 @@
## Core Reflective Methods
**Expand or Contract for Audience**
- Ask whether to 'expand' (add detail, elaborate) or 'contract' (simplify, clarify)
- Identify specific target audience if relevant
- Tailor content complexity and depth accordingly
**Explain Reasoning (CoT Step-by-Step)**
- Walk through the step-by-step thinking process
- Reveal underlying assumptions and decision points
- Show how conclusions were reached from current role's perspective
**Critique and Refine**
- Review output for flaws, inconsistencies, or improvement areas
- Identify specific weaknesses from role's expertise
- Suggest refined version reflecting domain knowledge
@@ -20,12 +23,14 @@
## Structural Analysis Methods
**Analyze Logical Flow and Dependencies**
- Examine content structure for logical progression
- Check internal consistency and coherence
- Identify and validate dependencies between elements
- Confirm effective ordering and sequencing
**Assess Alignment with Overall Goals**
- Evaluate content contribution to stated objectives
- Identify any misalignments or gaps
- Interpret alignment from specific role's perspective
@@ -34,12 +39,14 @@
## Risk and Challenge Methods
**Identify Potential Risks and Unforeseen Issues**
- Brainstorm potential risks from role's expertise
- Identify overlooked edge cases or scenarios
- Anticipate unintended consequences
- Highlight implementation challenges
**Challenge from Critical Perspective**
- Adopt critical stance on current content
- Play devil's advocate from specified viewpoint
- Argue against proposal highlighting weaknesses
@@ -48,12 +55,14 @@
## Creative Exploration Methods
**Tree of Thoughts Deep Dive**
- Break problem into discrete "thoughts" or intermediate steps
- Explore multiple reasoning paths simultaneously
- Use self-evaluation to classify each path as "sure", "likely", or "impossible"
- Apply search algorithms (BFS/DFS) to find optimal solution paths
**Hindsight is 20/20: The 'If Only...' Reflection**
- Imagine retrospective scenario based on current content
- Identify the one "if only we had known/done X..." insight
- Describe imagined consequences humorously or dramatically
@@ -62,6 +71,7 @@
## Multi-Persona Collaboration Methods
**Agile Team Perspective Shift**
- Rotate through different Scrum team member viewpoints
- Product Owner: Focus on user value and business impact
- Scrum Master: Examine process flow and team dynamics
@@ -69,12 +79,14 @@
- QA: Identify testing scenarios and quality concerns
**Stakeholder Round Table**
- Convene virtual meeting with multiple personas
- Each persona contributes unique perspective on content
- Identify conflicts and synergies between viewpoints
- Synthesize insights into actionable recommendations
**Meta-Prompting Analysis**
- Step back to analyze the structure and logic of current approach
- Question the format and methodology being used
- Suggest alternative frameworks or mental models
@@ -83,24 +95,28 @@
## Advanced 2025 Techniques
**Self-Consistency Validation**
- Generate multiple reasoning paths for same problem
- Compare consistency across different approaches
- Identify most reliable and robust solution
- Highlight areas where approaches diverge and why
**ReWOO (Reasoning Without Observation)**
- Separate parametric reasoning from tool-based actions
- Create reasoning plan without external dependencies
- Identify what can be solved through pure reasoning
- Optimize for efficiency and reduced token usage
**Persona-Pattern Hybrid**
- Combine specific role expertise with elicitation pattern
- Architect + Risk Analysis: Deep technical risk assessment
- UX Expert + User Journey: End-to-end experience critique
- PM + Stakeholder Analysis: Multi-perspective impact review
**Emergent Collaboration Discovery**
- Allow multiple perspectives to naturally emerge
- Identify unexpected insights from persona interactions
- Explore novel combinations of viewpoints
@@ -109,18 +125,21 @@
## Game-Based Elicitation Methods
**Red Team vs Blue Team**
- Red Team: Attack the proposal, find vulnerabilities
- Blue Team: Defend and strengthen the approach
- Competitive analysis reveals blind spots
- Results in more robust, battle-tested solutions
**Innovation Tournament**
- Pit multiple alternative approaches against each other
- Score each approach across different criteria
- Crowd-source evaluation from different personas
- Identify winning combination of features
**Escape Room Challenge**
- Present content as constraints to work within
- Find creative solutions within tight limitations
- Identify minimum viable approach
@@ -129,6 +148,7 @@
## Process Control
**Proceed / No Further Actions**
- Acknowledge choice to finalize current work
- Accept output as-is or move to next step
- Prepare to continue without additional elicitation

View File

@@ -0,0 +1,146 @@
# Test Levels Framework
Comprehensive guide for determining appropriate test levels (unit, integration, E2E) for different scenarios.
## Test Level Decision Matrix
### Unit Tests
**When to use:**
- Testing pure functions and business logic
- Algorithm correctness
- Input validation and data transformation
- Error handling in isolated components
- Complex calculations or state machines
**Characteristics:**
- Fast execution (immediate feedback)
- No external dependencies (DB, API, file system)
- Highly maintainable and stable
- Easy to debug failures
**Example scenarios:**
```yaml
unit_test:
component: 'PriceCalculator'
scenario: 'Calculate discount with multiple rules'
justification: 'Complex business logic with multiple branches'
mock_requirements: 'None - pure function'
```
### Integration Tests
**When to use:**
- Component interaction verification
- Database operations and transactions
- API endpoint contracts
- Service-to-service communication
- Middleware and interceptor behavior
**Characteristics:**
- Moderate execution time
- Tests component boundaries
- May use test databases or containers
- Validates system integration points
**Example scenarios:**
```yaml
integration_test:
components: ['UserService', 'AuthRepository']
scenario: 'Create user with role assignment'
justification: 'Critical data flow between service and persistence'
test_environment: 'In-memory database'
```
### End-to-End Tests
**When to use:**
- Critical user journeys
- Cross-system workflows
- Visual regression testing
- Compliance and regulatory requirements
- Final validation before release
**Characteristics:**
- Slower execution
- Tests complete workflows
- Requires full environment setup
- Most realistic but most brittle
**Example scenarios:**
```yaml
e2e_test:
journey: 'Complete checkout process'
scenario: 'User purchases with saved payment method'
justification: 'Revenue-critical path requiring full validation'
environment: 'Staging with test payment gateway'
```
## Test Level Selection Rules
### Favor Unit Tests When:
- Logic can be isolated
- No side effects involved
- Fast feedback needed
- High cyclomatic complexity
### Favor Integration Tests When:
- Testing persistence layer
- Validating service contracts
- Testing middleware/interceptors
- Component boundaries critical
### Favor E2E Tests When:
- User-facing critical paths
- Multi-system interactions
- Regulatory compliance scenarios
- Visual regression important
## Anti-patterns to Avoid
- E2E testing for business logic validation
- Unit testing framework behavior
- Integration testing third-party libraries
- Duplicate coverage across levels
## Duplicate Coverage Guard
**Before adding any test, check:**
1. Is this already tested at a lower level?
2. Can a unit test cover this instead of integration?
3. Can an integration test cover this instead of E2E?
**Coverage overlap is only acceptable when:**
- Testing different aspects (unit: logic, integration: interaction, e2e: user experience)
- Critical paths requiring defense in depth
- Regression prevention for previously broken functionality
## Test Naming Conventions
- Unit: `test_{component}_{scenario}`
- Integration: `test_{flow}_{interaction}`
- E2E: `test_{journey}_{outcome}`
## Test ID Format
`{EPIC}.{STORY}-{LEVEL}-{SEQ}`
Examples:
- `1.3-UNIT-001`
- `1.3-INT-002`
- `1.3-E2E-001`

View File

@@ -0,0 +1,172 @@
# Test Priorities Matrix
Guide for prioritizing test scenarios based on risk, criticality, and business impact.
## Priority Levels
### P0 - Critical (Must Test)
**Criteria:**
- Revenue-impacting functionality
- Security-critical paths
- Data integrity operations
- Regulatory compliance requirements
- Previously broken functionality (regression prevention)
**Examples:**
- Payment processing
- Authentication/authorization
- User data creation/deletion
- Financial calculations
- GDPR/privacy compliance
**Testing Requirements:**
- Comprehensive coverage at all levels
- Both happy and unhappy paths
- Edge cases and error scenarios
- Performance under load
### P1 - High (Should Test)
**Criteria:**
- Core user journeys
- Frequently used features
- Features with complex logic
- Integration points between systems
- Features affecting user experience
**Examples:**
- User registration flow
- Search functionality
- Data import/export
- Notification systems
- Dashboard displays
**Testing Requirements:**
- Primary happy paths required
- Key error scenarios
- Critical edge cases
- Basic performance validation
### P2 - Medium (Nice to Test)
**Criteria:**
- Secondary features
- Admin functionality
- Reporting features
- Configuration options
- UI polish and aesthetics
**Examples:**
- Admin settings panels
- Report generation
- Theme customization
- Help documentation
- Analytics tracking
**Testing Requirements:**
- Happy path coverage
- Basic error handling
- Can defer edge cases
### P3 - Low (Test if Time Permits)
**Criteria:**
- Rarely used features
- Nice-to-have functionality
- Cosmetic issues
- Non-critical optimizations
**Examples:**
- Advanced preferences
- Legacy feature support
- Experimental features
- Debug utilities
**Testing Requirements:**
- Smoke tests only
- Can rely on manual testing
- Document known limitations
## Risk-Based Priority Adjustments
### Increase Priority When:
- High user impact (affects >50% of users)
- High financial impact (>$10K potential loss)
- Security vulnerability potential
- Compliance/legal requirements
- Customer-reported issues
- Complex implementation (>500 LOC)
- Multiple system dependencies
### Decrease Priority When:
- Feature flag protected
- Gradual rollout planned
- Strong monitoring in place
- Easy rollback capability
- Low usage metrics
- Simple implementation
- Well-isolated component
## Test Coverage by Priority
| Priority | Unit Coverage | Integration Coverage | E2E Coverage |
| -------- | ------------- | -------------------- | ------------------ |
| P0 | >90% | >80% | All critical paths |
| P1 | >80% | >60% | Main happy paths |
| P2 | >60% | >40% | Smoke tests |
| P3 | Best effort | Best effort | Manual only |
## Priority Assignment Rules
1. **Start with business impact** - What happens if this fails?
2. **Consider probability** - How likely is failure?
3. **Factor in detectability** - Would we know if it failed?
4. **Account for recoverability** - Can we fix it quickly?
## Priority Decision Tree
```
Is it revenue-critical?
├─ YES → P0
└─ NO → Does it affect core user journey?
├─ YES → Is it high-risk?
│ ├─ YES → P0
│ └─ NO → P1
└─ NO → Is it frequently used?
├─ YES → P1
└─ NO → Is it customer-facing?
├─ YES → P2
└─ NO → P3
```
## Test Execution Order
1. Execute P0 tests first (fail fast on critical issues)
2. Execute P1 tests second (core functionality)
3. Execute P2 tests if time permits
4. P3 tests only in full regression cycles
## Continuous Adjustment
Review and adjust priorities based on:
- Production incident patterns
- User feedback and complaints
- Usage analytics
- Test failure history
- Business priority changes
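
One concrete way to make the execution order actionable is to encode the priority in test names and filter on it; this is only an illustration under the assumption of a Jest-style runner, not something the document prescribes:

```bash
# Illustration only - assumes tests are tagged like "[P0] checkout happy path" and a Jest-style runner.
npx jest -t "\[P0\]"   # run revenue/security-critical tests first, fail fast
npx jest -t "\[P1\]"   # then core user journeys
# P2/P3 runs are reserved for full regression cycles, per the execution order above.
```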

View File

@@ -139,16 +139,19 @@ Critical: This is where you'll need to be interactive with the user if informati
Create Dev Technical Guidance section with available information:
```markdown
````markdown
## Dev Technical Guidance
### Existing System Context
[Extract from available documentation]
### Integration Approach
[Based on patterns found or ask user]
### Technical Constraints
[From documentation or user input]
### Missing Information
@@ -191,6 +194,7 @@ Example task structure for brownfield:
- [ ] Integration test for {{integration point}}
- [ ] Update existing tests if needed
```
````
### 5. Risk Assessment and Mitigation
@@ -202,14 +206,17 @@ Add section for brownfield-specific risks:
## Risk Assessment
### Implementation Risks
- **Primary Risk**: {{main risk to existing system}}
- **Mitigation**: {{how to address}}
- **Verification**: {{how to confirm safety}}
### Rollback Plan
- {{Simple steps to undo changes if needed}}
### Safety Checks
- [ ] Existing {{feature}} tested before changes
- [ ] Changes can be feature-flagged or isolated
- [ ] Rollback procedure documented
@@ -252,6 +259,7 @@ Include header noting documentation context:
<!-- Context: Brownfield enhancement to {{existing system}} -->
## Status: Draft
[Rest of story content...]
```

View File

@@ -21,63 +21,54 @@ CRITICAL: First, help the user select the most appropriate research focus based
Present these numbered options to the user:
1. **Product Validation Research**
- Validate product hypotheses and market fit
- Test assumptions about user needs and solutions
- Assess technical and business feasibility
- Identify risks and mitigation strategies
2. **Market Opportunity Research**
- Analyze market size and growth potential
- Identify market segments and dynamics
- Assess market entry strategies
- Evaluate timing and market readiness
3. **User & Customer Research**
- Deep dive into user personas and behaviors
- Understand jobs-to-be-done and pain points
- Map customer journeys and touchpoints
- Analyze willingness to pay and value perception
4. **Competitive Intelligence Research**
- Detailed competitor analysis and positioning
- Feature and capability comparisons
- Business model and strategy analysis
- Identify competitive advantages and gaps
5. **Technology & Innovation Research**
- Assess technology trends and possibilities
- Evaluate technical approaches and architectures
- Identify emerging technologies and disruptions
- Analyze build vs. buy vs. partner options
6. **Industry & Ecosystem Research**
- Map industry value chains and dynamics
- Identify key players and relationships
- Analyze regulatory and compliance factors
- Understand partnership opportunities
7. **Strategic Options Research**
- Evaluate different strategic directions
- Assess business model alternatives
- Analyze go-to-market strategies
- Consider expansion and scaling paths
8. **Risk & Feasibility Research**
- Identify and assess various risk factors
- Evaluate implementation challenges
- Analyze resource requirements
- Consider regulatory and legal implications
9. **Custom Research Focus**
- User-defined research objectives
- Specialized domain investigation
- Cross-functional research needs
@@ -246,13 +237,11 @@ CRITICAL: collaborate with the user to develop specific, actionable research que
### 5. Review and Refinement
1. **Present Complete Prompt**
- Show the full research prompt
- Explain key elements and rationale
- Highlight any assumptions made
2. **Gather Feedback**
- Are the objectives clear and correct?
- Do the questions address all concerns?
- Is the scope appropriate?

View File

@@ -112,7 +112,7 @@ This document captures the CURRENT STATE of the [Project Name] codebase, includi
### Change Log
| Date | Version | Description | Author |
|------|---------|-------------|--------|
| ------ | ------- | --------------------------- | --------- |
| [Date] | 1.0 | Initial brownfield analysis | [Analyst] |
## Quick Reference - Key Files and Entry Points
@@ -137,7 +137,7 @@ This document captures the CURRENT STATE of the [Project Name] codebase, includi
### Actual Tech Stack (from package.json/requirements.txt)
| Category | Technology | Version | Notes |
|----------|------------|---------|--------|
| --------- | ---------- | ------- | -------------------------- |
| Runtime | Node.js | 16.x | [Any constraints] |
| Framework | Express | 4.18.2 | [Custom middleware?] |
| Database | PostgreSQL | 13 | [Connection pooling setup] |
@@ -179,6 +179,7 @@ project-root/
### Data Models
Instead of duplicating, reference actual model files:
- **User Model**: See `src/models/User.js`
- **Order Model**: See `src/models/Order.js`
- **Related Types**: TypeScript definitions in `src/types/`
@@ -209,7 +210,7 @@ Instead of duplicating, reference actual model files:
### External Services
| Service | Purpose | Integration Type | Key Files |
|---------|---------|------------------|-----------|
| -------- | -------- | ---------------- | ------------------------------ |
| Stripe | Payments | REST API | `src/integrations/stripe/` |
| SendGrid | Emails | SDK | `src/services/emailService.js` |
@@ -256,6 +257,7 @@ npm run test:integration # Runs integration tests (requires local DB)
### Files That Will Need Modification
Based on the enhancement requirements, these files will be affected:
- `src/services/userService.js` - Add new user fields
- `src/models/User.js` - Update schema
- `src/routes/userRoutes.js` - New endpoints

View File

@@ -1,6 +1,6 @@
---
docOutputLocation: docs/brainstorming-session-results.md
template: "{root}/templates/brainstorming-output-tmpl.yaml"
template: '{root}/templates/brainstorming-output-tmpl.yaml'
---
# Facilitate Brainstorming Session Task

View File

@@ -11,14 +11,12 @@ You are now operating as a Documentation Indexer. Your goal is to ensure all doc
### Required Steps
1. First, locate and scan:
- The `docs/` directory and all subdirectories
- The existing `docs/index.md` file (create if absent)
- All markdown (`.md`) and text (`.txt`) files in the documentation structure
- Note the folder structure for hierarchical organization
2. For the existing `docs/index.md`:
- Parse current entries
- Note existing file references and descriptions
- Identify any broken links or missing files
@@ -26,7 +24,6 @@ You are now operating as a Documentation Indexer. Your goal is to ensure all doc
- Preserve existing folder sections
3. For each documentation file found:
- Extract the title (from first heading or filename)
- Generate a brief description by analyzing the content
- Create a relative markdown link to the file
@@ -35,7 +32,6 @@ You are now operating as a Documentation Indexer. Your goal is to ensure all doc
- If missing or outdated, prepare an update
4. For any missing or non-existent files found in index:
- Present a list of all entries that reference non-existent files
- For each entry:
- Show the full entry details (title, path, description)
@@ -88,7 +84,6 @@ Documents within the `another-folder/` directory:
### [Nested Document](./another-folder/document.md)
Description of nested document.
```
### Index Entry Format
@@ -157,7 +152,6 @@ For each file referenced in the index but not found in the filesystem:
### Special Cases
1. **Sharded Documents**: If a folder contains an `index.md` file, treat it as a sharded document:
- Use the folder's `index.md` title as the section title
- List the folder's documents as subsections
- Note in the description that this is a multi-part document

View File

@@ -6,7 +6,7 @@ Provide a user-friendly interface to the BMad knowledge base without overwhelmin
## Instructions
When entering KB mode (*kb-mode), follow these steps:
When entering KB mode (\*kb-mode), follow these steps:
### 1. Welcome and Guide
@@ -48,12 +48,12 @@ Or ask me about anything else related to BMad-Method!
When user is done or wants to exit KB mode:
- Summarize key points discussed if helpful
- Remind them they can return to KB mode anytime with *kb-mode
- Remind them they can return to KB mode anytime with \*kb-mode
- Suggest next steps based on what was discussed
## Example Interaction
**User**: *kb-mode
**User**: \*kb-mode
**Assistant**: I've entered KB mode and have access to the full BMad knowledge base. I can help you with detailed information about any aspect of BMad-Method.

View File

@@ -0,0 +1,343 @@
# nfr-assess
Quick NFR validation focused on the core four: security, performance, reliability, maintainability.
## Inputs
```yaml
required:
- story_id: '{epic}.{story}' # e.g., "1.3"
- story_path: 'docs/stories/{epic}.{story}.*.md'
optional:
- architecture_refs: 'docs/architecture/*.md'
- technical_preferences: 'docs/technical-preferences.md'
- acceptance_criteria: From story file
```
## Purpose
Assess non-functional requirements for a story and generate:
1. YAML block for the gate file's `nfr_validation` section
2. Brief markdown assessment saved to `docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md`
## Process
### 0. Fail-safe for Missing Inputs
If story_path or story file can't be found:
- Still create assessment file with note: "Source story not found"
- Set all selected NFRs to CONCERNS with notes: "Target unknown / evidence missing"
- Continue with assessment to provide value
### 1. Elicit Scope
**Interactive mode:** Ask which NFRs to assess
**Non-interactive mode:** Default to core four (security, performance, reliability, maintainability)
```text
Which NFRs should I assess? (Enter numbers or press Enter for default)
[1] Security (default)
[2] Performance (default)
[3] Reliability (default)
[4] Maintainability (default)
[5] Usability
[6] Compatibility
[7] Portability
[8] Functional Suitability
> [Enter for 1-4]
```
### 2. Check for Thresholds
Look for NFR requirements in:
- Story acceptance criteria
- `docs/architecture/*.md` files
- `docs/technical-preferences.md`
**Interactive mode:** Ask for missing thresholds
**Non-interactive mode:** Mark as CONCERNS with "Target unknown"
```text
No performance requirements found. What's your target response time?
> 200ms for API calls
No security requirements found. Required auth method?
> JWT with refresh tokens
```
**Unknown targets policy:** If a target is missing and not provided, mark status as CONCERNS with notes: "Target unknown"
### 3. Quick Assessment
For each selected NFR, check:
- Is there evidence it's implemented?
- Can we validate it?
- Are there obvious gaps?
### 4. Generate Outputs
## Output 1: Gate YAML Block
Generate ONLY for NFRs actually assessed (no placeholders):
```yaml
# Gate YAML (copy/paste):
nfr_validation:
_assessed: [security, performance, reliability, maintainability]
security:
status: CONCERNS
notes: 'No rate limiting on auth endpoints'
performance:
status: PASS
notes: 'Response times < 200ms verified'
reliability:
status: PASS
notes: 'Error handling and retries implemented'
maintainability:
status: CONCERNS
notes: 'Test coverage at 65%, target is 80%'
```
## Deterministic Status Rules
- **FAIL**: Any selected NFR has critical gap or target clearly not met
- **CONCERNS**: No FAILs, but any NFR is unknown/partial/missing evidence
- **PASS**: All selected NFRs meet targets with evidence
## Quality Score Calculation
```
quality_score = 100
- 20 for each FAIL attribute
- 10 for each CONCERNS attribute
Floor at 0, ceiling at 100
```
If `technical-preferences.md` defines custom weights, use those instead.
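For illustration only, a minimal TypeScript sketch of the deterministic status roll-up and the default scoring above; the type and function names here are assumptions for the example, not part of this task.

```typescript
type NfrStatus = 'PASS' | 'CONCERNS' | 'FAIL';

// Roll-up per the deterministic rules: any FAIL wins, then CONCERNS, else PASS.
function overallStatus(statuses: NfrStatus[]): NfrStatus {
  if (statuses.includes('FAIL')) return 'FAIL';
  if (statuses.includes('CONCERNS')) return 'CONCERNS';
  return 'PASS';
}

// Default weights; technical-preferences.md may override them.
function qualityScore(statuses: NfrStatus[]): number {
  let score = 100;
  for (const s of statuses) {
    if (s === 'FAIL') score -= 20;
    else if (s === 'CONCERNS') score -= 10;
  }
  return Math.max(0, Math.min(100, score)); // floor at 0, ceiling at 100
}

// Example: the gate YAML above (security/maintainability CONCERNS, rest PASS).
const assessed: NfrStatus[] = ['CONCERNS', 'PASS', 'PASS', 'CONCERNS'];
console.log(overallStatus(assessed)); // "CONCERNS"
console.log(qualityScore(assessed)); // 80
```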
## Output 2: Brief Assessment Report
**ALWAYS save to:** `docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md`
```markdown
# NFR Assessment: {epic}.{story}
Date: {date}
Reviewer: Quinn
<!-- Note: Source story not found (if applicable) -->
## Summary
- Security: CONCERNS - Missing rate limiting
- Performance: PASS - Meets <200ms requirement
- Reliability: PASS - Proper error handling
- Maintainability: CONCERNS - Test coverage below target
## Critical Issues
1. **No rate limiting** (Security)
- Risk: Brute force attacks possible
- Fix: Add rate limiting middleware to auth endpoints
2. **Test coverage 65%** (Maintainability)
- Risk: Untested code paths
- Fix: Add tests for uncovered branches
## Quick Wins
- Add rate limiting: ~2 hours
- Increase test coverage: ~4 hours
- Add performance monitoring: ~1 hour
```
## Output 3: Story Update Line
**End with this line for the review task to quote:**
```
NFR assessment: docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md
```
## Output 4: Gate Integration Line
**Always print at the end:**
```
Gate NFR block ready → paste into docs/qa/gates/{epic}.{story}-{slug}.yml under nfr_validation
```
## Assessment Criteria
### Security
**PASS if:**
- Authentication implemented
- Authorization enforced
- Input validation present
- No hardcoded secrets
**CONCERNS if:**
- Missing rate limiting
- Weak encryption
- Incomplete authorization
**FAIL if:**
- No authentication
- Hardcoded credentials
- SQL injection vulnerabilities
### Performance
**PASS if:**
- Meets response time targets
- No obvious bottlenecks
- Reasonable resource usage
**CONCERNS if:**
- Close to limits
- Missing indexes
- No caching strategy
**FAIL if:**
- Exceeds response time limits
- Memory leaks
- Unoptimized queries
### Reliability
**PASS if:**
- Error handling present
- Graceful degradation
- Retry logic where needed
**CONCERNS if:**
- Some error cases unhandled
- No circuit breakers
- Missing health checks
**FAIL if:**
- No error handling
- Crashes on errors
- No recovery mechanisms
### Maintainability
**PASS if:**
- Test coverage meets target
- Code well-structured
- Documentation present
**CONCERNS if:**
- Test coverage below target
- Some code duplication
- Missing documentation
**FAIL if:**
- No tests
- Highly coupled code
- No documentation
## Quick Reference
### What to Check
```yaml
security:
- Authentication mechanism
- Authorization checks
- Input validation
- Secret management
- Rate limiting
performance:
- Response times
- Database queries
- Caching usage
- Resource consumption
reliability:
- Error handling
- Retry logic
- Circuit breakers
- Health checks
- Logging
maintainability:
- Test coverage
- Code structure
- Documentation
- Dependencies
```
## Key Principles
- Focus on the core four NFRs by default
- Quick assessment, not deep analysis
- Gate-ready output format
- Brief, actionable findings
- Skip what doesn't apply
- Deterministic status rules for consistency
- Unknown targets → CONCERNS, not guesses
---
## Appendix: ISO 25010 Reference
<details>
<summary>Full ISO 25010 Quality Model (click to expand)</summary>
### All 8 Quality Characteristics
1. **Functional Suitability**: Completeness, correctness, appropriateness
2. **Performance Efficiency**: Time behavior, resource use, capacity
3. **Compatibility**: Co-existence, interoperability
4. **Usability**: Learnability, operability, accessibility
5. **Reliability**: Maturity, availability, fault tolerance
6. **Security**: Confidentiality, integrity, authenticity
7. **Maintainability**: Modularity, reusability, testability
8. **Portability**: Adaptability, installability
Use these when assessing beyond the core four.
</details>
<details>
<summary>Example: Deep Performance Analysis (click to expand)</summary>
```yaml
performance_deep_dive:
response_times:
p50: 45ms
p95: 180ms
p99: 350ms
database:
slow_queries: 2
missing_indexes: ['users.email', 'orders.user_id']
caching:
hit_rate: 0%
recommendation: 'Add Redis for session data'
load_test:
max_rps: 150
breaking_point: 200 rps
```
</details>

159
bmad-core/tasks/qa-gate.md Normal file
View File

@@ -0,0 +1,159 @@
# qa-gate
Create or update a quality gate decision file for a story based on review findings.
## Purpose
Generate a standalone quality gate file that provides a clear pass/fail decision with actionable feedback. This gate serves as an advisory checkpoint for teams to understand quality status.
## Prerequisites
- Story has been reviewed (manually or via review-story task)
- Review findings are available
- Understanding of story requirements and implementation
## Gate File Location
**ALWAYS** create file at: `docs/qa/gates/{epic}.{story}-{slug}.yml`
Slug rules:
- Convert to lowercase
- Replace spaces with hyphens
- Strip punctuation
- Example: "User Auth - Login!" becomes "user-auth-login"
## Minimal Required Schema
```yaml
schema: 1
story: '{epic}.{story}'
gate: PASS|CONCERNS|FAIL|WAIVED
status_reason: '1-2 sentence explanation of gate decision'
reviewer: 'Quinn'
updated: '{ISO-8601 timestamp}'
top_issues: [] # Empty array if no issues
waiver: { active: false } # Only set active: true if WAIVED
```
## Schema with Issues
```yaml
schema: 1
story: '1.3'
gate: CONCERNS
status_reason: 'Missing rate limiting on auth endpoints poses security risk.'
reviewer: 'Quinn'
updated: '2025-01-12T10:15:00Z'
top_issues:
- id: 'SEC-001'
severity: high # ONLY: low|medium|high
finding: 'No rate limiting on login endpoint'
suggested_action: 'Add rate limiting middleware before production'
- id: 'TEST-001'
severity: medium
finding: 'No integration tests for auth flow'
suggested_action: 'Add integration test coverage'
waiver: { active: false }
```
## Schema when Waived
```yaml
schema: 1
story: '1.3'
gate: WAIVED
status_reason: 'Known issues accepted for MVP release.'
reviewer: 'Quinn'
updated: '2025-01-12T10:15:00Z'
top_issues:
- id: 'PERF-001'
severity: low
finding: 'Dashboard loads slowly with 1000+ items'
suggested_action: 'Implement pagination in next sprint'
waiver:
active: true
reason: 'MVP release - performance optimization deferred'
approved_by: 'Product Owner'
```
## Gate Decision Criteria
### PASS
- All acceptance criteria met
- No high-severity issues
- Test coverage meets project standards
### CONCERNS
- Non-blocking issues present
- Should be tracked and scheduled
- Can proceed with awareness
### FAIL
- Acceptance criteria not met
- High-severity issues present
- Recommend return to InProgress
### WAIVED
- Issues explicitly accepted
- Requires approval and reason
- Proceed despite known issues
## Severity Scale
**FIXED VALUES - NO VARIATIONS:**
- `low`: Minor issues, cosmetic problems
- `medium`: Should fix soon, not blocking
- `high`: Critical issues, should block release
## Issue ID Prefixes
- `SEC-`: Security issues
- `PERF-`: Performance issues
- `REL-`: Reliability issues
- `TEST-`: Testing gaps
- `MNT-`: Maintainability concerns
- `ARCH-`: Architecture issues
- `DOC-`: Documentation gaps
- `REQ-`: Requirements issues
## Output Requirements
1. **ALWAYS** create gate file at: `docs/qa/gates/{epic}.{story}-{slug}.yml`
2. **ALWAYS** append this exact format to story's QA Results section:
```
Gate: {STATUS} → docs/qa/gates/{epic}.{story}-{slug}.yml
```
3. Keep status_reason to 1-2 sentences maximum
4. Use severity values exactly: `low`, `medium`, or `high`
## Example Story Update
After creating gate file, append to story's QA Results section:
```markdown
## QA Results
### Review Date: 2025-01-12
### Reviewed By: Quinn (Test Architect)
[... existing review content ...]
### Gate Status
Gate: CONCERNS → docs/qa/gates/1.3-user-auth-login.yml
```
## Key Principles
- Keep it minimal and predictable
- Fixed severity scale (low/medium/high)
- Always write to standard path
- Always update story with gate reference
- Clear, actionable findings

View File

@@ -1,6 +1,16 @@
# review-story
When a developer agent marks a story as "Ready for Review", perform a comprehensive senior developer code review with the ability to refactor and improve code directly.
Perform a comprehensive test architecture review with quality gate decision. This adaptive, risk-aware review creates both a story update and a detailed gate file.
## Inputs
```yaml
required:
- story_id: '{epic}.{story}' # e.g., "1.3"
- story_path: '{devStoryLocation}/{epic}.{story}.*.md' # Path from core-config.yaml
- story_title: '{title}' # If missing, derive from story file H1
- story_slug: '{slug}' # If missing, derive from title (lowercase, hyphenated)
```
## Prerequisites
@@ -8,98 +18,133 @@ When a developer agent marks a story as "Ready for Review", perform a comprehens
- Developer has completed all tasks and updated the File List
- All automated tests are passing
## Review Process
## Review Process - Adaptive Test Architecture
1. **Read the Complete Story**
- Review all acceptance criteria
- Understand the dev notes and requirements
- Note any completion notes from the developer
### 1. Risk Assessment (Determines Review Depth)
2. **Verify Implementation Against Dev Notes Guidance**
- Review the "Dev Notes" section for specific technical guidance provided to the developer
- Verify the developer's implementation follows the architectural patterns specified in Dev Notes
- Check that file locations match the project structure guidance in Dev Notes
- Confirm any specified libraries, frameworks, or technical approaches were used correctly
- Validate that security considerations mentioned in Dev Notes were implemented
**Auto-escalate to deep review when:**
3. **Focus on the File List**
- Verify all files listed were actually created/modified
- Check for any missing files that should have been updated
- Ensure file locations align with the project structure guidance from Dev Notes
- Auth/payment/security files touched
- No tests added to story
- Diff > 500 lines
- Previous gate was FAIL/CONCERNS
- Story has > 5 acceptance criteria
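Purely as an illustration, the escalation triggers above could be checked with a predicate like the sketch below; every field name here is an assumption made for the example, not something defined by this task.

```typescript
interface ReviewContext {
  touchedFiles: string[];
  testsAdded: boolean;
  diffLines: number;
  previousGate?: 'PASS' | 'CONCERNS' | 'FAIL' | 'WAIVED';
  acceptanceCriteriaCount: number;
}

// Sensitive areas that force a deep review when touched.
const SENSITIVE = /auth|payment|security/i;

function needsDeepReview(ctx: ReviewContext): boolean {
  return (
    ctx.touchedFiles.some((f) => SENSITIVE.test(f)) ||
    !ctx.testsAdded ||
    ctx.diffLines > 500 ||
    ctx.previousGate === 'FAIL' ||
    ctx.previousGate === 'CONCERNS' ||
    ctx.acceptanceCriteriaCount > 5
  );
}
```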
4. **Senior Developer Code Review**
- Review code with the eye of a senior developer
- If changes form a cohesive whole, review them together
- If changes are independent, review incrementally file by file
- Focus on:
- Code architecture and design patterns
- Refactoring opportunities
- Code duplication or inefficiencies
- Performance optimizations
- Security concerns
- Best practices and patterns
### 2. Comprehensive Analysis
5. **Active Refactoring**
- As a senior developer, you CAN and SHOULD refactor code where improvements are needed
- When refactoring:
- Make the changes directly in the files
- Explain WHY you're making the change
- Describe HOW the change improves the code
- Ensure all tests still pass after refactoring
- Update the File List if you modify additional files
**A. Requirements Traceability**
6. **Standards Compliance Check**
- Verify adherence to `docs/coding-standards.md`
- Check compliance with `docs/unified-project-structure.md`
- Validate testing approach against `docs/testing-strategy.md`
- Ensure all guidelines mentioned in the story are followed
- Map each acceptance criterion to its validating tests (document the mapping with Given-When-Then, not test code)
- Identify coverage gaps
- Verify all requirements have corresponding test cases
7. **Acceptance Criteria Validation**
- Verify each AC is fully implemented
- Check for any missing functionality
- Validate edge cases are handled
**B. Code Quality Review**
8. **Test Coverage Review**
- Ensure unit tests cover edge cases
- Add missing tests if critical coverage is lacking
- Verify integration tests (if required) are comprehensive
- Check that test assertions are meaningful
- Look for missing test scenarios
- Architecture and design patterns
- Refactoring opportunities (and perform them)
- Code duplication or inefficiencies
- Performance optimizations
- Security vulnerabilities
- Best practices adherence
9. **Documentation and Comments**
- Verify code is self-documenting where possible
- Add comments for complex logic if missing
- Ensure any API changes are documented
**C. Test Architecture Assessment**
## Update Story File - QA Results Section ONLY
- Test coverage adequacy at appropriate levels
- Test level appropriateness (what should be unit vs integration vs e2e)
- Test design quality and maintainability
- Test data management strategy
- Mock/stub usage appropriateness
- Edge case and error scenario coverage
- Test execution time and reliability
**D. Non-Functional Requirements (NFRs)**
- Security: Authentication, authorization, data protection
- Performance: Response times, resource usage
- Reliability: Error handling, recovery mechanisms
- Maintainability: Code clarity, documentation
**E. Testability Evaluation**
- Controllability: Can we control the inputs?
- Observability: Can we observe the outputs?
- Debuggability: Can we debug failures easily?
**F. Technical Debt Identification**
- Accumulated shortcuts
- Missing tests
- Outdated dependencies
- Architecture violations
### 3. Active Refactoring
- Refactor code where safe and appropriate
- Run tests to ensure changes don't break functionality
- Document all changes in QA Results section with clear WHY and HOW
- Do NOT alter story content beyond QA Results section
- Do NOT change story Status or File List; recommend next status only
### 4. Standards Compliance Check
- Verify adherence to `docs/coding-standards.md`
- Check compliance with `docs/unified-project-structure.md`
- Validate testing approach against `docs/testing-strategy.md`
- Ensure all guidelines mentioned in the story are followed
### 5. Acceptance Criteria Validation
- Verify each AC is fully implemented
- Check for any missing functionality
- Validate edge cases are handled
### 6. Documentation and Comments
- Verify code is self-documenting where possible
- Add comments for complex logic if missing
- Ensure any API changes are documented
## Output 1: Update Story File - QA Results Section ONLY
**CRITICAL**: You are ONLY authorized to update the "QA Results" section of the story file. DO NOT modify any other sections.
**QA Results Anchor Rule:**
- If `## QA Results` doesn't exist, append it at end of file
- If it exists, append a new dated entry below existing entries
- Never edit other sections
After review and any refactoring, append your results to the story file in the QA Results section:
```markdown
## QA Results
### Review Date: [Date]
### Reviewed By: Quinn (Senior Developer QA)
### Reviewed By: Quinn (Test Architect)
### Code Quality Assessment
[Overall assessment of implementation quality]
### Refactoring Performed
[List any refactoring you performed with explanations]
- **File**: [filename]
- **Change**: [what was changed]
- **Why**: [reason for change]
- **How**: [how it improves the code]
### Compliance Check
- Coding Standards: [✓/✗] [notes if any]
- Project Structure: [✓/✗] [notes if any]
- Testing Strategy: [✓/✗] [notes if any]
- All ACs Met: [✓/✗] [notes if any]
### Improvements Checklist
[Check off items you handled yourself, leave unchecked for dev to address]
- [x] Refactored user service for better error handling (services/user.service.ts)
@@ -109,22 +154,144 @@ After review and any refactoring, append your results to the story file in the Q
- [ ] Update API documentation for new error codes
### Security Review
[Any security concerns found and whether addressed]
### Performance Considerations
[Any performance issues found and whether addressed]
### Final Status
[✓ Approved - Ready for Done] / [✗ Changes Required - See unchecked items above]
### Files Modified During Review
[If you modified files, list them here - ask Dev to update File List]
### Gate Status
Gate: {STATUS} → docs/qa/gates/{epic}.{story}-{slug}.yml
Risk profile: docs/qa/assessments/{epic}.{story}-risk-{YYYYMMDD}.md
NFR assessment: docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md
# Note: Paths should reference core-config.yaml for custom configurations
### Recommended Status
[✓ Ready for Done] / [✗ Changes Required - See unchecked items above]
(Story owner decides final status)
```
## Output 2: Create Quality Gate File
**Template and Directory:**
- Render from `templates/qa-gate-tmpl.yaml`
- Create `docs/qa/gates/` directory if missing (or configure in core-config.yaml)
- Save to: `docs/qa/gates/{epic}.{story}-{slug}.yml`
Gate file structure:
```yaml
schema: 1
story: '{epic}.{story}'
story_title: '{story title}'
gate: PASS|CONCERNS|FAIL|WAIVED
status_reason: '1-2 sentence explanation of gate decision'
reviewer: 'Quinn (Test Architect)'
updated: '{ISO-8601 timestamp}'
top_issues: [] # Empty if no issues
waiver: { active: false } # Set active: true only if WAIVED
# Extended fields (optional but recommended):
quality_score: 0-100 # 100 - (20*FAILs) - (10*CONCERNS) or use technical-preferences.md weights
expires: '{ISO-8601 timestamp}' # Typically 2 weeks from review
evidence:
tests_reviewed: { count }
risks_identified: { count }
trace:
ac_covered: [1, 2, 3] # AC numbers with test coverage
ac_gaps: [4] # AC numbers lacking coverage
nfr_validation:
security:
status: PASS|CONCERNS|FAIL
notes: 'Specific findings'
performance:
status: PASS|CONCERNS|FAIL
notes: 'Specific findings'
reliability:
status: PASS|CONCERNS|FAIL
notes: 'Specific findings'
maintainability:
status: PASS|CONCERNS|FAIL
notes: 'Specific findings'
recommendations:
immediate: # Must fix before production
- action: 'Add rate limiting'
refs: ['api/auth/login.ts']
future: # Can be addressed later
- action: 'Consider caching'
refs: ['services/data.ts']
```
### Gate Decision Criteria
**Deterministic rule (apply in order):**
If risk_summary exists, apply its thresholds first (≥ 9 → FAIL, ≥ 6 → CONCERNS), then test coverage gaps, then top_issues severity, then NFR statuses.
1. **Risk thresholds (if risk_summary present):**
- If any risk score ≥ 9 → Gate = FAIL (unless waived)
- Else if any score ≥ 6 → Gate = CONCERNS
2. **Test coverage gaps (if trace available):**
- If any P0 test from test-design is missing → Gate = CONCERNS
- If security/data-loss P0 test missing → Gate = FAIL
3. **Issue severity:**
- If any `top_issues.severity == high` → Gate = FAIL (unless waived)
- Else if any `severity == medium` → Gate = CONCERNS
4. **NFR statuses:**
- If any NFR status is FAIL → Gate = FAIL
- Else if any NFR status is CONCERNS → Gate = CONCERNS
- Else → Gate = PASS
- WAIVED only when waiver.active: true with reason/approver
Detailed criteria:
- **PASS**: All critical requirements met, no blocking issues
- **CONCERNS**: Non-critical issues found, team should review
- **FAIL**: Critical issues that should be addressed
- **WAIVED**: Issues acknowledged but explicitly waived by team
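To make the ordering concrete, here is a hedged TypeScript sketch of the deterministic rule above; the input shape is an assumption for illustration and is not defined by this task.

```typescript
type Gate = 'PASS' | 'CONCERNS' | 'FAIL' | 'WAIVED';
type NfrStatus = 'PASS' | 'CONCERNS' | 'FAIL';

interface GateInput {
  waiverActive: boolean; // waiver.active with reason/approver
  riskScores: number[]; // from risk_summary, if present
  missingP0Tests: boolean; // from trace vs. test-design, if available
  missingSecurityOrDataLossP0: boolean;
  issueSeverities: ('low' | 'medium' | 'high')[];
  nfrStatuses: NfrStatus[];
}

function decideGate(input: GateInput): Gate {
  if (input.waiverActive) return 'WAIVED';
  let gate: Gate = 'PASS';
  // 1. Risk thresholds
  if (input.riskScores.some((s) => s >= 9)) return 'FAIL';
  if (input.riskScores.some((s) => s >= 6)) gate = 'CONCERNS';
  // 2. Test coverage gaps
  if (input.missingSecurityOrDataLossP0) return 'FAIL';
  if (input.missingP0Tests) gate = 'CONCERNS';
  // 3. Issue severity
  if (input.issueSeverities.includes('high')) return 'FAIL';
  if (input.issueSeverities.includes('medium')) gate = 'CONCERNS';
  // 4. NFR statuses
  if (input.nfrStatuses.includes('FAIL')) return 'FAIL';
  if (input.nfrStatuses.includes('CONCERNS')) gate = 'CONCERNS';
  return gate;
}
```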
### Quality Score Calculation
```text
quality_score = 100 - (20 × number of FAILs) - (10 × number of CONCERNS)
Bounded between 0 and 100
```
If `technical-preferences.md` defines custom weights, use those instead.
### Suggested Owner Convention
For each issue in `top_issues`, include a `suggested_owner`:
- `dev`: Code changes needed
- `sm`: Requirements clarification needed
- `po`: Business decision needed
## Key Principles
- You are a SENIOR developer reviewing junior/mid-level work
- You have the authority and responsibility to improve code directly
- You are a Test Architect providing comprehensive quality assessment
- You have the authority to improve code directly when appropriate
- Always explain your changes for learning purposes
- Balance between perfection and pragmatism
- Focus on significant improvements, not nitpicks
- Focus on risk-based prioritization
- Provide actionable recommendations with clear ownership
## Blocking Conditions
@@ -140,6 +307,8 @@ Stop the review and request clarification if:
After review:
1. If all items are checked and approved: Update story status to "Done"
2. If unchecked items remain: Keep status as "Review" for dev to address
3. Always provide constructive feedback and explanations for learning
1. Update the QA Results section in the story file
2. Create the gate file in `docs/qa/gates/`
3. Recommend status: "Ready for Done" or "Changes Required" (owner decides)
4. If files were modified, list them in QA Results and ask Dev to update File List
5. Always provide constructive feedback and actionable recommendations

View File

@@ -0,0 +1,353 @@
# risk-profile
Generate a comprehensive risk assessment matrix for a story implementation using probability × impact analysis.
## Inputs
```yaml
required:
- story_id: '{epic}.{story}' # e.g., "1.3"
- story_path: 'docs/stories/{epic}.{story}.*.md'
- story_title: '{title}' # If missing, derive from story file H1
- story_slug: '{slug}' # If missing, derive from title (lowercase, hyphenated)
```
## Purpose
Identify, assess, and prioritize risks in the story implementation. Provide risk mitigation strategies and testing focus areas based on risk levels.
## Risk Assessment Framework
### Risk Categories
**Category Prefixes:**
- `TECH`: Technical Risks
- `SEC`: Security Risks
- `PERF`: Performance Risks
- `DATA`: Data Risks
- `BUS`: Business Risks
- `OPS`: Operational Risks
1. **Technical Risks (TECH)**
- Architecture complexity
- Integration challenges
- Technical debt
- Scalability concerns
- System dependencies
2. **Security Risks (SEC)**
- Authentication/authorization flaws
- Data exposure vulnerabilities
- Injection attacks
- Session management issues
- Cryptographic weaknesses
3. **Performance Risks (PERF)**
- Response time degradation
- Throughput bottlenecks
- Resource exhaustion
- Database query optimization
- Caching failures
4. **Data Risks (DATA)**
- Data loss potential
- Data corruption
- Privacy violations
- Compliance issues
- Backup/recovery gaps
5. **Business Risks (BUS)**
- Feature doesn't meet user needs
- Revenue impact
- Reputation damage
- Regulatory non-compliance
- Market timing
6. **Operational Risks (OPS)**
- Deployment failures
- Monitoring gaps
- Incident response readiness
- Documentation inadequacy
- Knowledge transfer issues
## Risk Analysis Process
### 1. Risk Identification
For each category, identify specific risks:
```yaml
risk:
id: 'SEC-001' # Use prefixes: SEC, PERF, DATA, BUS, OPS, TECH
category: security
title: 'Insufficient input validation on user forms'
description: 'Form inputs not properly sanitized could lead to XSS attacks'
affected_components:
- 'UserRegistrationForm'
- 'ProfileUpdateForm'
detection_method: 'Code review revealed missing validation'
```
### 2. Risk Assessment
Evaluate each risk using probability × impact:
**Probability Levels:**
- `High (3)`: Likely to occur (>70% chance)
- `Medium (2)`: Possible occurrence (30-70% chance)
- `Low (1)`: Unlikely to occur (<30% chance)
**Impact Levels:**
- `High (3)`: Severe consequences (data breach, system down, major financial loss)
- `Medium (2)`: Moderate consequences (degraded performance, minor data issues)
- `Low (1)`: Minor consequences (cosmetic issues, slight inconvenience)
**Risk Score = Probability × Impact**
- 9: Critical Risk (Red)
- 6: High Risk (Orange)
- 4: Medium Risk (Yellow)
- 2-3: Low Risk (Green)
- 1: Minimal Risk (Blue)
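A minimal TypeScript sketch of the probability × impact scoring and the priority bands above (illustrative only):

```typescript
type Level = 1 | 2 | 3; // Low, Medium, High

// Probability × impact, mapped to the priority bands above.
function riskPriority(probability: Level, impact: Level): string {
  const score = probability * impact;
  if (score >= 9) return 'Critical';
  if (score >= 6) return 'High';
  if (score >= 4) return 'Medium';
  if (score >= 2) return 'Low';
  return 'Minimal';
}

console.log(riskPriority(3, 3)); // "Critical" (score 9)
console.log(riskPriority(2, 2)); // "Medium" (score 4)
```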
### 3. Risk Prioritization
Create risk matrix:
```markdown
## Risk Matrix
| Risk ID | Description | Probability | Impact | Score | Priority |
| -------- | ----------------------- | ----------- | ---------- | ----- | -------- |
| SEC-001 | XSS vulnerability | High (3) | High (3) | 9 | Critical |
| PERF-001 | Slow query on dashboard | Medium (2) | Medium (2) | 4 | Medium |
| DATA-001 | Backup failure | Low (1) | High (3) | 3 | Low |
```
### 4. Risk Mitigation Strategies
For each identified risk, provide mitigation:
```yaml
mitigation:
risk_id: 'SEC-001'
strategy: 'preventive' # preventive|detective|corrective
actions:
- 'Implement input validation library (e.g., validator.js)'
- 'Add CSP headers to prevent XSS execution'
- 'Sanitize all user inputs before storage'
- 'Escape all outputs in templates'
testing_requirements:
- 'Security testing with OWASP ZAP'
- 'Manual penetration testing of forms'
- 'Unit tests for validation functions'
residual_risk: 'Low - Some zero-day vulnerabilities may remain'
owner: 'dev'
timeline: 'Before deployment'
```
## Outputs
### Output 1: Gate YAML Block
Generate for pasting into gate file under `risk_summary`:
**Output rules:**
- Only include assessed risks; do not emit placeholders
- Sort risks by score (desc) when emitting highest and any tabular lists
- If no risks: totals all zeros, omit highest, keep recommendations arrays empty
```yaml
# risk_summary (paste into gate file):
risk_summary:
totals:
critical: X # score 9
high: Y # score 6
medium: Z # score 4
low: W # score 2-3
highest:
id: SEC-001
score: 9
title: 'XSS on profile form'
recommendations:
must_fix:
- 'Add input sanitization & CSP'
monitor:
- 'Add security alerts for auth endpoints'
```
### Output 2: Markdown Report
**Save to:** `docs/qa/assessments/{epic}.{story}-risk-{YYYYMMDD}.md`
```markdown
# Risk Profile: Story {epic}.{story}
Date: {date}
Reviewer: Quinn (Test Architect)
## Executive Summary
- Total Risks Identified: X
- Critical Risks: Y
- High Risks: Z
- Risk Score: XX/100 (calculated)
## Critical Risks Requiring Immediate Attention
### 1. [ID]: Risk Title
**Score: 9 (Critical)**
**Probability**: High - Detailed reasoning
**Impact**: High - Potential consequences
**Mitigation**:
- Immediate action required
- Specific steps to take
**Testing Focus**: Specific test scenarios needed
## Risk Distribution
### By Category
- Security: X risks (Y critical)
- Performance: X risks (Y critical)
- Data: X risks (Y critical)
- Business: X risks (Y critical)
- Operational: X risks (Y critical)
### By Component
- Frontend: X risks
- Backend: X risks
- Database: X risks
- Infrastructure: X risks
## Detailed Risk Register
[Full table of all risks with scores and mitigations]
## Risk-Based Testing Strategy
### Priority 1: Critical Risk Tests
- Test scenarios for critical risks
- Required test types (security, load, chaos)
- Test data requirements
### Priority 2: High Risk Tests
- Integration test scenarios
- Edge case coverage
### Priority 3: Medium/Low Risk Tests
- Standard functional tests
- Regression test suite
## Risk Acceptance Criteria
### Must Fix Before Production
- All critical risks (score 9)
- High risks affecting security/data
### Can Deploy with Mitigation
- Medium risks with compensating controls
- Low risks with monitoring in place
### Accepted Risks
- Document any risks team accepts
- Include sign-off from appropriate authority
## Monitoring Requirements
Post-deployment monitoring for:
- Performance metrics for PERF risks
- Security alerts for SEC risks
- Error rates for operational risks
- Business KPIs for business risks
## Risk Review Triggers
Review and update risk profile when:
- Architecture changes significantly
- New integrations added
- Security vulnerabilities discovered
- Performance issues reported
- Regulatory requirements change
```
## Risk Scoring Algorithm
Calculate overall story risk score:
```
Base Score = 100
For each risk:
- Critical (9): Deduct 20 points
- High (6): Deduct 10 points
- Medium (4): Deduct 5 points
- Low (2-3): Deduct 2 points
Minimum score = 0 (extremely risky)
Maximum score = 100 (minimal risk)
```
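An illustrative TypeScript sketch of this deduction scheme (minimal-risk items with score 1 are left undeducted, since the spec assigns them no penalty; that is an assumption worth confirming):

```typescript
// Deduct points per risk, based on its probability × impact score.
function storyRiskScore(riskScores: number[]): number {
  let score = 100;
  for (const s of riskScores) {
    if (s >= 9) score -= 20; // critical
    else if (s >= 6) score -= 10; // high
    else if (s >= 4) score -= 5; // medium
    else if (s >= 2) score -= 2; // low (2-3)
    // score 1 (minimal): no deduction defined by the spec
  }
  return Math.max(0, Math.min(100, score));
}

console.log(storyRiskScore([9, 4, 3])); // 100 - 20 - 5 - 2 = 73
```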
## Risk-Based Recommendations
Based on risk profile, recommend:
1. **Testing Priority**
- Which tests to run first
- Additional test types needed
- Test environment requirements
2. **Development Focus**
- Code review emphasis areas
- Additional validation needed
- Security controls to implement
3. **Deployment Strategy**
- Phased rollout for high-risk changes
- Feature flags for risky features
- Rollback procedures
4. **Monitoring Setup**
- Metrics to track
- Alerts to configure
- Dashboard requirements
## Integration with Quality Gates
**Deterministic gate mapping:**
- Any risk with score ≥ 9 → Gate = FAIL (unless waived)
- Else if any score ≥ 6 → Gate = CONCERNS
- Else → Gate = PASS
- Unmitigated risks → Document in gate
### Output 3: Story Hook Line
**Print this line for review task to quote:**
```
Risk profile: docs/qa/assessments/{epic}.{story}-risk-{YYYYMMDD}.md
```
## Key Principles
- Identify risks early and systematically
- Use consistent probability × impact scoring
- Provide actionable mitigation strategies
- Link risks to specific test requirements
- Track residual risk after mitigation
- Update risk profile as story evolves

View File

@@ -91,13 +91,11 @@ CRITICAL: Use proper parsing that understands markdown context. A ## inside a co
For each extracted section:
1. **Generate filename**: Convert the section heading to lowercase-dash-case
- Remove special characters
- Replace spaces with dashes
- Example: "## Tech Stack" → `tech-stack.md`
2. **Adjust heading levels**:
- The level 2 heading becomes level 1 (# instead of ##) in the sharded new document
- All subsection levels decrease by 1:

View File

@@ -0,0 +1,174 @@
# test-design
Create comprehensive test scenarios with appropriate test level recommendations for story implementation.
## Inputs
```yaml
required:
- story_id: '{epic}.{story}' # e.g., "1.3"
- story_path: '{devStoryLocation}/{epic}.{story}.*.md' # Path from core-config.yaml
- story_title: '{title}' # If missing, derive from story file H1
- story_slug: '{slug}' # If missing, derive from title (lowercase, hyphenated)
```
## Purpose
Design a complete test strategy that identifies what to test, at which level (unit/integration/e2e), and why. This ensures efficient test coverage without redundancy while maintaining appropriate test boundaries.
## Dependencies
```yaml
data:
- test-levels-framework.md # Unit/Integration/E2E decision criteria
- test-priorities-matrix.md # P0/P1/P2/P3 classification system
```
## Process
### 1. Analyze Story Requirements
Break down each acceptance criterion into testable scenarios. For each AC:
- Identify the core functionality to test
- Determine data variations needed
- Consider error conditions
- Note edge cases
### 2. Apply Test Level Framework
**Reference:** Load `test-levels-framework.md` for detailed criteria
Quick rules:
- **Unit**: Pure logic, algorithms, calculations
- **Integration**: Component interactions, DB operations
- **E2E**: Critical user journeys, compliance
### 3. Assign Priorities
**Reference:** Load `test-priorities-matrix.md` for classification
Quick priority assignment:
- **P0**: Revenue-critical, security, compliance
- **P1**: Core user journeys, frequently used
- **P2**: Secondary features, admin functions
- **P3**: Nice-to-have, rarely used
### 4. Design Test Scenarios
For each identified test need, create:
```yaml
test_scenario:
id: '{epic}.{story}-{LEVEL}-{SEQ}'
requirement: 'AC reference'
priority: P0|P1|P2|P3
level: unit|integration|e2e
description: 'What is being tested'
justification: 'Why this level was chosen'
mitigates_risks: ['RISK-001'] # If risk profile exists
```
### 5. Validate Coverage
Ensure:
- Every AC has at least one test
- No duplicate coverage across levels
- Critical paths have multiple levels
- Risk mitigations are addressed
## Outputs
### Output 1: Test Design Document
**Save to:** `docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md`
```markdown
# Test Design: Story {epic}.{story}
Date: {date}
Designer: Quinn (Test Architect)
## Test Strategy Overview
- Total test scenarios: X
- Unit tests: Y (A%)
- Integration tests: Z (B%)
- E2E tests: W (C%)
- Priority distribution: P0: X, P1: Y, P2: Z
## Test Scenarios by Acceptance Criteria
### AC1: {description}
#### Scenarios
| ID | Level | Priority | Test | Justification |
| ------------ | ----------- | -------- | ------------------------- | ------------------------ |
| 1.3-UNIT-001 | Unit | P0 | Validate input format | Pure validation logic |
| 1.3-INT-001 | Integration | P0 | Service processes request | Multi-component flow |
| 1.3-E2E-001 | E2E | P1 | User completes journey | Critical path validation |
[Continue for all ACs...]
## Risk Coverage
[Map test scenarios to identified risks if risk profile exists]
## Recommended Execution Order
1. P0 Unit tests (fail fast)
2. P0 Integration tests
3. P0 E2E tests
4. P1 tests in order
5. P2+ as time permits
```
### Output 2: Gate YAML Block
Generate for inclusion in quality gate:
```yaml
test_design:
scenarios_total: X
by_level:
unit: Y
integration: Z
e2e: W
by_priority:
p0: A
p1: B
p2: C
coverage_gaps: [] # List any ACs without tests
```
### Output 3: Trace References
Print for use by trace-requirements task:
```text
Test design matrix: docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md
P0 tests identified: {count}
```
## Quality Checklist
Before finalizing, verify:
- [ ] Every AC has test coverage
- [ ] Test levels are appropriate (not over-testing)
- [ ] No duplicate coverage across levels
- [ ] Priorities align with business risk
- [ ] Test IDs follow naming convention
- [ ] Scenarios are atomic and independent
## Key Principles
- **Shift left**: Prefer unit over integration, integration over E2E
- **Risk-based**: Focus on what could go wrong
- **Efficient coverage**: Test once at the right level
- **Maintainability**: Consider long-term test maintenance
- **Fast feedback**: Quick tests run first

View File

@@ -0,0 +1,264 @@
# trace-requirements
Map story requirements to test cases using Given-When-Then patterns for comprehensive traceability.
## Purpose
Create a requirements traceability matrix that ensures every acceptance criterion has corresponding test coverage. This task helps identify gaps in testing and ensures all requirements are validated.
**IMPORTANT**: Given-When-Then is used here for documenting the mapping between requirements and tests, NOT for writing the actual test code. Tests should follow your project's testing standards (no BDD syntax in test code).
## Prerequisites
- Story file with clear acceptance criteria
- Access to test files or test specifications
- Understanding of the implementation
## Traceability Process
### 1. Extract Requirements
Identify all testable requirements from:
- Acceptance Criteria (primary source)
- User story statement
- Tasks/subtasks with specific behaviors
- Non-functional requirements mentioned
- Edge cases documented
### 2. Map to Test Cases
For each requirement, document which tests validate it. Use Given-When-Then to describe what the test validates (not how it's written):
```yaml
requirement: 'AC1: User can login with valid credentials'
test_mappings:
- test_file: 'auth/login.test.ts'
test_case: 'should successfully login with valid email and password'
# Given-When-Then describes WHAT the test validates, not HOW it's coded
given: 'A registered user with valid credentials'
when: 'They submit the login form'
then: 'They are redirected to dashboard and session is created'
coverage: full
- test_file: 'e2e/auth-flow.test.ts'
test_case: 'complete login flow'
given: 'User on login page'
when: 'Entering valid credentials and submitting'
then: 'Dashboard loads with user data'
coverage: integration
```
### 3. Coverage Analysis
Evaluate coverage for each requirement:
**Coverage Levels:**
- `full`: Requirement completely tested
- `partial`: Some aspects tested, gaps exist
- `none`: No test coverage found
- `integration`: Covered in integration/e2e tests only
- `unit`: Covered in unit tests only
### 4. Gap Identification
Document any gaps found:
```yaml
coverage_gaps:
- requirement: 'AC3: Password reset email sent within 60 seconds'
gap: 'No test for email delivery timing'
severity: medium
suggested_test:
type: integration
description: 'Test email service SLA compliance'
- requirement: 'AC5: Support 1000 concurrent users'
gap: 'No load testing implemented'
severity: high
suggested_test:
type: performance
description: 'Load test with 1000 concurrent connections'
```
## Outputs
### Output 1: Gate YAML Block
**Generate for pasting into gate file under `trace`:**
```yaml
trace:
totals:
requirements: X
full: Y
partial: Z
none: W
planning_ref: 'docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md'
uncovered:
- ac: 'AC3'
reason: 'No test found for password reset timing'
notes: 'See docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md'
```
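As an illustration of how the totals and `uncovered` list might be derived from the mappings, here is a small TypeScript sketch; the `Mapping` shape is an assumption, and treating unit-only or integration-only coverage as partial is a simplification rather than a rule from this task.

```typescript
type Coverage = 'full' | 'partial' | 'none' | 'integration' | 'unit';

interface Mapping {
  ac: string; // e.g. "AC3"
  coverage: Coverage;
  reason?: string; // why uncovered, if applicable
}

function traceTotals(mappings: Mapping[]) {
  const totals = { requirements: mappings.length, full: 0, partial: 0, none: 0 };
  const uncovered: { ac: string; reason: string }[] = [];
  for (const m of mappings) {
    if (m.coverage === 'full') totals.full++;
    else if (m.coverage === 'none') {
      totals.none++;
      uncovered.push({ ac: m.ac, reason: m.reason ?? 'No test found' });
    } else totals.partial++; // partial, unit-only, or integration-only
  }
  return { totals, uncovered };
}
```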
### Output 2: Traceability Report
**Save to:** `docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md`
Create a traceability report with:
```markdown
# Requirements Traceability Matrix
## Story: {epic}.{story} - {title}
### Coverage Summary
- Total Requirements: X
- Fully Covered: Y (Z%)
- Partially Covered: A (B%)
- Not Covered: C (D%)
### Requirement Mappings
#### AC1: {Acceptance Criterion 1}
**Coverage: FULL**
Given-When-Then Mappings:
- **Unit Test**: `auth.service.test.ts::validateCredentials`
- Given: Valid user credentials
- When: Validation method called
- Then: Returns true with user object
- **Integration Test**: `auth.integration.test.ts::loginFlow`
- Given: User with valid account
- When: Login API called
- Then: JWT token returned and session created
#### AC2: {Acceptance Criterion 2}
**Coverage: PARTIAL**
[Continue for all ACs...]
### Critical Gaps
1. **Performance Requirements**
- Gap: No load testing for concurrent users
- Risk: High - Could fail under production load
- Action: Implement load tests using k6 or similar
2. **Security Requirements**
- Gap: Rate limiting not tested
- Risk: Medium - Potential DoS vulnerability
- Action: Add rate limit tests to integration suite
### Test Design Recommendations
Based on gaps identified, recommend:
1. Additional test scenarios needed
2. Test types to implement (unit/integration/e2e/performance)
3. Test data requirements
4. Mock/stub strategies
### Risk Assessment
- **High Risk**: Requirements with no coverage
- **Medium Risk**: Requirements with only partial coverage
- **Low Risk**: Requirements with full unit + integration coverage
```
## Traceability Best Practices
### Given-When-Then for Mapping (Not Test Code)
Use Given-When-Then to document what each test validates:
**Given**: The initial context the test sets up
- What state/data the test prepares
- User context being simulated
- System preconditions
**When**: The action the test performs
- What the test executes
- API calls or user actions tested
- Events triggered
**Then**: What the test asserts
- Expected outcomes verified
- State changes checked
- Values validated
**Note**: This is for documentation only. Actual test code follows your project's standards (e.g., describe/it blocks, no BDD syntax).
### Coverage Priority
Prioritize coverage based on:
1. Critical business flows
2. Security-related requirements
3. Data integrity requirements
4. User-facing features
5. Performance SLAs
### Test Granularity
Map at appropriate levels:
- Unit tests for business logic
- Integration tests for component interaction
- E2E tests for user journeys
- Performance tests for NFRs
## Quality Indicators
Good traceability shows:
- Every AC has at least one test
- Critical paths have multiple test levels
- Edge cases are explicitly covered
- NFRs have appropriate test types
- Clear Given-When-Then for each test
## Red Flags
Watch for:
- ACs with no test coverage
- Tests that don't map to requirements
- Vague test descriptions
- Missing edge case coverage
- NFRs without specific tests
## Integration with Gates
This traceability feeds into quality gates:
- Critical gaps → FAIL
- Minor gaps → CONCERNS
- Missing P0 tests from test-design → CONCERNS
- Full coverage → PASS contribution
### Output 3: Story Hook Line
**Print this line for review task to quote:**
```text
Trace matrix: docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md
```
## Key Principles
- Every requirement must be testable
- Use Given-When-Then for clarity
- Identify both presence and absence
- Prioritize based on risk
- Make recommendations actionable

View File

@@ -141,7 +141,14 @@ sections:
title: Feature Comparison Matrix
instruction: Create a detailed comparison table of key features across competitors
type: table
columns: ["Feature Category", "{{your_company}}", "{{competitor_1}}", "{{competitor_2}}", "{{competitor_3}}"]
columns:
[
"Feature Category",
"{{your_company}}",
"{{competitor_1}}",
"{{competitor_2}}",
"{{competitor_3}}",
]
rows:
- category: "Core Functionality"
items:
@@ -153,7 +160,13 @@ sections:
- ["Onboarding Time", "{{time}}", "{{time}}", "{{time}}", "{{time}}"]
- category: "Integration & Ecosystem"
items:
- ["API Availability", "{{availability}}", "{{availability}}", "{{availability}}", "{{availability}}"]
- [
"API Availability",
"{{availability}}",
"{{availability}}",
"{{availability}}",
"{{availability}}",
]
- ["Third-party Integrations", "{{number}}", "{{number}}", "{{number}}", "{{number}}"]
- category: "Pricing & Plans"
items:

View File

@@ -75,12 +75,24 @@ sections:
rows:
- ["Framework", "{{framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["UI Library", "{{ui_library}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["State Management", "{{state_management}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- [
"State Management",
"{{state_management}}",
"{{version}}",
"{{purpose}}",
"{{why_chosen}}",
]
- ["Routing", "{{routing_library}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Build Tool", "{{build_tool}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Styling", "{{styling_solution}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Testing", "{{test_framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Component Library", "{{component_lib}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- [
"Component Library",
"{{component_lib}}",
"{{version}}",
"{{purpose}}",
"{{why_chosen}}",
]
- ["Form Handling", "{{form_library}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Animation", "{{animation_lib}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Dev Tools", "{{dev_tools}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]

View File

@@ -156,11 +156,29 @@ sections:
columns: [Category, Technology, Version, Purpose, Rationale]
rows:
- ["Frontend Language", "{{fe_language}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Frontend Framework", "{{fe_framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["UI Component Library", "{{ui_library}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- [
"Frontend Framework",
"{{fe_framework}}",
"{{version}}",
"{{purpose}}",
"{{why_chosen}}",
]
- [
"UI Component Library",
"{{ui_library}}",
"{{version}}",
"{{purpose}}",
"{{why_chosen}}",
]
- ["State Management", "{{state_mgmt}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Backend Language", "{{be_language}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Backend Framework", "{{be_framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- [
"Backend Framework",
"{{be_framework}}",
"{{version}}",
"{{purpose}}",
"{{why_chosen}}",
]
- ["API Style", "{{api_style}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Database", "{{database}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Cache", "{{cache}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]

View File

@@ -0,0 +1,102 @@
template:
id: qa-gate-template-v1
name: Quality Gate Decision
version: 1.0
output:
format: yaml
filename: docs/qa/gates/{{epic_num}}.{{story_num}}-{{story_slug}}.yml
title: "Quality Gate: {{epic_num}}.{{story_num}}"
# Required fields (keep these first)
schema: 1
story: "{{epic_num}}.{{story_num}}"
story_title: "{{story_title}}"
gate: "{{gate_status}}" # PASS|CONCERNS|FAIL|WAIVED
status_reason: "{{status_reason}}" # 1-2 sentence summary of why this gate decision
reviewer: "Quinn (Test Architect)"
updated: "{{iso_timestamp}}"
# Always present but only active when WAIVED
waiver: { active: false }
# Issues (if any) - Use fixed severity: low | medium | high
top_issues: []
# Risk summary (from risk-profile task if run)
risk_summary:
totals: { critical: 0, high: 0, medium: 0, low: 0 }
recommendations:
must_fix: []
monitor: []
# Examples section using block scalars for clarity
examples:
with_issues: |
top_issues:
- id: "SEC-001"
severity: high # ONLY: low|medium|high
finding: "No rate limiting on login endpoint"
suggested_action: "Add rate limiting middleware before production"
- id: "TEST-001"
severity: medium
finding: "Missing integration tests for auth flow"
suggested_action: "Add test coverage for critical paths"
when_waived: |
waiver:
active: true
reason: "Accepted for MVP release - will address in next sprint"
approved_by: "Product Owner"
# ============ Optional Extended Fields ============
# Uncomment and use if your team wants more detail
optional_fields_examples:
quality_and_expiry: |
quality_score: 75 # 0-100 (optional scoring)
expires: "2025-01-26T00:00:00Z" # Optional gate freshness window
evidence: |
evidence:
tests_reviewed: 15
risks_identified: 3
trace:
ac_covered: [1, 2, 3] # AC numbers with test coverage
ac_gaps: [4] # AC numbers lacking coverage
nfr_validation: |
nfr_validation:
security: { status: CONCERNS, notes: "Rate limiting missing" }
performance: { status: PASS, notes: "" }
reliability: { status: PASS, notes: "" }
maintainability: { status: PASS, notes: "" }
history: |
history: # Append-only audit trail
- at: "2025-01-12T10:00:00Z"
gate: FAIL
note: "Initial review - missing tests"
- at: "2025-01-12T15:00:00Z"
gate: CONCERNS
note: "Tests added but rate limiting still missing"
risk_summary: |
risk_summary: # From risk-profile task
totals:
critical: 0
high: 0
medium: 0
low: 0
# 'highest' is emitted only when risks exist
recommendations:
must_fix: []
monitor: []
recommendations: |
recommendations:
immediate: # Must fix before production
- action: "Add rate limiting to auth endpoints"
refs: ["api/auth/login.ts:42-68"]
future: # Can be addressed later
- action: "Consider caching for better performance"
refs: ["services/data.service.ts"]

View File

@@ -9,7 +9,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
## Instructions
1. **Initial Assessment**
- If user or the task being run provides a checklist name:
- Try fuzzy matching (e.g. "architecture checklist" -> "architect-checklist")
- If multiple matches found, ask user to clarify
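For illustration, fuzzy matching of the kind mentioned above could be as simple as the following TypeScript sketch (normalization plus per-token prefix matching); the real agent behaviour may differ.

```typescript
// Normalize names, then match each query token against name words by prefix.
function matchChecklists(query: string, available: string[]): string[] {
  const norm = (s: string) =>
    s.toLowerCase().replace(/\.md$/, '').replace(/[^a-z0-9]+/g, '-');
  const tokens = norm(query).split('-').filter(Boolean);
  return available.filter((name) => {
    const words = norm(name).split('-');
    return tokens.every((t) =>
      words.some((w) => w.startsWith(t) || t.startsWith(w)),
    );
  });
}

console.log(
  matchChecklists('architecture checklist', [
    'architect-checklist',
    'po-master-checklist',
    'story-dod-checklist',
  ]),
); // ["architect-checklist"]
```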
@@ -22,14 +21,12 @@ If the user asks or does not specify a specific checklist, list the checklists a
- All at once (YOLO mode - recommended for checklists, there will be a summary of sections at the end to discuss)
2. **Document and Artifact Gathering**
- Each checklist will specify its required documents/artifacts at the beginning
- Follow the checklist's specific instructions for what to gather; generally a file can be resolved in the docs folder. If it cannot be found or you are unsure, halt and confirm with the user.
3. **Checklist Processing**
If in interactive mode:
- Work through each section of the checklist one at a time
- For each section:
- Review all items in the section following instructions for that section embedded in the checklist
@@ -38,7 +35,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
- Get user confirmation before proceeding to the next section, or halt and take corrective action if anything major is found
If in YOLO mode:
- Process all sections at once
- Create a comprehensive report of all findings
- Present the complete analysis to the user
@@ -46,7 +42,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
4. **Validation Approach**
For each checklist item:
- Read and understand the requirement
- Look for evidence in the documentation that satisfies the requirement
- Consider both explicit mentions and implicit coverage
@@ -60,7 +55,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
5. **Section Analysis**
For each section:
- Think step by step to calculate the pass rate
- Identify common themes in failed items
- Provide specific recommendations for improvement
@@ -70,7 +64,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
6. **Final Report**
Prepare a summary that includes:
- Overall checklist completion status
- Pass rates by section
- List of failed items with context

View File

@@ -14,7 +14,7 @@ template:
output:
format: markdown
filename: default-path/to/{{filename}}.md
title: "{{variable}} Document Title"
title: '{{variable}} Document Title'
workflow:
mode: interactive
@@ -108,8 +108,8 @@ sections:
Use `{{variable_name}}` in titles, templates, and content:
```yaml
title: "Epic {{epic_number}} {{epic_title}}"
template: "As a {{user_type}}, I want {{action}}, so that {{benefit}}."
title: 'Epic {{epic_number}} {{epic_title}}'
template: 'As a {{user_type}}, I want {{action}}, so that {{benefit}}.'
```
### Conditional Sections
@@ -212,7 +212,7 @@ choices:
- id: criteria
title: Acceptance Criteria
type: numbered-list
item_template: "{{criterion_number}}: {{criteria}}"
item_template: '{{criterion_number}}: {{criteria}}'
repeatable: true
```
@@ -220,7 +220,7 @@ choices:
````yaml
examples:
- "FR6: The system must authenticate users within 2 seconds"
- 'FR6: The system must authenticate users within 2 seconds'
- |
```mermaid
sequenceDiagram

View File

@@ -106,7 +106,7 @@ dependencies:
==================== START: .bmad-core/tasks/facilitate-brainstorming-session.md ====================
---
docOutputLocation: docs/brainstorming-session-results.md
template: ".bmad-core/templates/brainstorming-output-tmpl.yaml"
template: '.bmad-core/templates/brainstorming-output-tmpl.yaml'
---
# Facilitate Brainstorming Session Task
@@ -266,63 +266,54 @@ CRITICAL: First, help the user select the most appropriate research focus based
Present these numbered options to the user:
1. **Product Validation Research**
- Validate product hypotheses and market fit
- Test assumptions about user needs and solutions
- Assess technical and business feasibility
- Identify risks and mitigation strategies
2. **Market Opportunity Research**
- Analyze market size and growth potential
- Identify market segments and dynamics
- Assess market entry strategies
- Evaluate timing and market readiness
3. **User & Customer Research**
- Deep dive into user personas and behaviors
- Understand jobs-to-be-done and pain points
- Map customer journeys and touchpoints
- Analyze willingness to pay and value perception
4. **Competitive Intelligence Research**
- Detailed competitor analysis and positioning
- Feature and capability comparisons
- Business model and strategy analysis
- Identify competitive advantages and gaps
5. **Technology & Innovation Research**
- Assess technology trends and possibilities
- Evaluate technical approaches and architectures
- Identify emerging technologies and disruptions
- Analyze build vs. buy vs. partner options
6. **Industry & Ecosystem Research**
- Map industry value chains and dynamics
- Identify key players and relationships
- Analyze regulatory and compliance factors
- Understand partnership opportunities
7. **Strategic Options Research**
- Evaluate different strategic directions
- Assess business model alternatives
- Analyze go-to-market strategies
- Consider expansion and scaling paths
8. **Risk & Feasibility Research**
- Identify and assess various risk factors
- Evaluate implementation challenges
- Analyze resource requirements
- Consider regulatory and legal implications
9. **Custom Research Focus**
- User-defined research objectives
- Specialized domain investigation
- Cross-functional research needs
@@ -491,13 +482,11 @@ CRITICAL: collaborate with the user to develop specific, actionable research que
### 5. Review and Refinement
1. **Present Complete Prompt**
- Show the full research prompt
- Explain key elements and rationale
- Highlight any assumptions made
2. **Gather Feedback**
- Are the objectives clear and correct?
- Do the questions address all concerns?
- Is the scope appropriate?
@@ -873,7 +862,7 @@ This document captures the CURRENT STATE of the [Project Name] codebase, includi
### Change Log
| Date | Version | Description | Author |
|------|---------|-------------|--------|
| ------ | ------- | --------------------------- | --------- |
| [Date] | 1.0 | Initial brownfield analysis | [Analyst] |
## Quick Reference - Key Files and Entry Points
@@ -898,7 +887,7 @@ This document captures the CURRENT STATE of the [Project Name] codebase, includi
### Actual Tech Stack (from package.json/requirements.txt)
| Category | Technology | Version | Notes |
|----------|------------|---------|--------|
| --------- | ---------- | ------- | -------------------------- |
| Runtime | Node.js | 16.x | [Any constraints] |
| Framework | Express | 4.18.2 | [Custom middleware?] |
| Database | PostgreSQL | 13 | [Connection pooling setup] |
@@ -940,6 +929,7 @@ project-root/
### Data Models
Instead of duplicating, reference actual model files:
- **User Model**: See `src/models/User.js`
- **Order Model**: See `src/models/Order.js`
- **Related Types**: TypeScript definitions in `src/types/`
@@ -970,7 +960,7 @@ Instead of duplicating, reference actual model files:
### External Services
| Service | Purpose | Integration Type | Key Files |
|---------|---------|------------------|-----------|
| -------- | -------- | ---------------- | ------------------------------ |
| Stripe | Payments | REST API | `src/integrations/stripe/` |
| SendGrid | Emails | SDK | `src/services/emailService.js` |
@@ -1017,6 +1007,7 @@ npm run test:integration # Runs integration tests (requires local DB)
### Files That Will Need Modification
Based on the enhancement requirements, these files will be affected:
- `src/services/userService.js` - Add new user fields
- `src/models/User.js` - Update schema
- `src/routes/userRoutes.js` - New endpoints
@@ -1110,24 +1101,24 @@ template:
output:
format: markdown
filename: docs/brief.md
title: "Project Brief: {{project_name}}"
title: 'Project Brief: {{project_name}}'
workflow:
mode: interactive
elicitation: advanced-elicitation
custom_elicitation:
title: "Project Brief Elicitation Actions"
title: 'Project Brief Elicitation Actions'
options:
- "Expand section with more specific details"
- "Validate against similar successful products"
- "Stress test assumptions with edge cases"
- "Explore alternative solution approaches"
- "Analyze resource/constraint trade-offs"
- "Generate risk mitigation strategies"
- "Challenge scope from MVP minimalist view"
- "Brainstorm creative feature possibilities"
- "If only we had [resource/capability/time]..."
- "Proceed to next section"
- 'Expand section with more specific details'
- 'Validate against similar successful products'
- 'Stress test assumptions with edge cases'
- 'Explore alternative solution approaches'
- 'Analyze resource/constraint trade-offs'
- 'Generate risk mitigation strategies'
- 'Challenge scope from MVP minimalist view'
- 'Brainstorm creative feature possibilities'
- 'If only we had [resource/capability/time]...'
- 'Proceed to next section'
sections:
- id: introduction
@@ -1149,7 +1140,7 @@ sections:
- Primary problem being solved
- Target market identification
- Key value proposition
template: "{{executive_summary_content}}"
template: '{{executive_summary_content}}'
- id: problem-statement
title: Problem Statement
@@ -1159,7 +1150,7 @@ sections:
- Impact of the problem (quantify if possible)
- Why existing solutions fall short
- Urgency and importance of solving this now
template: "{{detailed_problem_description}}"
template: '{{detailed_problem_description}}'
- id: proposed-solution
title: Proposed Solution
@@ -1169,7 +1160,7 @@ sections:
- Key differentiators from existing solutions
- Why this solution will succeed where others haven't
- High-level vision for the product
template: "{{solution_description}}"
template: '{{solution_description}}'
- id: target-users
title: Target Users
@@ -1181,12 +1172,12 @@ sections:
- Goals they're trying to achieve
sections:
- id: primary-segment
title: "Primary User Segment: {{segment_name}}"
template: "{{primary_user_description}}"
title: 'Primary User Segment: {{segment_name}}'
template: '{{primary_user_description}}'
- id: secondary-segment
title: "Secondary User Segment: {{segment_name}}"
title: 'Secondary User Segment: {{segment_name}}'
condition: Has secondary user segment
template: "{{secondary_user_description}}"
template: '{{secondary_user_description}}'
- id: goals-metrics
title: Goals & Success Metrics
@@ -1195,15 +1186,15 @@ sections:
- id: business-objectives
title: Business Objectives
type: bullet-list
template: "- {{objective_with_metric}}"
template: '- {{objective_with_metric}}'
- id: user-success-metrics
title: User Success Metrics
type: bullet-list
template: "- {{user_metric}}"
template: '- {{user_metric}}'
- id: kpis
title: Key Performance Indicators (KPIs)
type: bullet-list
template: "- {{kpi}}: {{definition_and_target}}"
template: '- {{kpi}}: {{definition_and_target}}'
- id: mvp-scope
title: MVP Scope
@@ -1212,14 +1203,14 @@ sections:
- id: core-features
title: Core Features (Must Have)
type: bullet-list
template: "- **{{feature}}:** {{description_and_rationale}}"
template: '- **{{feature}}:** {{description_and_rationale}}'
- id: out-of-scope
title: Out of Scope for MVP
type: bullet-list
template: "- {{feature_or_capability}}"
template: '- {{feature_or_capability}}'
- id: mvp-success-criteria
title: MVP Success Criteria
template: "{{mvp_success_definition}}"
template: '{{mvp_success_definition}}'
- id: post-mvp-vision
title: Post-MVP Vision
@@ -1227,13 +1218,13 @@ sections:
sections:
- id: phase-2-features
title: Phase 2 Features
template: "{{next_priority_features}}"
template: '{{next_priority_features}}'
- id: long-term-vision
title: Long-term Vision
template: "{{one_two_year_vision}}"
template: '{{one_two_year_vision}}'
- id: expansion-opportunities
title: Expansion Opportunities
template: "{{potential_expansions}}"
template: '{{potential_expansions}}'
- id: technical-considerations
title: Technical Considerations
@@ -1274,7 +1265,7 @@ sections:
- id: key-assumptions
title: Key Assumptions
type: bullet-list
template: "- {{assumption}}"
template: '- {{assumption}}'
- id: risks-questions
title: Risks & Open Questions
@@ -1283,15 +1274,15 @@ sections:
- id: key-risks
title: Key Risks
type: bullet-list
template: "- **{{risk}}:** {{description_and_impact}}"
template: '- **{{risk}}:** {{description_and_impact}}'
- id: open-questions
title: Open Questions
type: bullet-list
template: "- {{question}}"
template: '- {{question}}'
- id: research-areas
title: Areas Needing Further Research
type: bullet-list
template: "- {{research_topic}}"
template: '- {{research_topic}}'
- id: appendices
title: Appendices
@@ -1308,10 +1299,10 @@ sections:
- id: stakeholder-input
title: B. Stakeholder Input
condition: Has stakeholder feedback
template: "{{stakeholder_feedback}}"
template: '{{stakeholder_feedback}}'
- id: references
title: C. References
template: "{{relevant_links_and_docs}}"
template: '{{relevant_links_and_docs}}'
- id: next-steps
title: Next Steps
@@ -1319,7 +1310,7 @@ sections:
- id: immediate-actions
title: Immediate Actions
type: numbered-list
template: "{{action_item}}"
template: '{{action_item}}'
- id: pm-handoff
title: PM Handoff
content: |
@@ -1334,24 +1325,24 @@ template:
output:
format: markdown
filename: docs/market-research.md
title: "Market Research Report: {{project_product_name}}"
title: 'Market Research Report: {{project_product_name}}'
workflow:
mode: interactive
elicitation: advanced-elicitation
custom_elicitation:
title: "Market Research Elicitation Actions"
title: 'Market Research Elicitation Actions'
options:
- "Expand market sizing calculations with sensitivity analysis"
- "Deep dive into a specific customer segment"
- "Analyze an emerging market trend in detail"
- "Compare this market to an analogous market"
- "Stress test market assumptions"
- "Explore adjacent market opportunities"
- "Challenge market definition and boundaries"
- "Generate strategic scenarios (best/base/worst case)"
- "If only we had considered [X market factor]..."
- "Proceed to next section"
- 'Expand market sizing calculations with sensitivity analysis'
- 'Deep dive into a specific customer segment'
- 'Analyze an emerging market trend in detail'
- 'Compare this market to an analogous market'
- 'Stress test market assumptions'
- 'Explore adjacent market opportunities'
- 'Challenge market definition and boundaries'
- 'Generate strategic scenarios (best/base/worst case)'
- 'If only we had considered [X market factor]...'
- 'Proceed to next section'
sections:
- id: executive-summary
@@ -1433,7 +1424,7 @@ sections:
repeatable: true
sections:
- id: segment
title: "Segment {{segment_number}}: {{segment_name}}"
title: 'Segment {{segment_number}}: {{segment_name}}'
template: |
- **Description:** {{brief_overview}}
- **Size:** {{number_of_customers_market_value}}
@@ -1502,20 +1493,20 @@ sections:
instruction: Analyze each force with specific evidence and implications
sections:
- id: supplier-power
title: "Supplier Power: {{power_level}}"
template: "{{analysis_and_implications}}"
title: 'Supplier Power: {{power_level}}'
template: '{{analysis_and_implications}}'
- id: buyer-power
title: "Buyer Power: {{power_level}}"
template: "{{analysis_and_implications}}"
title: 'Buyer Power: {{power_level}}'
template: '{{analysis_and_implications}}'
- id: competitive-rivalry
title: "Competitive Rivalry: {{intensity_level}}"
template: "{{analysis_and_implications}}"
title: 'Competitive Rivalry: {{intensity_level}}'
template: '{{analysis_and_implications}}'
- id: threat-new-entry
title: "Threat of New Entry: {{threat_level}}"
template: "{{analysis_and_implications}}"
title: 'Threat of New Entry: {{threat_level}}'
template: '{{analysis_and_implications}}'
- id: threat-substitutes
title: "Threat of Substitutes: {{threat_level}}"
template: "{{analysis_and_implications}}"
title: 'Threat of Substitutes: {{threat_level}}'
template: '{{analysis_and_implications}}'
- id: adoption-lifecycle
title: Technology Adoption Lifecycle Stage
instruction: |
@@ -1533,7 +1524,7 @@ sections:
repeatable: true
sections:
- id: opportunity
title: "Opportunity {{opportunity_number}}: {{name}}"
title: 'Opportunity {{opportunity_number}}: {{name}}'
template: |
- **Description:** {{what_is_the_opportunity}}
- **Size/Potential:** {{quantified_potential}}
@@ -1589,24 +1580,24 @@ template:
output:
format: markdown
filename: docs/competitor-analysis.md
title: "Competitive Analysis Report: {{project_product_name}}"
title: 'Competitive Analysis Report: {{project_product_name}}'
workflow:
mode: interactive
elicitation: advanced-elicitation
custom_elicitation:
title: "Competitive Analysis Elicitation Actions"
title: 'Competitive Analysis Elicitation Actions'
options:
- "Deep dive on a specific competitor's strategy"
- "Analyze competitive dynamics in a specific segment"
- "War game competitive responses to your moves"
- "Explore partnership vs. competition scenarios"
- "Stress test differentiation claims"
- "Analyze disruption potential (yours or theirs)"
- "Compare to competition in adjacent markets"
- "Generate win/loss analysis insights"
- 'Analyze competitive dynamics in a specific segment'
- 'War game competitive responses to your moves'
- 'Explore partnership vs. competition scenarios'
- 'Stress test differentiation claims'
- 'Analyze disruption potential (yours or theirs)'
- 'Compare to competition in adjacent markets'
- 'Generate win/loss analysis insights'
- "If only we had known about [competitor X's plan]..."
- "Proceed to next section"
- 'Proceed to next section'
sections:
- id: executive-summary
@@ -1673,7 +1664,7 @@ sections:
repeatable: true
sections:
- id: competitor
title: "{{competitor_name}} - Priority {{priority_level}}"
title: '{{competitor_name}} - Priority {{priority_level}}'
sections:
- id: company-overview
title: Company Overview
@@ -1705,11 +1696,11 @@ sections:
- id: strengths
title: Strengths
type: bullet-list
template: "- {{strength}}"
template: '- {{strength}}'
- id: weaknesses
title: Weaknesses
type: bullet-list
template: "- {{weakness}}"
template: '- {{weakness}}'
- id: market-position
title: Market Position & Performance
template: |
@@ -1725,24 +1716,37 @@ sections:
title: Feature Comparison Matrix
instruction: Create a detailed comparison table of key features across competitors
type: table
columns: ["Feature Category", "{{your_company}}", "{{competitor_1}}", "{{competitor_2}}", "{{competitor_3}}"]
columns:
[
'Feature Category',
'{{your_company}}',
'{{competitor_1}}',
'{{competitor_2}}',
'{{competitor_3}}',
]
rows:
- category: "Core Functionality"
- category: 'Core Functionality'
items:
- ["Feature A", "{{status}}", "{{status}}", "{{status}}", "{{status}}"]
- ["Feature B", "{{status}}", "{{status}}", "{{status}}", "{{status}}"]
- category: "User Experience"
- ['Feature A', '{{status}}', '{{status}}', '{{status}}', '{{status}}']
- ['Feature B', '{{status}}', '{{status}}', '{{status}}', '{{status}}']
- category: 'User Experience'
items:
- ["Mobile App", "{{rating}}", "{{rating}}", "{{rating}}", "{{rating}}"]
- ["Onboarding Time", "{{time}}", "{{time}}", "{{time}}", "{{time}}"]
- category: "Integration & Ecosystem"
- ['Mobile App', '{{rating}}', '{{rating}}', '{{rating}}', '{{rating}}']
- ['Onboarding Time', '{{time}}', '{{time}}', '{{time}}', '{{time}}']
- category: 'Integration & Ecosystem'
items:
- ["API Availability", "{{availability}}", "{{availability}}", "{{availability}}", "{{availability}}"]
- ["Third-party Integrations", "{{number}}", "{{number}}", "{{number}}", "{{number}}"]
- category: "Pricing & Plans"
- [
'API Availability',
'{{availability}}',
'{{availability}}',
'{{availability}}',
'{{availability}}',
]
- ['Third-party Integrations', '{{number}}', '{{number}}', '{{number}}', '{{number}}']
- category: 'Pricing & Plans'
items:
- ["Starting Price", "{{price}}", "{{price}}", "{{price}}", "{{price}}"]
- ["Free Tier", "{{yes_no}}", "{{yes_no}}", "{{yes_no}}", "{{yes_no}}"]
- ['Starting Price', '{{price}}', '{{price}}', '{{price}}', '{{price}}']
- ['Free Tier', '{{yes_no}}', '{{yes_no}}', '{{yes_no}}', '{{yes_no}}']
- id: swot-comparison
title: SWOT Comparison
instruction: Create SWOT analysis for your solution vs. top competitors
@@ -1755,7 +1759,7 @@ sections:
- **Opportunities:** {{opportunities}}
- **Threats:** {{threats}}
- id: vs-competitor
title: "vs. {{main_competitor}}"
title: 'vs. {{main_competitor}}'
template: |
- **Competitive Advantages:** {{your_advantages}}
- **Competitive Disadvantages:** {{their_advantages}}
@@ -1885,7 +1889,7 @@ template:
output:
format: markdown
filename: docs/brainstorming-session-results.md
title: "Brainstorming Session Results"
title: 'Brainstorming Session Results'
workflow:
mode: non-interactive
@@ -1910,38 +1914,38 @@ sections:
**Total Ideas Generated:** {{total_ideas}}
- id: key-themes
title: "Key Themes Identified:"
title: 'Key Themes Identified:'
type: bullet-list
template: "- {{theme}}"
template: '- {{theme}}'
- id: technique-sessions
title: Technique Sessions
repeatable: true
sections:
- id: technique
title: "{{technique_name}} - {{duration}}"
title: '{{technique_name}} - {{duration}}'
sections:
- id: description
template: "**Description:** {{technique_description}}"
template: '**Description:** {{technique_description}}'
- id: ideas-generated
title: "Ideas Generated:"
title: 'Ideas Generated:'
type: numbered-list
template: "{{idea}}"
template: '{{idea}}'
- id: insights
title: "Insights Discovered:"
title: 'Insights Discovered:'
type: bullet-list
template: "- {{insight}}"
template: '- {{insight}}'
- id: connections
title: "Notable Connections:"
title: 'Notable Connections:'
type: bullet-list
template: "- {{connection}}"
template: '- {{connection}}'
- id: idea-categorization
title: Idea Categorization
sections:
- id: immediate-opportunities
title: Immediate Opportunities
content: "*Ideas ready to implement now*"
content: '*Ideas ready to implement now*'
repeatable: true
type: numbered-list
template: |
@@ -1951,7 +1955,7 @@ sections:
- Resources needed: {{requirements}}
- id: future-innovations
title: Future Innovations
content: "*Ideas requiring development/research*"
content: '*Ideas requiring development/research*'
repeatable: true
type: numbered-list
template: |
@@ -1961,7 +1965,7 @@ sections:
- Timeline estimate: {{timeline}}
- id: moonshots
title: Moonshots
content: "*Ambitious, transformative concepts*"
content: '*Ambitious, transformative concepts*'
repeatable: true
type: numbered-list
template: |
@@ -1971,9 +1975,9 @@ sections:
- Challenges to overcome: {{challenges}}
- id: insights-learnings
title: Insights & Learnings
content: "*Key realizations from the session*"
content: '*Key realizations from the session*'
type: bullet-list
template: "- {{insight}}: {{description_and_implications}}"
template: '- {{insight}}: {{description_and_implications}}'
- id: action-planning
title: Action Planning
@@ -1982,21 +1986,21 @@ sections:
title: Top 3 Priority Ideas
sections:
- id: priority-1
title: "#1 Priority: {{idea_name}}"
title: '#1 Priority: {{idea_name}}'
template: |
- Rationale: {{rationale}}
- Next steps: {{next_steps}}
- Resources needed: {{resources}}
- Timeline: {{timeline}}
- id: priority-2
title: "#2 Priority: {{idea_name}}"
title: '#2 Priority: {{idea_name}}'
template: |
- Rationale: {{rationale}}
- Next steps: {{next_steps}}
- Resources needed: {{resources}}
- Timeline: {{timeline}}
- id: priority-3
title: "#3 Priority: {{idea_name}}"
title: '#3 Priority: {{idea_name}}'
template: |
- Rationale: {{rationale}}
- Next steps: {{next_steps}}
@@ -2009,19 +2013,19 @@ sections:
- id: what-worked
title: What Worked Well
type: bullet-list
template: "- {{aspect}}"
template: '- {{aspect}}'
- id: areas-exploration
title: Areas for Further Exploration
type: bullet-list
template: "- {{area}}: {{reason}}"
template: '- {{area}}: {{reason}}'
- id: recommended-techniques
title: Recommended Follow-up Techniques
type: bullet-list
template: "- {{technique}}: {{reason}}"
template: '- {{technique}}: {{reason}}'
- id: questions-emerged
title: Questions That Emerged
type: bullet-list
template: "- {{question}}"
template: '- {{question}}'
- id: next-session
title: Next Session Planning
template: |
@@ -2337,7 +2341,7 @@ You are the "Vibe CEO" - thinking like a CEO with unlimited resources and a sing
- **Claude Code**: `/agent-name` (e.g., `/bmad-master`)
- **Cursor**: `@agent-name` (e.g., `@bmad-master`)
- **Windsurf**: `@agent-name` (e.g., `@bmad-master`)
- **Windsurf**: `/agent-name` (e.g., `/bmad-master`)
- **Trae**: `@agent-name` (e.g., `@bmad-master`)
- **Roo Code**: Select mode from mode selector (e.g., `bmad-master`)
- **GitHub Copilot**: Open the Chat view (`⌃⌘I` on Mac, `Ctrl+Alt+I` on Windows/Linux) and select **Agent** from the chat mode selector.
@@ -2690,8 +2694,11 @@ Templates with Level 2 headings (`##`) can be automatically sharded:
```markdown
## Goals and Background Context
## Requirements
## User Interface Design Goals
## Success Metrics
```
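As a rough sketch of the result (the folder and file names below are assumptions derived from the heading text and the configured output location), each level-2 heading typically becomes its own shard file:

```yaml
# Hypothetical shard layout - paths are assumptions, actual output depends on core configuration
'## Goals and Background Context': docs/prd/goals-and-background-context.md
'## Requirements': docs/prd/requirements.md
'## User Interface Design Goals': docs/prd/user-interface-design-goals.md
'## Success Metrics': docs/prd/success-metrics.md
```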
@@ -233,63 +233,54 @@ CRITICAL: First, help the user select the most appropriate research focus based
Present these numbered options to the user:
1. **Product Validation Research**
- Validate product hypotheses and market fit
- Test assumptions about user needs and solutions
- Assess technical and business feasibility
- Identify risks and mitigation strategies
2. **Market Opportunity Research**
- Analyze market size and growth potential
- Identify market segments and dynamics
- Assess market entry strategies
- Evaluate timing and market readiness
3. **User & Customer Research**
- Deep dive into user personas and behaviors
- Understand jobs-to-be-done and pain points
- Map customer journeys and touchpoints
- Analyze willingness to pay and value perception
4. **Competitive Intelligence Research**
- Detailed competitor analysis and positioning
- Feature and capability comparisons
- Business model and strategy analysis
- Identify competitive advantages and gaps
5. **Technology & Innovation Research**
- Assess technology trends and possibilities
- Evaluate technical approaches and architectures
- Identify emerging technologies and disruptions
- Analyze build vs. buy vs. partner options
6. **Industry & Ecosystem Research**
- Map industry value chains and dynamics
- Identify key players and relationships
- Analyze regulatory and compliance factors
- Understand partnership opportunities
7. **Strategic Options Research**
- Evaluate different strategic directions
- Assess business model alternatives
- Analyze go-to-market strategies
- Consider expansion and scaling paths
8. **Risk & Feasibility Research**
- Identify and assess various risk factors
- Evaluate implementation challenges
- Analyze resource requirements
- Consider regulatory and legal implications
9. **Custom Research Focus**
- User-defined research objectives
- Specialized domain investigation
- Cross-functional research needs
@@ -458,13 +449,11 @@ CRITICAL: collaborate with the user to develop specific, actionable research que
### 5. Review and Refinement
1. **Present Complete Prompt**
- Show the full research prompt
- Explain key elements and rationale
- Highlight any assumptions made
2. **Gather Feedback**
- Are the objectives clear and correct?
- Do the questions address all concerns?
- Is the scope appropriate?
@@ -616,7 +605,7 @@ This document captures the CURRENT STATE of the [Project Name] codebase, includi
### Change Log
| Date | Version | Description | Author |
|------|---------|-------------|--------|
| ------ | ------- | --------------------------- | --------- |
| [Date] | 1.0 | Initial brownfield analysis | [Analyst] |
## Quick Reference - Key Files and Entry Points
@@ -641,7 +630,7 @@ This document captures the CURRENT STATE of the [Project Name] codebase, includi
### Actual Tech Stack (from package.json/requirements.txt)
| Category | Technology | Version | Notes |
|----------|------------|---------|--------|
| --------- | ---------- | ------- | -------------------------- |
| Runtime | Node.js | 16.x | [Any constraints] |
| Framework | Express | 4.18.2 | [Custom middleware?] |
| Database | PostgreSQL | 13 | [Connection pooling setup] |
@@ -683,6 +672,7 @@ project-root/
### Data Models
Instead of duplicating, reference actual model files:
- **User Model**: See `src/models/User.js`
- **Order Model**: See `src/models/Order.js`
- **Related Types**: TypeScript definitions in `src/types/`
@@ -713,7 +703,7 @@ Instead of duplicating, reference actual model files:
### External Services
| Service | Purpose | Integration Type | Key Files |
|---------|---------|------------------|-----------|
| -------- | -------- | ---------------- | ------------------------------ |
| Stripe | Payments | REST API | `src/integrations/stripe/` |
| SendGrid | Emails | SDK | `src/services/emailService.js` |
@@ -760,6 +750,7 @@ npm run test:integration # Runs integration tests (requires local DB)
### Files That Will Need Modification
Based on the enhancement requirements, these files will be affected:
- `src/services/userService.js` - Add new user fields
- `src/models/User.js` - Update schema
- `src/routes/userRoutes.js` - New endpoints
@@ -857,7 +848,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
## Instructions
1. **Initial Assessment**
- If user or the task being run provides a checklist name:
- Try fuzzy matching (e.g. "architecture checklist" -> "architect-checklist")
- If multiple matches found, ask user to clarify
@@ -870,14 +860,12 @@ If the user asks or does not specify a specific checklist, list the checklists a
- All at once (YOLO mode - recommended for checklists, there will be a summary of sections at the end to discuss)
2. **Document and Artifact Gathering**
- Each checklist will specify its required documents/artifacts at the beginning
- Follow the checklist's specific instructions for what to gather; generally a file can be resolved in the docs folder. If it cannot be found or you are unsure, halt and confirm with the user.
3. **Checklist Processing**
If in interactive mode:
- Work through each section of the checklist one at a time
- For each section:
- Review all items in the section, following the instructions for that section embedded in the checklist
@@ -886,7 +874,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
- Get user confirmation before proceeding to the next section, or halt and take corrective action if anything major is found
If in YOLO mode:
- Process all sections at once
- Create a comprehensive report of all findings
- Present the complete analysis to the user
@@ -894,7 +881,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
4. **Validation Approach**
For each checklist item:
- Read and understand the requirement
- Look for evidence in the documentation that satisfies the requirement
- Consider both explicit mentions and implicit coverage
@@ -908,7 +894,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
5. **Section Analysis**
For each section:
- think step by step to calculate pass rate
- Identify common themes in failed items
- Provide specific recommendations for improvement
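As a small worked example of the pass-rate step above (numbers invented), the arithmetic for one section might look like:

```yaml
# Illustrative only - a section with 10 applicable items, 7 of which passed
items_in_section: 10
items_passed: 7
pass_rate: 70% # 7 / 10
```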
@@ -918,7 +903,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
6. **Final Report**
Prepare a summary that includes:
- Overall checklist completion status
- Pass rates by section
- List of failed items with context
@@ -949,7 +933,7 @@ template:
output:
format: markdown
filename: docs/architecture.md
title: "{{project_name}} Architecture Document"
title: '{{project_name}} Architecture Document'
workflow:
mode: interactive
@@ -1060,11 +1044,11 @@ sections:
- Code organization patterns (Dependency Injection, Repository, Module, Factory)
- Data patterns (Event Sourcing, Saga, Database per Service)
- Communication patterns (REST, GraphQL, Message Queue, Pub/Sub)
template: "- **{{pattern_name}}:** {{pattern_description}} - _Rationale:_ {{rationale}}"
template: '- **{{pattern_name}}:** {{pattern_description}} - _Rationale:_ {{rationale}}'
examples:
- "**Serverless Architecture:** Using AWS Lambda for compute - _Rationale:_ Aligns with PRD requirement for cost optimization and automatic scaling"
- "**Repository Pattern:** Abstract data access logic - _Rationale:_ Enables testing and future database migration flexibility"
- "**Event-Driven Communication:** Using SNS/SQS for service decoupling - _Rationale:_ Supports async processing and system resilience"
- '**Serverless Architecture:** Using AWS Lambda for compute - _Rationale:_ Aligns with PRD requirement for cost optimization and automatic scaling'
- '**Repository Pattern:** Abstract data access logic - _Rationale:_ Enables testing and future database migration flexibility'
- '**Event-Driven Communication:** Using SNS/SQS for service decoupling - _Rationale:_ Supports async processing and system resilience'
- id: tech-stack
title: Tech Stack
@@ -1102,9 +1086,9 @@ sections:
columns: [Category, Technology, Version, Purpose, Rationale]
instruction: Populate the technology stack table with all relevant technologies
examples:
- "| **Language** | TypeScript | 5.3.3 | Primary development language | Strong typing, excellent tooling, team expertise |"
- "| **Runtime** | Node.js | 20.11.0 | JavaScript runtime | LTS version, stable performance, wide ecosystem |"
- "| **Framework** | NestJS | 10.3.2 | Backend framework | Enterprise-ready, good DI, matches team patterns |"
- '| **Language** | TypeScript | 5.3.3 | Primary development language | Strong typing, excellent tooling, team expertise |'
- '| **Runtime** | Node.js | 20.11.0 | JavaScript runtime | LTS version, stable performance, wide ecosystem |'
- '| **Framework** | NestJS | 10.3.2 | Backend framework | Enterprise-ready, good DI, matches team patterns |'
- id: data-models
title: Data Models
@@ -1122,7 +1106,7 @@ sections:
repeatable: true
sections:
- id: model
title: "{{model_name}}"
title: '{{model_name}}'
template: |
**Purpose:** {{model_purpose}}
@@ -1153,7 +1137,7 @@ sections:
sections:
- id: component-list
repeatable: true
title: "{{component_name}}"
title: '{{component_name}}'
template: |
**Responsibility:** {{component_description}}
@@ -1191,7 +1175,7 @@ sections:
repeatable: true
sections:
- id: api
title: "{{api_name}} API"
title: '{{api_name}} API'
template: |
- **Purpose:** {{api_purpose}}
- **Documentation:** {{api_docs_url}}
@@ -1316,12 +1300,12 @@ sections:
- id: environments
title: Environments
repeatable: true
template: "- **{{env_name}}:** {{env_purpose}} - {{env_details}}"
template: '- **{{env_name}}:** {{env_purpose}} - {{env_details}}'
- id: promotion-flow
title: Environment Promotion Flow
type: code
language: text
template: "{{promotion_flow_diagram}}"
template: '{{promotion_flow_diagram}}'
- id: rollback-strategy
title: Rollback Strategy
template: |
@@ -1417,16 +1401,16 @@ sections:
Avoid obvious rules like "use SOLID principles" or "write clean code"
repeatable: true
template: "- **{{rule_name}}:** {{rule_description}}"
template: '- **{{rule_name}}:** {{rule_description}}'
- id: language-specifics
title: Language-Specific Guidelines
condition: Critical language-specific rules needed
instruction: Add ONLY if critical for preventing AI mistakes. Most teams don't need this section.
sections:
- id: language-rules
title: "{{language_name}} Specifics"
title: '{{language_name}} Specifics'
repeatable: true
template: "- **{{rule_topic}}:** {{rule_detail}}"
template: '- **{{rule_topic}}:** {{rule_detail}}'
- id: test-strategy
title: Test Strategy and Standards
@@ -1474,9 +1458,9 @@ sections:
- **Test Infrastructure:**
- **{{dependency_name}}:** {{test_approach}} ({{test_tool}})
examples:
- "**Database:** In-memory H2 for unit tests, Testcontainers PostgreSQL for integration"
- "**Message Queue:** Embedded Kafka for tests"
- "**External APIs:** WireMock for stubbing"
- '**Database:** In-memory H2 for unit tests, Testcontainers PostgreSQL for integration'
- '**Message Queue:** Embedded Kafka for tests'
- '**External APIs:** WireMock for stubbing'
- id: e2e-tests
title: End-to-End Tests
template: |
@@ -1602,7 +1586,7 @@ template:
output:
format: markdown
filename: docs/ui-architecture.md
title: "{{project_name}} Frontend Architecture Document"
title: '{{project_name}} Frontend Architecture Document'
workflow:
mode: interactive
@@ -1670,17 +1654,29 @@ sections:
columns: [Category, Technology, Version, Purpose, Rationale]
instruction: Fill in appropriate technology choices based on the selected framework and project requirements.
rows:
- ["Framework", "{{framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["UI Library", "{{ui_library}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["State Management", "{{state_management}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Routing", "{{routing_library}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Build Tool", "{{build_tool}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Styling", "{{styling_solution}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Testing", "{{test_framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Component Library", "{{component_lib}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Form Handling", "{{form_library}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Animation", "{{animation_lib}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Dev Tools", "{{dev_tools}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ['Framework', '{{framework}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['UI Library', '{{ui_library}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- [
'State Management',
'{{state_management}}',
'{{version}}',
'{{purpose}}',
'{{why_chosen}}',
]
- ['Routing', '{{routing_library}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Build Tool', '{{build_tool}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Styling', '{{styling_solution}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Testing', '{{test_framework}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- [
'Component Library',
'{{component_lib}}',
'{{version}}',
'{{purpose}}',
'{{why_chosen}}',
]
- ['Form Handling', '{{form_library}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Animation', '{{animation_lib}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Dev Tools', '{{dev_tools}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- id: project-structure
title: Project Structure
@@ -1774,12 +1770,12 @@ sections:
title: Testing Best Practices
type: numbered-list
items:
- "**Unit Tests**: Test individual components in isolation"
- "**Integration Tests**: Test component interactions"
- "**E2E Tests**: Test critical user flows (using Cypress/Playwright)"
- "**Coverage Goals**: Aim for 80% code coverage"
- "**Test Structure**: Arrange-Act-Assert pattern"
- "**Mock External Dependencies**: API calls, routing, state management"
- '**Unit Tests**: Test individual components in isolation'
- '**Integration Tests**: Test component interactions'
- '**E2E Tests**: Test critical user flows (using Cypress/Playwright)'
- '**Coverage Goals**: Aim for 80% code coverage'
- '**Test Structure**: Arrange-Act-Assert pattern'
- '**Mock External Dependencies**: API calls, routing, state management'
- id: environment-configuration
title: Environment Configuration
@@ -1811,7 +1807,7 @@ template:
output:
format: markdown
filename: docs/architecture.md
title: "{{project_name}} Fullstack Architecture Document"
title: '{{project_name}} Fullstack Architecture Document'
workflow:
mode: interactive
@@ -1932,12 +1928,12 @@ sections:
For each pattern, provide recommendation and rationale.
repeatable: true
template: "- **{{pattern_name}}:** {{pattern_description}} - _Rationale:_ {{rationale}}"
template: '- **{{pattern_name}}:** {{pattern_description}} - _Rationale:_ {{rationale}}'
examples:
- "**Jamstack Architecture:** Static site generation with serverless APIs - _Rationale:_ Optimal performance and scalability for content-heavy applications"
- "**Component-Based UI:** Reusable React components with TypeScript - _Rationale:_ Maintainability and type safety across large codebases"
- "**Repository Pattern:** Abstract data access logic - _Rationale:_ Enables testing and future database migration flexibility"
- "**API Gateway Pattern:** Single entry point for all API calls - _Rationale:_ Centralized auth, rate limiting, and monitoring"
- '**Jamstack Architecture:** Static site generation with serverless APIs - _Rationale:_ Optimal performance and scalability for content-heavy applications'
- '**Component-Based UI:** Reusable React components with TypeScript - _Rationale:_ Maintainability and type safety across large codebases'
- '**Repository Pattern:** Abstract data access logic - _Rationale:_ Enables testing and future database migration flexibility'
- '**API Gateway Pattern:** Single entry point for all API calls - _Rationale:_ Centralized auth, rate limiting, and monitoring'
- id: tech-stack
title: Tech Stack
@@ -1961,27 +1957,45 @@ sections:
type: table
columns: [Category, Technology, Version, Purpose, Rationale]
rows:
- ["Frontend Language", "{{fe_language}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Frontend Framework", "{{fe_framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["UI Component Library", "{{ui_library}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["State Management", "{{state_mgmt}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Backend Language", "{{be_language}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Backend Framework", "{{be_framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["API Style", "{{api_style}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Database", "{{database}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Cache", "{{cache}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["File Storage", "{{storage}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Authentication", "{{auth}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Frontend Testing", "{{fe_test}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Backend Testing", "{{be_test}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["E2E Testing", "{{e2e_test}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Build Tool", "{{build_tool}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Bundler", "{{bundler}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["IaC Tool", "{{iac_tool}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["CI/CD", "{{cicd}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Monitoring", "{{monitoring}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Logging", "{{logging}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["CSS Framework", "{{css_framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ['Frontend Language', '{{fe_language}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- [
'Frontend Framework',
'{{fe_framework}}',
'{{version}}',
'{{purpose}}',
'{{why_chosen}}',
]
- [
'UI Component Library',
'{{ui_library}}',
'{{version}}',
'{{purpose}}',
'{{why_chosen}}',
]
- ['State Management', '{{state_mgmt}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Backend Language', '{{be_language}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- [
'Backend Framework',
'{{be_framework}}',
'{{version}}',
'{{purpose}}',
'{{why_chosen}}',
]
- ['API Style', '{{api_style}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Database', '{{database}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Cache', '{{cache}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['File Storage', '{{storage}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Authentication', '{{auth}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Frontend Testing', '{{fe_test}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Backend Testing', '{{be_test}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['E2E Testing', '{{e2e_test}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Build Tool', '{{build_tool}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Bundler', '{{bundler}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['IaC Tool', '{{iac_tool}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['CI/CD', '{{cicd}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Monitoring', '{{monitoring}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Logging', '{{logging}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['CSS Framework', '{{css_framework}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- id: data-models
title: Data Models
@@ -2000,7 +2014,7 @@ sections:
repeatable: true
sections:
- id: model
title: "{{model_name}}"
title: '{{model_name}}'
template: |
**Purpose:** {{model_purpose}}
@@ -2012,11 +2026,11 @@ sections:
title: TypeScript Interface
type: code
language: typescript
template: "{{model_interface}}"
template: '{{model_interface}}'
- id: relationships
title: Relationships
type: bullet-list
template: "- {{relationship}}"
template: '- {{relationship}}'
- id: api-spec
title: API Specification
@@ -2053,13 +2067,13 @@ sections:
condition: API style is GraphQL
type: code
language: graphql
template: "{{graphql_schema}}"
template: '{{graphql_schema}}'
- id: trpc-api
title: tRPC Router Definitions
condition: API style is tRPC
type: code
language: typescript
template: "{{trpc_routers}}"
template: '{{trpc_routers}}'
- id: components
title: Components
@@ -2080,7 +2094,7 @@ sections:
sections:
- id: component-list
repeatable: true
title: "{{component_name}}"
title: '{{component_name}}'
template: |
**Responsibility:** {{component_description}}
@@ -2118,7 +2132,7 @@ sections:
repeatable: true
sections:
- id: api
title: "{{api_name}} API"
title: '{{api_name}} API'
template: |
- **Purpose:** {{api_purpose}}
- **Documentation:** {{api_docs_url}}
@@ -2175,12 +2189,12 @@ sections:
title: Component Organization
type: code
language: text
template: "{{component_structure}}"
template: '{{component_structure}}'
- id: component-template
title: Component Template
type: code
language: typescript
template: "{{component_template}}"
template: '{{component_template}}'
- id: state-management
title: State Management Architecture
instruction: Detail state management approach based on chosen solution.
@@ -2189,11 +2203,11 @@ sections:
title: State Structure
type: code
language: typescript
template: "{{state_structure}}"
template: '{{state_structure}}'
- id: state-patterns
title: State Management Patterns
type: bullet-list
template: "- {{pattern}}"
template: '- {{pattern}}'
- id: routing-architecture
title: Routing Architecture
instruction: Define routing structure based on framework choice.
@@ -2202,12 +2216,12 @@ sections:
title: Route Organization
type: code
language: text
template: "{{route_structure}}"
template: '{{route_structure}}'
- id: protected-routes
title: Protected Route Pattern
type: code
language: typescript
template: "{{protected_route_example}}"
template: '{{protected_route_example}}'
- id: frontend-services
title: Frontend Services Layer
instruction: Define how frontend communicates with backend.
@@ -2216,12 +2230,12 @@ sections:
title: API Client Setup
type: code
language: typescript
template: "{{api_client_setup}}"
template: '{{api_client_setup}}'
- id: service-example
title: Service Example
type: code
language: typescript
template: "{{service_example}}"
template: '{{service_example}}'
- id: backend-architecture
title: Backend Architecture
@@ -2239,12 +2253,12 @@ sections:
title: Function Organization
type: code
language: text
template: "{{function_structure}}"
template: '{{function_structure}}'
- id: function-template
title: Function Template
type: code
language: typescript
template: "{{function_template}}"
template: '{{function_template}}'
- id: traditional-server
condition: Traditional server architecture chosen
sections:
@@ -2252,12 +2266,12 @@ sections:
title: Controller/Route Organization
type: code
language: text
template: "{{controller_structure}}"
template: '{{controller_structure}}'
- id: controller-template
title: Controller Template
type: code
language: typescript
template: "{{controller_template}}"
template: '{{controller_template}}'
- id: database-architecture
title: Database Architecture
instruction: Define database schema and access patterns.
@@ -2266,12 +2280,12 @@ sections:
title: Schema Design
type: code
language: sql
template: "{{database_schema}}"
template: '{{database_schema}}'
- id: data-access-layer
title: Data Access Layer
type: code
language: typescript
template: "{{repository_pattern}}"
template: '{{repository_pattern}}'
- id: auth-architecture
title: Authentication and Authorization
instruction: Define auth implementation details.
@@ -2280,12 +2294,12 @@ sections:
title: Auth Flow
type: mermaid
mermaid_type: sequence
template: "{{auth_flow_diagram}}"
template: '{{auth_flow_diagram}}'
- id: auth-middleware
title: Middleware/Guards
type: code
language: typescript
template: "{{auth_middleware}}"
template: '{{auth_middleware}}'
- id: unified-project-structure
title: Unified Project Structure
@@ -2361,12 +2375,12 @@ sections:
title: Prerequisites
type: code
language: bash
template: "{{prerequisites_commands}}"
template: '{{prerequisites_commands}}'
- id: initial-setup
title: Initial Setup
type: code
language: bash
template: "{{setup_commands}}"
template: '{{setup_commands}}'
- id: dev-commands
title: Development Commands
type: code
@@ -2422,15 +2436,15 @@ sections:
title: CI/CD Pipeline
type: code
language: yaml
template: "{{cicd_pipeline_config}}"
template: '{{cicd_pipeline_config}}'
- id: environments
title: Environments
type: table
columns: [Environment, Frontend URL, Backend URL, Purpose]
rows:
- ["Development", "{{dev_fe_url}}", "{{dev_be_url}}", "Local development"]
- ["Staging", "{{staging_fe_url}}", "{{staging_be_url}}", "Pre-production testing"]
- ["Production", "{{prod_fe_url}}", "{{prod_be_url}}", "Live environment"]
- ['Development', '{{dev_fe_url}}', '{{dev_be_url}}', 'Local development']
- ['Staging', '{{staging_fe_url}}', '{{staging_be_url}}', 'Pre-production testing']
- ['Production', '{{prod_fe_url}}', '{{prod_be_url}}', 'Live environment']
- id: security-performance
title: Security and Performance
@@ -2489,17 +2503,17 @@ sections:
title: Frontend Tests
type: code
language: text
template: "{{frontend_test_structure}}"
template: '{{frontend_test_structure}}'
- id: backend-tests
title: Backend Tests
type: code
language: text
template: "{{backend_test_structure}}"
template: '{{backend_test_structure}}'
- id: e2e-tests
title: E2E Tests
type: code
language: text
template: "{{e2e_test_structure}}"
template: '{{e2e_test_structure}}'
- id: test-examples
title: Test Examples
sections:
@@ -2507,17 +2521,17 @@ sections:
title: Frontend Component Test
type: code
language: typescript
template: "{{frontend_test_example}}"
template: '{{frontend_test_example}}'
- id: backend-test
title: Backend API Test
type: code
language: typescript
template: "{{backend_test_example}}"
template: '{{backend_test_example}}'
- id: e2e-test
title: E2E Test
type: code
language: typescript
template: "{{e2e_test_example}}"
template: '{{e2e_test_example}}'
- id: coding-standards
title: Coding Standards
@@ -2527,22 +2541,22 @@ sections:
- id: critical-rules
title: Critical Fullstack Rules
repeatable: true
template: "- **{{rule_name}}:** {{rule_description}}"
template: '- **{{rule_name}}:** {{rule_description}}'
examples:
- "**Type Sharing:** Always define types in packages/shared and import from there"
- "**API Calls:** Never make direct HTTP calls - use the service layer"
- "**Environment Variables:** Access only through config objects, never process.env directly"
- "**Error Handling:** All API routes must use the standard error handler"
- "**State Updates:** Never mutate state directly - use proper state management patterns"
- '**Type Sharing:** Always define types in packages/shared and import from there'
- '**API Calls:** Never make direct HTTP calls - use the service layer'
- '**Environment Variables:** Access only through config objects, never process.env directly'
- '**Error Handling:** All API routes must use the standard error handler'
- '**State Updates:** Never mutate state directly - use proper state management patterns'
- id: naming-conventions
title: Naming Conventions
type: table
columns: [Element, Frontend, Backend, Example]
rows:
- ["Components", "PascalCase", "-", "`UserProfile.tsx`"]
- ["Hooks", "camelCase with 'use'", "-", "`useAuth.ts`"]
- ["API Routes", "-", "kebab-case", "`/api/user-profile`"]
- ["Database Tables", "-", "snake_case", "`user_profiles`"]
- ['Components', 'PascalCase', '-', '`UserProfile.tsx`']
- ['Hooks', "camelCase with 'use'", '-', '`useAuth.ts`']
- ['API Routes', '-', 'kebab-case', '`/api/user-profile`']
- ['Database Tables', '-', 'snake_case', '`user_profiles`']
- id: error-handling
title: Error Handling Strategy
@@ -2553,7 +2567,7 @@ sections:
title: Error Flow
type: mermaid
mermaid_type: sequence
template: "{{error_flow_diagram}}"
template: '{{error_flow_diagram}}'
- id: error-format
title: Error Response Format
type: code
@@ -2572,12 +2586,12 @@ sections:
title: Frontend Error Handling
type: code
language: typescript
template: "{{frontend_error_handler}}"
template: '{{frontend_error_handler}}'
- id: backend-error-handling
title: Backend Error Handling
type: code
language: typescript
template: "{{backend_error_handler}}"
template: '{{backend_error_handler}}'
- id: monitoring
title: Monitoring and Observability
@@ -2619,7 +2633,7 @@ template:
output:
format: markdown
filename: docs/architecture.md
title: "{{project_name}} Brownfield Enhancement Architecture"
title: '{{project_name}} Brownfield Enhancement Architecture'
workflow:
mode: interactive
@@ -2677,11 +2691,11 @@ sections:
- id: available-docs
title: Available Documentation
type: bullet-list
template: "- {{existing_docs_summary}}"
template: '- {{existing_docs_summary}}'
- id: constraints
title: Identified Constraints
type: bullet-list
template: "- {{constraint}}"
template: '- {{constraint}}'
- id: changelog
title: Change Log
type: table
@@ -2761,7 +2775,7 @@ sections:
repeatable: true
sections:
- id: model
title: "{{model_name}}"
title: '{{model_name}}'
template: |
**Purpose:** {{model_purpose}}
**Integration:** {{integration_with_existing}}
@@ -2804,7 +2818,7 @@ sections:
repeatable: true
sections:
- id: component
title: "{{component_name}}"
title: '{{component_name}}'
template: |
**Responsibility:** {{component_description}}
**Integration Points:** {{integration_points}}
@@ -2847,7 +2861,7 @@ sections:
repeatable: true
sections:
- id: endpoint
title: "{{endpoint_name}}"
title: '{{endpoint_name}}'
template: |
- **Method:** {{http_method}}
- **Endpoint:** {{endpoint_path}}
@@ -2858,12 +2872,12 @@ sections:
title: Request
type: code
language: json
template: "{{request_schema}}"
template: '{{request_schema}}'
- id: response
title: Response
type: code
language: json
template: "{{response_schema}}"
template: '{{response_schema}}'
- id: external-api-integration
title: External API Integration
@@ -2872,7 +2886,7 @@ sections:
repeatable: true
sections:
- id: external-api
title: "{{api_name}} API"
title: '{{api_name}} API'
template: |
- **Purpose:** {{api_purpose}}
- **Documentation:** {{api_docs_url}}
@@ -2901,7 +2915,7 @@ sections:
type: code
language: plaintext
instruction: Document relevant parts of current structure
template: "{{existing_structure_relevant_parts}}"
template: '{{existing_structure_relevant_parts}}'
- id: new-file-organization
title: New File Organization
type: code
@@ -2976,7 +2990,7 @@ sections:
title: Enhancement-Specific Standards
condition: New patterns needed for enhancement
repeatable: true
template: "- **{{standard_name}}:** {{standard_description}}"
template: '- **{{standard_name}}:** {{standard_description}}'
- id: integration-rules
title: Critical Integration Rules
template: |
@@ -3496,33 +3510,28 @@ Ask the user if they want to work through the checklist:
Now that you've completed the checklist, generate a comprehensive validation report that includes:
1. Executive Summary
- Overall architecture readiness (High/Medium/Low)
- Critical risks identified
- Key strengths of the architecture
- Project type (Full-stack/Frontend/Backend) and sections evaluated
2. Section Analysis
- Pass rate for each major section (percentage of items passed)
- Most concerning failures or gaps
- Sections requiring immediate attention
- Note any sections skipped due to project type
3. Risk Assessment
- Top 5 risks by severity
- Mitigation recommendations for each
- Timeline impact of addressing issues
4. Recommendations
- Must-fix items before development
- Should-fix items for better quality
- Nice-to-have improvements
5. AI Implementation Readiness
- Specific concerns for AI agent implementation
- Areas needing additional clarification
- Complexity hotspots to address
File diff suppressed because it is too large.
@@ -405,7 +405,7 @@ Provide a user-friendly interface to the BMad knowledge base without overwhelmin
## Instructions
When entering KB mode (*kb-mode), follow these steps:
When entering KB mode (\*kb-mode), follow these steps:
### 1. Welcome and Guide
@@ -447,12 +447,12 @@ Or ask me about anything else related to BMad-Method!
When user is done or wants to exit KB mode:
- Summarize key points discussed if helpful
- Remind them they can return to KB mode anytime with *kb-mode
- Remind them they can return to KB mode anytime with \*kb-mode
- Suggest next steps based on what was discussed
## Example Interaction
**User**: *kb-mode
**User**: \*kb-mode
**Assistant**: I've entered KB mode and have access to the full BMad knowledge base. I can help you with detailed information about any aspect of BMad-Method.
@@ -775,7 +775,7 @@ You are the "Vibe CEO" - thinking like a CEO with unlimited resources and a sing
- **Claude Code**: `/agent-name` (e.g., `/bmad-master`)
- **Cursor**: `@agent-name` (e.g., `@bmad-master`)
- **Windsurf**: `@agent-name` (e.g., `@bmad-master`)
- **Windsurf**: `/agent-name` (e.g., `/bmad-master`)
- **Trae**: `@agent-name` (e.g., `@bmad-master`)
- **Roo Code**: Select mode from mode selector (e.g., `bmad-master`)
- **GitHub Copilot**: Open the Chat view (`⌃⌘I` on Mac, `Ctrl+Alt+I` on Windows/Linux) and select **Agent** from the chat mode selector.
@@ -1128,8 +1128,11 @@ Templates with Level 2 headings (`##`) can be automatically sharded:
```markdown
## Goals and Background Context
## Requirements
## User Interface Design Goals
## Success Metrics
```
@@ -1286,16 +1289,19 @@ Use the **expansion-creator** pack to build your own:
## Core Reflective Methods
**Expand or Contract for Audience**
- Ask whether to 'expand' (add detail, elaborate) or 'contract' (simplify, clarify)
- Identify specific target audience if relevant
- Tailor content complexity and depth accordingly
**Explain Reasoning (CoT Step-by-Step)**
- Walk through the step-by-step thinking process
- Reveal underlying assumptions and decision points
- Show how conclusions were reached from current role's perspective
**Critique and Refine**
- Review output for flaws, inconsistencies, or improvement areas
- Identify specific weaknesses from role's expertise
- Suggest refined version reflecting domain knowledge
@@ -1303,12 +1309,14 @@ Use the **expansion-creator** pack to build your own:
## Structural Analysis Methods
**Analyze Logical Flow and Dependencies**
- Examine content structure for logical progression
- Check internal consistency and coherence
- Identify and validate dependencies between elements
- Confirm effective ordering and sequencing
**Assess Alignment with Overall Goals**
- Evaluate content contribution to stated objectives
- Identify any misalignments or gaps
- Interpret alignment from specific role's perspective
@@ -1317,12 +1325,14 @@ Use the **expansion-creator** pack to build your own:
## Risk and Challenge Methods
**Identify Potential Risks and Unforeseen Issues**
- Brainstorm potential risks from role's expertise
- Identify overlooked edge cases or scenarios
- Anticipate unintended consequences
- Highlight implementation challenges
**Challenge from Critical Perspective**
- Adopt critical stance on current content
- Play devil's advocate from specified viewpoint
- Argue against proposal highlighting weaknesses
@@ -1331,12 +1341,14 @@ Use the **expansion-creator** pack to build your own:
## Creative Exploration Methods
**Tree of Thoughts Deep Dive**
- Break problem into discrete "thoughts" or intermediate steps
- Explore multiple reasoning paths simultaneously
- Use self-evaluation to classify each path as "sure", "likely", or "impossible"
- Apply search algorithms (BFS/DFS) to find optimal solution paths
**Hindsight is 20/20: The 'If Only...' Reflection**
- Imagine retrospective scenario based on current content
- Identify the one "if only we had known/done X..." insight
- Describe imagined consequences humorously or dramatically
@@ -1345,6 +1357,7 @@ Use the **expansion-creator** pack to build your own:
## Multi-Persona Collaboration Methods
**Agile Team Perspective Shift**
- Rotate through different Scrum team member viewpoints
- Product Owner: Focus on user value and business impact
- Scrum Master: Examine process flow and team dynamics
@@ -1352,12 +1365,14 @@ Use the **expansion-creator** pack to build your own:
- QA: Identify testing scenarios and quality concerns
**Stakeholder Round Table**
- Convene virtual meeting with multiple personas
- Each persona contributes unique perspective on content
- Identify conflicts and synergies between viewpoints
- Synthesize insights into actionable recommendations
**Meta-Prompting Analysis**
- Step back to analyze the structure and logic of current approach
- Question the format and methodology being used
- Suggest alternative frameworks or mental models
@@ -1366,24 +1381,28 @@ Use the **expansion-creator** pack to build your own:
## Advanced 2025 Techniques
**Self-Consistency Validation**
- Generate multiple reasoning paths for same problem
- Compare consistency across different approaches
- Identify most reliable and robust solution
- Highlight areas where approaches diverge and why
**ReWOO (Reasoning Without Observation)**
- Separate parametric reasoning from tool-based actions
- Create reasoning plan without external dependencies
- Identify what can be solved through pure reasoning
- Optimize for efficiency and reduced token usage
**Persona-Pattern Hybrid**
- Combine specific role expertise with elicitation pattern
- Architect + Risk Analysis: Deep technical risk assessment
- UX Expert + User Journey: End-to-end experience critique
- PM + Stakeholder Analysis: Multi-perspective impact review
**Emergent Collaboration Discovery**
- Allow multiple perspectives to naturally emerge
- Identify unexpected insights from persona interactions
- Explore novel combinations of viewpoints
@@ -1392,18 +1411,21 @@ Use the **expansion-creator** pack to build your own:
## Game-Based Elicitation Methods
**Red Team vs Blue Team**
- Red Team: Attack the proposal, find vulnerabilities
- Blue Team: Defend and strengthen the approach
- Competitive analysis reveals blind spots
- Results in more robust, battle-tested solutions
**Innovation Tournament**
- Pit multiple alternative approaches against each other
- Score each approach across different criteria
- Crowd-source evaluation from different personas
- Identify winning combination of features
**Escape Room Challenge**
- Present content as constraints to work within
- Find creative solutions within tight limitations
- Identify minimum viable approach
@@ -1412,6 +1434,7 @@ Use the **expansion-creator** pack to build your own:
## Process Control
**Proceed / No Further Actions**
- Acknowledge choice to finalize current work
- Accept output as-is or move to next step
- Prepare to continue without additional elicitation

dist/agents/dev.txt (vendored, 14 changes)

@@ -102,7 +102,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
## Instructions
1. **Initial Assessment**
- If user or the task being run provides a checklist name:
- Try fuzzy matching (e.g. "architecture checklist" -> "architect-checklist")
- If multiple matches found, ask user to clarify
@@ -115,14 +114,12 @@ If the user asks or does not specify a specific checklist, list the checklists a
- All at once (YOLO mode - recommended for checklists, there will be a summary of sections at the end to discuss)
2. **Document and Artifact Gathering**
- Each checklist will specify its required documents/artifacts at the beginning
- Follow the checklist's specific instructions for what to gather, generally a file can be resolved in the docs folder, if not or unsure, halt and ask or confirm with the user.
3. **Checklist Processing**
If in interactive mode:
- Work through each section of the checklist one at a time
- For each section:
- Review all items in the section following instructions for that section embedded in the checklist
@@ -131,7 +128,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
- Get user confirmation before proceeding to next section or if any thing major do we need to halt and take corrective action
If in YOLO mode:
- Process all sections at once
- Create a comprehensive report of all findings
- Present the complete analysis to the user
@@ -139,7 +135,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
4. **Validation Approach**
For each checklist item:
- Read and understand the requirement
- Look for evidence in the documentation that satisfies the requirement
- Consider both explicit mentions and implicit coverage
@@ -153,7 +148,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
5. **Section Analysis**
For each section:
- think step by step to calculate pass rate
- Identify common themes in failed items
- Provide specific recommendations for improvement
@@ -163,7 +157,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
6. **Final Report**
Prepare a summary that includes:
- Overall checklist completion status
- Pass rates by section
- List of failed items with context
@@ -351,14 +344,12 @@ The goal is quality delivery, not just checking boxes.]]
1. **Requirements Met:**
[[LLM: Be specific - list each requirement and whether it's complete]]
- [ ] All functional requirements specified in the story are implemented.
- [ ] All acceptance criteria defined in the story are met.
2. **Coding Standards & Project Structure:**
[[LLM: Code quality matters for maintainability. Check each item carefully]]
- [ ] All new/modified code strictly adheres to `Operational Guidelines`.
- [ ] All new/modified code aligns with `Project Structure` (file locations, naming, etc.).
- [ ] Adherence to `Tech Stack` for technologies/versions used (if story introduces or modifies tech usage).
@@ -370,7 +361,6 @@ The goal is quality delivery, not just checking boxes.]]
3. **Testing:**
[[LLM: Testing proves your code works. Be honest about test coverage]]
- [ ] All required unit tests as per the story and `Operational Guidelines` Testing Strategy are implemented.
- [ ] All required integration tests (if applicable) as per the story and `Operational Guidelines` Testing Strategy are implemented.
- [ ] All tests (unit, integration, E2E if applicable) pass successfully.
@@ -379,14 +369,12 @@ The goal is quality delivery, not just checking boxes.]]
4. **Functionality & Verification:**
[[LLM: Did you actually run and test your code? Be specific about what you tested]]
- [ ] Functionality has been manually verified by the developer (e.g., running the app locally, checking UI, testing API endpoints).
- [ ] Edge cases and potential error conditions considered and handled gracefully.
5. **Story Administration:**
[[LLM: Documentation helps the next developer. What should they know?]]
- [ ] All tasks within the story file are marked as complete.
- [ ] Any clarifications or decisions made during development are documented in the story file or linked appropriately.
- [ ] The story wrap up section has been completed with notes of changes or information relevant to the next story or overall project, the agent model that was primarily used during development, and the changelog of any changes is properly updated.
@@ -394,7 +382,6 @@ The goal is quality delivery, not just checking boxes.]]
6. **Dependencies, Build & Configuration:**
[[LLM: Build issues block everyone. Ensure everything compiles and runs cleanly]]
- [ ] Project builds successfully without errors.
- [ ] Project linting passes
- [ ] Any new dependencies added were either pre-approved in the story requirements OR explicitly approved by the user during development (approval documented in story file).
@@ -405,7 +392,6 @@ The goal is quality delivery, not just checking boxes.]]
7. **Documentation (If Applicable):**
[[LLM: Good documentation prevents future confusion. What needs explaining?]]
- [ ] Relevant inline code documentation (e.g., JSDoc, TSDoc, Python docstrings) for new public APIs or complex logic is complete.
- [ ] User-facing documentation updated, if changes impact users.
- [ ] Technical documentation (e.g., READMEs, system diagrams) updated if significant architectural changes were made.

dist/agents/pm.txt (vendored, 105 changes)

@@ -304,63 +304,54 @@ CRITICAL: First, help the user select the most appropriate research focus based
Present these numbered options to the user:
1. **Product Validation Research**
- Validate product hypotheses and market fit
- Test assumptions about user needs and solutions
- Assess technical and business feasibility
- Identify risks and mitigation strategies
2. **Market Opportunity Research**
- Analyze market size and growth potential
- Identify market segments and dynamics
- Assess market entry strategies
- Evaluate timing and market readiness
3. **User & Customer Research**
- Deep dive into user personas and behaviors
- Understand jobs-to-be-done and pain points
- Map customer journeys and touchpoints
- Analyze willingness to pay and value perception
4. **Competitive Intelligence Research**
- Detailed competitor analysis and positioning
- Feature and capability comparisons
- Business model and strategy analysis
- Identify competitive advantages and gaps
5. **Technology & Innovation Research**
- Assess technology trends and possibilities
- Evaluate technical approaches and architectures
- Identify emerging technologies and disruptions
- Analyze build vs. buy vs. partner options
6. **Industry & Ecosystem Research**
- Map industry value chains and dynamics
- Identify key players and relationships
- Analyze regulatory and compliance factors
- Understand partnership opportunities
7. **Strategic Options Research**
- Evaluate different strategic directions
- Assess business model alternatives
- Analyze go-to-market strategies
- Consider expansion and scaling paths
8. **Risk & Feasibility Research**
- Identify and assess various risk factors
- Evaluate implementation challenges
- Analyze resource requirements
- Consider regulatory and legal implications
9. **Custom Research Focus**
- User-defined research objectives
- Specialized domain investigation
- Cross-functional research needs
@@ -529,13 +520,11 @@ CRITICAL: collaborate with the user to develop specific, actionable research que
### 5. Review and Refinement
1. **Present Complete Prompt**
- Show the full research prompt
- Explain key elements and rationale
- Highlight any assumptions made
2. **Gather Feedback**
- Are the objectives clear and correct?
- Do the questions address all concerns?
- Is the scope appropriate?
@@ -897,7 +886,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
## Instructions
1. **Initial Assessment**
- If user or the task being run provides a checklist name:
- Try fuzzy matching (e.g. "architecture checklist" -> "architect-checklist")
- If multiple matches found, ask user to clarify
@@ -910,14 +898,12 @@ If the user asks or does not specify a specific checklist, list the checklists a
- All at once (YOLO mode - recommended for checklists, there will be a summary of sections at the end to discuss)
2. **Document and Artifact Gathering**
- Each checklist will specify its required documents/artifacts at the beginning
- Follow the checklist's specific instructions for what to gather, generally a file can be resolved in the docs folder, if not or unsure, halt and ask or confirm with the user.
3. **Checklist Processing**
If in interactive mode:
- Work through each section of the checklist one at a time
- For each section:
- Review all items in the section following instructions for that section embedded in the checklist
@@ -926,7 +912,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
- Get user confirmation before proceeding to next section or if any thing major do we need to halt and take corrective action
If in YOLO mode:
- Process all sections at once
- Create a comprehensive report of all findings
- Present the complete analysis to the user
@@ -934,7 +919,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
4. **Validation Approach**
For each checklist item:
- Read and understand the requirement
- Look for evidence in the documentation that satisfies the requirement
- Consider both explicit mentions and implicit coverage
@@ -948,7 +932,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
5. **Section Analysis**
For each section:
- think step by step to calculate pass rate
- Identify common themes in failed items
- Provide specific recommendations for improvement
@@ -958,7 +941,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
6. **Final Report**
Prepare a summary that includes:
- Overall checklist completion status
- Pass rates by section
- List of failed items with context
@@ -1075,13 +1057,11 @@ CRITICAL: Use proper parsing that understands markdown context. A ## inside a co
For each extracted section:
1. **Generate filename**: Convert the section heading to lowercase-dash-case
- Remove special characters
- Replace spaces with dashes
- Example: "## Tech Stack" → `tech-stack.md`
2. **Adjust heading levels**:
- The level 2 heading becomes level 1 (# instead of ##) in the sharded new document
- All subsection levels decrease by 1:
@@ -1179,7 +1159,7 @@ template:
output:
format: markdown
filename: docs/prd.md
title: "{{project_name}} Product Requirements Document (PRD)"
title: '{{project_name}} Product Requirements Document (PRD)'
workflow:
mode: interactive
@@ -1216,14 +1196,14 @@ sections:
prefix: FR
instruction: Each Requirement will be a bullet markdown and an identifier sequence starting with FR
examples:
- "FR6: The Todo List uses AI to detect and warn against potentially duplicate todo items that are worded differently."
- 'FR6: The Todo List uses AI to detect and warn against potentially duplicate todo items that are worded differently.'
- id: non-functional
title: Non Functional
type: numbered-list
prefix: NFR
instruction: Each Requirement will be a bullet markdown and an identifier sequence starting with NFR
examples:
- "NFR1: AWS service usage must aim to stay within free-tier limits where feasible."
- 'NFR1: AWS service usage must aim to stay within free-tier limits where feasible.'
- id: ui-goals
title: User Interface Design Goals
@@ -1249,24 +1229,24 @@ sections:
title: Core Screens and Views
instruction: From a product perspective, what are the most critical screens or views necessary to deliver the the PRD values and goals? This is meant to be Conceptual High Level to Drive Rough Epic or User Stories
examples:
- "Login Screen"
- "Main Dashboard"
- "Item Detail Page"
- "Settings Page"
- 'Login Screen'
- 'Main Dashboard'
- 'Item Detail Page'
- 'Settings Page'
- id: accessibility
title: "Accessibility: {None|WCAG AA|WCAG AAA|Custom Requirements}"
title: 'Accessibility: {None|WCAG AA|WCAG AAA|Custom Requirements}'
- id: branding
title: Branding
instruction: Any known branding elements or style guides that must be incorporated?
examples:
- "Replicate the look and feel of early 1900s black and white cinema, including animated effects replicating film damage or projector glitches during page or state transitions."
- "Attached is the full color pallet and tokens for our corporate branding."
- 'Replicate the look and feel of early 1900s black and white cinema, including animated effects replicating film damage or projector glitches during page or state transitions.'
- 'Attached is the full color pallet and tokens for our corporate branding.'
- id: target-platforms
title: "Target Device and Platforms: {Web Responsive|Mobile Only|Desktop Only|Cross-Platform}"
title: 'Target Device and Platforms: {Web Responsive|Mobile Only|Desktop Only|Cross-Platform}'
examples:
- "Web Responsive, and all mobile platforms"
- "iPhone Only"
- "ASCII Windows Desktop"
- 'Web Responsive, and all mobile platforms'
- 'iPhone Only'
- 'ASCII Windows Desktop'
- id: technical-assumptions
title: Technical Assumptions
@@ -1285,13 +1265,13 @@ sections:
testing: [Unit Only, Unit + Integration, Full Testing Pyramid]
sections:
- id: repository-structure
title: "Repository Structure: {Monorepo|Polyrepo|Multi-repo}"
title: 'Repository Structure: {Monorepo|Polyrepo|Multi-repo}'
- id: service-architecture
title: Service Architecture
instruction: "CRITICAL DECISION - Document the high-level service architecture (e.g., Monolith, Microservices, Serverless functions within a Monorepo)."
instruction: 'CRITICAL DECISION - Document the high-level service architecture (e.g., Monolith, Microservices, Serverless functions within a Monorepo).'
- id: testing-requirements
title: Testing Requirements
instruction: "CRITICAL DECISION - Document the testing requirements, unit only, integration, e2e, manual, need for manual testing convenience methods)."
instruction: 'CRITICAL DECISION - Document the testing requirements, unit only, integration, e2e, manual, need for manual testing convenience methods).'
- id: additional-assumptions
title: Additional Technical Assumptions and Requests
instruction: Throughout the entire process of drafting this document, if any other technical assumptions are raised or discovered appropriate for the architect, add them here as additional bulleted items
@@ -1311,10 +1291,10 @@ sections:
- Cross Cutting Concerns should flow through epics and stories and not be final stories. For example, adding a logging framework as a last story of an epic, or at the end of a project as a final epic or story would be terrible as we would not have logging from the beginning.
elicit: true
examples:
- "Epic 1: Foundation & Core Infrastructure: Establish project setup, authentication, and basic user management"
- "Epic 2: Core Business Entities: Create and manage primary domain objects with CRUD operations"
- "Epic 3: User Workflows & Interactions: Enable key user journeys and business processes"
- "Epic 4: Reporting & Analytics: Provide insights and data visualization for users"
- 'Epic 1: Foundation & Core Infrastructure: Establish project setup, authentication, and basic user management'
- 'Epic 2: Core Business Entities: Create and manage primary domain objects with CRUD operations'
- 'Epic 3: User Workflows & Interactions: Enable key user journeys and business processes'
- 'Epic 4: Reporting & Analytics: Provide insights and data visualization for users'
- id: epic-details
title: Epic {{epic_number}} {{epic_title}}
@@ -1336,7 +1316,7 @@ sections:
- Think "junior developer working for 2-4 hours" - stories must be small, focused, and self-contained
- If a story seems complex, break it down further as long as it can deliver a vertical slice
elicit: true
template: "{{epic_goal}}"
template: '{{epic_goal}}'
sections:
- id: story
title: Story {{epic_number}}.{{story_number}} {{story_title}}
@@ -1349,7 +1329,7 @@ sections:
- id: acceptance-criteria
title: Acceptance Criteria
type: numbered-list
item_template: "{{criterion_number}}: {{criteria}}"
item_template: '{{criterion_number}}: {{criteria}}'
repeatable: true
instruction: |
Define clear, comprehensive, and testable acceptance criteria that:
@@ -1384,7 +1364,7 @@ template:
output:
format: markdown
filename: docs/prd.md
title: "{{project_name}} Brownfield Enhancement PRD"
title: '{{project_name}} Brownfield Enhancement PRD'
workflow:
mode: interactive
@@ -1447,7 +1427,7 @@ sections:
- External API Documentation [[LLM: If from document-project, check ✓]]
- UX/UI Guidelines [[LLM: May not be in document-project]]
- Technical Debt Documentation [[LLM: If from document-project, check ✓]]
- "Other: {{other_docs}}"
- 'Other: {{other_docs}}'
instruction: |
- If document-project was already run: "Using existing project analysis from document-project output."
- If critical documentation is missing and no document-project: "I recommend running the document-project task first..."
@@ -1467,7 +1447,7 @@ sections:
- UI/UX Overhaul
- Technology Stack Upgrade
- Bug Fix and Stability Improvements
- "Other: {{other_type}}"
- 'Other: {{other_type}}'
- id: enhancement-description
title: Enhancement Description
instruction: 2-3 sentences describing what the user wants to add or change
@@ -1508,29 +1488,29 @@ sections:
prefix: FR
instruction: Each Requirement will be a bullet markdown with identifier starting with FR
examples:
- "FR1: The existing Todo List will integrate with the new AI duplicate detection service without breaking current functionality."
- 'FR1: The existing Todo List will integrate with the new AI duplicate detection service without breaking current functionality.'
- id: non-functional
title: Non Functional
type: numbered-list
prefix: NFR
instruction: Each Requirement will be a bullet markdown with identifier starting with NFR. Include constraints from existing system
examples:
- "NFR1: Enhancement must maintain existing performance characteristics and not exceed current memory usage by more than 20%."
- 'NFR1: Enhancement must maintain existing performance characteristics and not exceed current memory usage by more than 20%.'
- id: compatibility
title: Compatibility Requirements
instruction: Critical for brownfield - what must remain compatible
type: numbered-list
prefix: CR
template: "{{requirement}}: {{description}}"
template: '{{requirement}}: {{description}}'
items:
- id: cr1
template: "CR1: {{existing_api_compatibility}}"
template: 'CR1: {{existing_api_compatibility}}'
- id: cr2
template: "CR2: {{database_schema_compatibility}}"
template: 'CR2: {{database_schema_compatibility}}'
- id: cr3
template: "CR3: {{ui_ux_consistency}}"
template: 'CR3: {{ui_ux_consistency}}'
- id: cr4
template: "CR4: {{integration_compatibility}}"
template: 'CR4: {{integration_compatibility}}'
- id: ui-enhancement-goals
title: User Interface Enhancement Goals
@@ -1613,10 +1593,10 @@ sections:
- id: epic-approach
title: Epic Approach
instruction: Explain the rationale for epic structure - typically single epic for brownfield unless multiple unrelated features
template: "**Epic Structure Decision**: {{epic_decision}} with rationale"
template: '**Epic Structure Decision**: {{epic_decision}} with rationale'
- id: epic-details
title: "Epic 1: {{enhancement_title}}"
title: 'Epic 1: {{enhancement_title}}'
instruction: |
Comprehensive epic that delivers the brownfield enhancement while maintaining existing functionality
@@ -1636,7 +1616,7 @@ sections:
**Integration Requirements**: {{integration_requirements}}
sections:
- id: story
title: "Story 1.{{story_number}} {{story_title}}"
title: 'Story 1.{{story_number}} {{story_title}}'
repeatable: true
template: |
As a {{user_type}},
@@ -1647,16 +1627,16 @@ sections:
title: Acceptance Criteria
type: numbered-list
instruction: Define criteria that include both new functionality and existing system integrity
item_template: "{{criterion_number}}: {{criteria}}"
item_template: '{{criterion_number}}: {{criteria}}'
- id: integration-verification
title: Integration Verification
instruction: Specific verification steps to ensure existing functionality remains intact
type: numbered-list
prefix: IV
items:
- template: "IV1: {{existing_functionality_verification}}"
- template: "IV2: {{integration_point_verification}}"
- template: "IV3: {{performance_impact_verification}}"
- template: 'IV1: {{existing_functionality_verification}}'
- template: 'IV2: {{integration_point_verification}}'
- template: 'IV3: {{performance_impact_verification}}'
==================== END: .bmad-core/templates/brownfield-prd-tmpl.yaml ====================
==================== START: .bmad-core/checklists/pm-checklist.md ====================
@@ -1966,7 +1946,6 @@ Ask the user if they want to work through the checklist:
Create a comprehensive validation report that includes:
1. Executive Summary
- Overall PRD completeness (percentage)
- MVP scope appropriateness (Too Large/Just Right/Too Small)
- Readiness for architecture phase (Ready/Nearly Ready/Not Ready)
@@ -1974,26 +1953,22 @@ Create a comprehensive validation report that includes:
2. Category Analysis Table
Fill in the actual table with:
- Status: PASS (90%+ complete), PARTIAL (60-89%), FAIL (<60%)
- Critical Issues: Specific problems that block progress
3. Top Issues by Priority
- BLOCKERS: Must fix before architect can proceed
- HIGH: Should fix for quality
- MEDIUM: Would improve clarity
- LOW: Nice to have
4. MVP Scope Assessment
- Features that might be cut for true MVP
- Missing features that are essential
- Complexity concerns
- Timeline realism
5. Technical Readiness
- Clarity of technical constraints
- Identified technical risks
- Areas needing architect investigation

dist/agents/po.txt (vendored, 22 changes)

@@ -110,7 +110,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
## Instructions
1. **Initial Assessment**
- If user or the task being run provides a checklist name:
- Try fuzzy matching (e.g. "architecture checklist" -> "architect-checklist")
- If multiple matches found, ask user to clarify
@@ -123,14 +122,12 @@ If the user asks or does not specify a specific checklist, list the checklists a
- All at once (YOLO mode - recommended for checklists, there will be a summary of sections at the end to discuss)
2. **Document and Artifact Gathering**
- Each checklist will specify its required documents/artifacts at the beginning
- Follow the checklist's specific instructions for what to gather, generally a file can be resolved in the docs folder, if not or unsure, halt and ask or confirm with the user.
3. **Checklist Processing**
If in interactive mode:
- Work through each section of the checklist one at a time
- For each section:
- Review all items in the section following instructions for that section embedded in the checklist
@@ -139,7 +136,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
- Get user confirmation before proceeding to next section or if any thing major do we need to halt and take corrective action
If in YOLO mode:
- Process all sections at once
- Create a comprehensive report of all findings
- Present the complete analysis to the user
@@ -147,7 +143,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
4. **Validation Approach**
For each checklist item:
- Read and understand the requirement
- Look for evidence in the documentation that satisfies the requirement
- Consider both explicit mentions and implicit coverage
@@ -161,7 +156,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
5. **Section Analysis**
For each section:
- think step by step to calculate pass rate
- Identify common themes in failed items
- Provide specific recommendations for improvement
@@ -171,7 +165,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
6. **Final Report**
Prepare a summary that includes:
- Overall checklist completion status
- Pass rates by section
- List of failed items with context
@@ -288,13 +281,11 @@ CRITICAL: Use proper parsing that understands markdown context. A ## inside a co
For each extracted section:
1. **Generate filename**: Convert the section heading to lowercase-dash-case
- Remove special characters
- Replace spaces with dashes
- Example: "## Tech Stack" → `tech-stack.md`
2. **Adjust heading levels**:
- The level 2 heading becomes level 1 (# instead of ##) in the sharded new document
- All subsection levels decrease by 1:
@@ -602,7 +593,7 @@ template:
output:
format: markdown
filename: docs/stories/{{epic_num}}.{{story_num}}.{{story_title_short}}.md
title: "Story {{epic_num}}.{{story_num}}: {{story_title_short}}"
title: 'Story {{epic_num}}.{{story_num}}: {{story_title_short}}'
workflow:
mode: interactive
@@ -704,7 +695,7 @@ sections:
sections:
- id: agent-model
title: Agent Model Used
template: "{{agent_model_name_version}}"
template: '{{agent_model_name_version}}'
instruction: Record the specific AI agent model and version used for development
owner: dev-agent
editors: [dev-agent]
@@ -745,12 +736,10 @@ PROJECT TYPE DETECTION:
First, determine the project type by checking:
1. Is this a GREENFIELD project (new from scratch)?
- Look for: New project initialization, no existing codebase references
- Check for: prd.md, architecture.md, new project setup stories
2. Is this a BROWNFIELD project (enhancing existing system)?
- Look for: References to existing codebase, enhancement/modification language
- Check for: brownfield-prd.md, brownfield-architecture.md, existing system analysis
@@ -1084,7 +1073,6 @@ Ask the user if they want to work through the checklist:
Generate a comprehensive validation report that adapts to project type:
1. Executive Summary
- Project type: [Greenfield/Brownfield] with [UI/No UI]
- Overall readiness (percentage)
- Go/No-Go recommendation
@@ -1094,42 +1082,36 @@ Generate a comprehensive validation report that adapts to project type:
2. Project-Specific Analysis
FOR GREENFIELD:
- Setup completeness
- Dependency sequencing
- MVP scope appropriateness
- Development timeline feasibility
FOR BROWNFIELD:
- Integration risk level (High/Medium/Low)
- Existing system impact assessment
- Rollback readiness
- User disruption potential
3. Risk Assessment
- Top 5 risks by severity
- Mitigation recommendations
- Timeline impact of addressing issues
- [BROWNFIELD] Specific integration risks
4. MVP Completeness
- Core features coverage
- Missing essential functionality
- Scope creep identified
- True MVP vs over-engineering
5. Implementation Readiness
- Developer clarity score (1-10)
- Ambiguous requirements count
- Missing technical details
- [BROWNFIELD] Integration point clarity
6. Recommendations
- Must-fix before development
- Should-fix for quality
- Consider for improvement

dist/agents/qa.txt (vendored, 1769 changes): diff suppressed because it is too large

dist/agents/sm.txt (vendored, 14 changes)

@@ -211,7 +211,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
## Instructions
1. **Initial Assessment**
- If user or the task being run provides a checklist name:
- Try fuzzy matching (e.g. "architecture checklist" -> "architect-checklist")
- If multiple matches found, ask user to clarify
@@ -224,14 +223,12 @@ If the user asks or does not specify a specific checklist, list the checklists a
- All at once (YOLO mode - recommended for checklists, there will be a summary of sections at the end to discuss)
2. **Document and Artifact Gathering**
- Each checklist will specify its required documents/artifacts at the beginning
- Follow the checklist's specific instructions for what to gather, generally a file can be resolved in the docs folder, if not or unsure, halt and ask or confirm with the user.
3. **Checklist Processing**
If in interactive mode:
- Work through each section of the checklist one at a time
- For each section:
- Review all items in the section following instructions for that section embedded in the checklist
@@ -240,7 +237,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
- Get user confirmation before proceeding to next section or if any thing major do we need to halt and take corrective action
If in YOLO mode:
- Process all sections at once
- Create a comprehensive report of all findings
- Present the complete analysis to the user
@@ -248,7 +244,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
4. **Validation Approach**
For each checklist item:
- Read and understand the requirement
- Look for evidence in the documentation that satisfies the requirement
- Consider both explicit mentions and implicit coverage
@@ -262,7 +257,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
5. **Section Analysis**
For each section:
- think step by step to calculate pass rate
- Identify common themes in failed items
- Provide specific recommendations for improvement
@@ -272,7 +266,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
6. **Final Report**
Prepare a summary that includes:
- Overall checklist completion status
- Pass rates by section
- List of failed items with context
@@ -376,7 +369,7 @@ template:
output:
format: markdown
filename: docs/stories/{{epic_num}}.{{story_num}}.{{story_title_short}}.md
title: "Story {{epic_num}}.{{story_num}}: {{story_title_short}}"
title: 'Story {{epic_num}}.{{story_num}}: {{story_title_short}}'
workflow:
mode: interactive
@@ -478,7 +471,7 @@ sections:
sections:
- id: agent-model
title: Agent Model Used
template: "{{agent_model_name_version}}"
template: '{{agent_model_name_version}}'
instruction: Record the specific AI agent model and version used for development
owner: dev-agent
editors: [dev-agent]
@@ -628,19 +621,16 @@ Note: We don't need every file listed - just the important ones.]]
Generate a concise validation report:
1. Quick Summary
- Story readiness: READY / NEEDS REVISION / BLOCKED
- Clarity score (1-10)
- Major gaps identified
2. Fill in the validation table with:
- PASS: Requirements clearly met
- PARTIAL: Some gaps but workable
- FAIL: Critical information missing
3. Specific Issues (if any)
- List concrete problems to fix
- Suggest specific improvements
- Identify any blocking dependencies


@@ -258,7 +258,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
## Instructions
1. **Initial Assessment**
- If user or the task being run provides a checklist name:
- Try fuzzy matching (e.g. "architecture checklist" -> "architect-checklist")
- If multiple matches found, ask user to clarify
@@ -271,14 +270,12 @@ If the user asks or does not specify a specific checklist, list the checklists a
- All at once (YOLO mode - recommended for checklists, there will be a summary of sections at the end to discuss)
2. **Document and Artifact Gathering**
- Each checklist will specify its required documents/artifacts at the beginning
- Follow the checklist's specific instructions for what to gather, generally a file can be resolved in the docs folder, if not or unsure, halt and ask or confirm with the user.
3. **Checklist Processing**
If in interactive mode:
- Work through each section of the checklist one at a time
- For each section:
- Review all items in the section following instructions for that section embedded in the checklist
@@ -287,7 +284,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
- Get user confirmation before proceeding to next section or if any thing major do we need to halt and take corrective action
If in YOLO mode:
- Process all sections at once
- Create a comprehensive report of all findings
- Present the complete analysis to the user
@@ -295,7 +291,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
4. **Validation Approach**
For each checklist item:
- Read and understand the requirement
- Look for evidence in the documentation that satisfies the requirement
- Consider both explicit mentions and implicit coverage
@@ -309,7 +304,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
5. **Section Analysis**
For each section:
- think step by step to calculate pass rate
- Identify common themes in failed items
- Provide specific recommendations for improvement
@@ -319,7 +313,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
6. **Final Report**
Prepare a summary that includes:
- Overall checklist completion status
- Pass rates by section
- List of failed items with context
@@ -350,7 +343,7 @@ template:
output:
format: markdown
filename: docs/front-end-spec.md
title: "{{project_name}} UI/UX Specification"
title: '{{project_name}} UI/UX Specification'
workflow:
mode: interactive
@@ -378,29 +371,29 @@ sections:
sections:
- id: user-personas
title: Target User Personas
template: "{{persona_descriptions}}"
template: '{{persona_descriptions}}'
examples:
- "**Power User:** Technical professionals who need advanced features and efficiency"
- "**Casual User:** Occasional users who prioritize ease of use and clear guidance"
- "**Administrator:** System managers who need control and oversight capabilities"
- '**Power User:** Technical professionals who need advanced features and efficiency'
- '**Casual User:** Occasional users who prioritize ease of use and clear guidance'
- '**Administrator:** System managers who need control and oversight capabilities'
- id: usability-goals
title: Usability Goals
template: "{{usability_goals}}"
template: '{{usability_goals}}'
examples:
- "Ease of learning: New users can complete core tasks within 5 minutes"
- "Efficiency of use: Power users can complete frequent tasks with minimal clicks"
- "Error prevention: Clear validation and confirmation for destructive actions"
- "Memorability: Infrequent users can return without relearning"
- 'Ease of learning: New users can complete core tasks within 5 minutes'
- 'Efficiency of use: Power users can complete frequent tasks with minimal clicks'
- 'Error prevention: Clear validation and confirmation for destructive actions'
- 'Memorability: Infrequent users can return without relearning'
- id: design-principles
title: Design Principles
template: "{{design_principles}}"
template: '{{design_principles}}'
type: numbered-list
examples:
- "**Clarity over cleverness** - Prioritize clear communication over aesthetic innovation"
- '**Clarity over cleverness** - Prioritize clear communication over aesthetic innovation'
- "**Progressive disclosure** - Show only what's needed, when it's needed"
- "**Consistent patterns** - Use familiar UI patterns throughout the application"
- "**Immediate feedback** - Every action should have a clear, immediate response"
- "**Accessible by default** - Design for all users from the start"
- '**Consistent patterns** - Use familiar UI patterns throughout the application'
- '**Immediate feedback** - Every action should have a clear, immediate response'
- '**Accessible by default** - Design for all users from the start'
- id: changelog
title: Change Log
type: table
@@ -422,7 +415,7 @@ sections:
title: Site Map / Screen Inventory
type: mermaid
mermaid_type: graph
template: "{{sitemap_diagram}}"
template: '{{sitemap_diagram}}'
examples:
- |
graph TD
@@ -462,7 +455,7 @@ sections:
repeatable: true
sections:
- id: flow
title: "{{flow_name}}"
title: '{{flow_name}}'
template: |
**User Goal:** {{flow_goal}}
@@ -474,13 +467,13 @@ sections:
title: Flow Diagram
type: mermaid
mermaid_type: graph
template: "{{flow_diagram}}"
template: '{{flow_diagram}}'
- id: edge-cases
title: "Edge Cases & Error Handling:"
title: 'Edge Cases & Error Handling:'
type: bullet-list
template: "- {{edge_case}}"
template: '- {{edge_case}}'
- id: notes
template: "**Notes:** {{flow_notes}}"
template: '**Notes:** {{flow_notes}}'
- id: wireframes-mockups
title: Wireframes & Mockups
@@ -489,13 +482,13 @@ sections:
elicit: true
sections:
- id: design-files
template: "**Primary Design Files:** {{design_tool_link}}"
template: '**Primary Design Files:** {{design_tool_link}}'
- id: key-screen-layouts
title: Key Screen Layouts
repeatable: true
sections:
- id: screen
title: "{{screen_name}}"
title: '{{screen_name}}'
template: |
**Purpose:** {{screen_purpose}}
@@ -515,13 +508,13 @@ sections:
elicit: true
sections:
- id: design-system-approach
template: "**Design System Approach:** {{design_system_approach}}"
template: '**Design System Approach:** {{design_system_approach}}'
- id: core-components
title: Core Components
repeatable: true
sections:
- id: component
title: "{{component_name}}"
title: '{{component_name}}'
template: |
**Purpose:** {{component_purpose}}
@@ -538,19 +531,19 @@ sections:
sections:
- id: visual-identity
title: Visual Identity
template: "**Brand Guidelines:** {{brand_guidelines_link}}"
template: '**Brand Guidelines:** {{brand_guidelines_link}}'
- id: color-palette
title: Color Palette
type: table
columns: ["Color Type", "Hex Code", "Usage"]
columns: ['Color Type', 'Hex Code', 'Usage']
rows:
- ["Primary", "{{primary_color}}", "{{primary_usage}}"]
- ["Secondary", "{{secondary_color}}", "{{secondary_usage}}"]
- ["Accent", "{{accent_color}}", "{{accent_usage}}"]
- ["Success", "{{success_color}}", "Positive feedback, confirmations"]
- ["Warning", "{{warning_color}}", "Cautions, important notices"]
- ["Error", "{{error_color}}", "Errors, destructive actions"]
- ["Neutral", "{{neutral_colors}}", "Text, borders, backgrounds"]
- ['Primary', '{{primary_color}}', '{{primary_usage}}']
- ['Secondary', '{{secondary_color}}', '{{secondary_usage}}']
- ['Accent', '{{accent_color}}', '{{accent_usage}}']
- ['Success', '{{success_color}}', 'Positive feedback, confirmations']
- ['Warning', '{{warning_color}}', 'Cautions, important notices']
- ['Error', '{{error_color}}', 'Errors, destructive actions']
- ['Neutral', '{{neutral_colors}}', 'Text, borders, backgrounds']
- id: typography
title: Typography
sections:
@@ -563,13 +556,13 @@ sections:
- id: type-scale
title: Type Scale
type: table
columns: ["Element", "Size", "Weight", "Line Height"]
columns: ['Element', 'Size', 'Weight', 'Line Height']
rows:
- ["H1", "{{h1_size}}", "{{h1_weight}}", "{{h1_line}}"]
- ["H2", "{{h2_size}}", "{{h2_weight}}", "{{h2_line}}"]
- ["H3", "{{h3_size}}", "{{h3_weight}}", "{{h3_line}}"]
- ["Body", "{{body_size}}", "{{body_weight}}", "{{body_line}}"]
- ["Small", "{{small_size}}", "{{small_weight}}", "{{small_line}}"]
- ['H1', '{{h1_size}}', '{{h1_weight}}', '{{h1_line}}']
- ['H2', '{{h2_size}}', '{{h2_weight}}', '{{h2_line}}']
- ['H3', '{{h3_size}}', '{{h3_weight}}', '{{h3_line}}']
- ['Body', '{{body_size}}', '{{body_weight}}', '{{body_line}}']
- ['Small', '{{small_size}}', '{{small_weight}}', '{{small_line}}']
- id: iconography
title: Iconography
template: |
@@ -590,7 +583,7 @@ sections:
sections:
- id: compliance-target
title: Compliance Target
template: "**Standard:** {{compliance_standard}}"
template: '**Standard:** {{compliance_standard}}'
- id: key-requirements
title: Key Requirements
template: |
@@ -610,7 +603,7 @@ sections:
- Form labels: {{form_requirements}}
- id: testing-strategy
title: Testing Strategy
template: "{{accessibility_testing}}"
template: '{{accessibility_testing}}'
- id: responsiveness
title: Responsiveness Strategy
@@ -620,12 +613,12 @@ sections:
- id: breakpoints
title: Breakpoints
type: table
columns: ["Breakpoint", "Min Width", "Max Width", "Target Devices"]
columns: ['Breakpoint', 'Min Width', 'Max Width', 'Target Devices']
rows:
- ["Mobile", "{{mobile_min}}", "{{mobile_max}}", "{{mobile_devices}}"]
- ["Tablet", "{{tablet_min}}", "{{tablet_max}}", "{{tablet_devices}}"]
- ["Desktop", "{{desktop_min}}", "{{desktop_max}}", "{{desktop_devices}}"]
- ["Wide", "{{wide_min}}", "-", "{{wide_devices}}"]
- ['Mobile', '{{mobile_min}}', '{{mobile_max}}', '{{mobile_devices}}']
- ['Tablet', '{{tablet_min}}', '{{tablet_max}}', '{{tablet_devices}}']
- ['Desktop', '{{desktop_min}}', '{{desktop_max}}', '{{desktop_devices}}']
- ['Wide', '{{wide_min}}', '-', '{{wide_devices}}']
- id: adaptation-patterns
title: Adaptation Patterns
template: |
@@ -644,11 +637,11 @@ sections:
sections:
- id: motion-principles
title: Motion Principles
template: "{{motion_principles}}"
template: '{{motion_principles}}'
- id: key-animations
title: Key Animations
repeatable: true
template: "- **{{animation_name}}:** {{animation_description}} (Duration: {{duration}}, Easing: {{easing}})"
template: '- **{{animation_name}}:** {{animation_description}} (Duration: {{duration}}, Easing: {{easing}})'
- id: performance
title: Performance Considerations
@@ -662,7 +655,7 @@ sections:
- **Animation FPS:** {{animation_goal}}
- id: design-strategies
title: Design Strategies
template: "{{performance_strategies}}"
template: '{{performance_strategies}}'
- id: next-steps
title: Next Steps
@@ -677,17 +670,17 @@ sections:
- id: immediate-actions
title: Immediate Actions
type: numbered-list
template: "{{action}}"
template: '{{action}}'
- id: design-handoff-checklist
title: Design Handoff Checklist
type: checklist
items:
- "All user flows documented"
- "Component inventory complete"
- "Accessibility requirements defined"
- "Responsive strategy clear"
- "Brand guidelines incorporated"
- "Performance goals established"
- 'All user flows documented'
- 'Component inventory complete'
- 'Accessibility requirements defined'
- 'Responsive strategy clear'
- 'Brand guidelines incorporated'
- 'Performance goals established'
- id: checklist-results
title: Checklist Results


@@ -210,7 +210,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
## Instructions
1. **Initial Assessment**
- If user or the task being run provides a checklist name:
- Try fuzzy matching (e.g. "architecture checklist" -> "architect-checklist")
- If multiple matches found, ask user to clarify
@@ -223,14 +222,12 @@ If the user asks or does not specify a specific checklist, list the checklists a
- All at once (YOLO mode - recommended for checklists, there will be a summary of sections at the end to discuss)
2. **Document and Artifact Gathering**
- Each checklist will specify its required documents/artifacts at the beginning
- Follow the checklist's specific instructions for what to gather, generally a file can be resolved in the docs folder, if not or unsure, halt and ask or confirm with the user.
3. **Checklist Processing**
If in interactive mode:
- Work through each section of the checklist one at a time
- For each section:
- Review all items in the section following instructions for that section embedded in the checklist
@@ -239,7 +236,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
- Get user confirmation before proceeding to next section or if any thing major do we need to halt and take corrective action
If in YOLO mode:
- Process all sections at once
- Create a comprehensive report of all findings
- Present the complete analysis to the user
@@ -247,7 +243,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
4. **Validation Approach**
For each checklist item:
- Read and understand the requirement
- Look for evidence in the documentation that satisfies the requirement
- Consider both explicit mentions and implicit coverage
@@ -261,7 +256,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
5. **Section Analysis**
For each section:
- think step by step to calculate pass rate
- Identify common themes in failed items
- Provide specific recommendations for improvement
@@ -271,7 +265,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
6. **Final Report**
Prepare a summary that includes:
- Overall checklist completion status
- Pass rates by section
- List of failed items with context
@@ -306,7 +299,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
[[LLM: Begin by understanding the game design context and goals. Ask clarifying questions if needed to determine the best approach for game-specific ideation.]]
1. **Establish Game Context**
- Understand the game genre or opportunity area
- Identify target audience and platform constraints
- Determine session goals (concept exploration vs. mechanic refinement)
@@ -324,7 +316,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **"What If" Game Scenarios**
[[LLM: Generate provocative what-if questions that challenge game design assumptions and expand thinking beyond current genre limitations.]]
- What if players could rewind time in any genre?
- What if the game world reacted to the player's real-world location?
- What if failure was more rewarding than success?
@@ -333,7 +324,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Cross-Genre Fusion**
[[LLM: Help user combine unexpected game genres and mechanics to create unique experiences.]]
- "How might [genre A] mechanics work in [genre B]?"
- Puzzle mechanics in action games
- Dating sim elements in strategy games
@@ -342,7 +332,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
3. **Player Motivation Reversal**
[[LLM: Flip traditional player motivations to reveal new gameplay possibilities.]]
- What if losing was the goal?
- What if cooperation was forced in competitive games?
- What if players had to help their enemies?
@@ -359,7 +348,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **SCAMPER for Game Mechanics**
[[LLM: Guide through each SCAMPER prompt specifically for game design.]]
- **S** = Substitute: What mechanics can be substituted? (walking → flying → swimming)
- **C** = Combine: What systems can be merged? (inventory + character growth)
- **A** = Adapt: What mechanics from other media? (books, movies, sports)
@@ -370,7 +358,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Player Agency Spectrum**
[[LLM: Explore different levels of player control and agency across game systems.]]
- Full Control: Direct character movement, combat, building
- Indirect Control: Setting rules, giving commands, environmental changes
- Influence Only: Suggestions, preferences, emotional reactions
@@ -378,7 +365,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
3. **Temporal Game Design**
[[LLM: Explore how time affects gameplay and player experience.]]
- Real-time vs. turn-based mechanics
- Time travel and manipulation
- Persistent vs. session-based progress
@@ -389,7 +375,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **Emotion-First Design**
[[LLM: Start with target emotions and work backward to mechanics that create them.]]
- Target Emotion: Wonder → Mechanics: Discovery, mystery, scale
- Target Emotion: Triumph → Mechanics: Challenge, skill growth, recognition
- Target Emotion: Connection → Mechanics: Cooperation, shared goals, communication
@@ -397,7 +382,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Player Archetype Brainstorming**
[[LLM: Design for different player types and motivations.]]
- Achievers: Progression, completion, mastery
- Explorers: Discovery, secrets, world-building
- Socializers: Interaction, cooperation, community
@@ -406,7 +390,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
3. **Accessibility-First Innovation**
[[LLM: Generate ideas that make games more accessible while creating new gameplay.]]
- Visual impairment considerations leading to audio-focused mechanics
- Motor accessibility inspiring one-handed or simplified controls
- Cognitive accessibility driving clear feedback and pacing
@@ -416,7 +399,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **Environmental Storytelling**
[[LLM: Brainstorm ways the game world itself tells stories without explicit narrative.]]
- How does the environment show history?
- What do interactive objects reveal about characters?
- How can level design communicate mood?
@@ -424,7 +406,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Player-Generated Narrative**
[[LLM: Explore ways players create their own stories through gameplay.]]
- Emergent storytelling through player choices
- Procedural narrative generation
- Player-to-player story sharing
@@ -432,7 +413,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
3. **Genre Expectation Subversion**
[[LLM: Identify and deliberately subvert player expectations within genres.]]
- Fantasy RPG where magic is mundane
- Horror game where monsters are friendly
- Racing game where going slow is optimal
@@ -442,7 +422,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **Platform-Specific Design**
[[LLM: Generate ideas that leverage unique platform capabilities.]]
- Mobile: GPS, accelerometer, camera, always-connected
- Web: URLs, tabs, social sharing, real-time collaboration
- Console: Controllers, TV viewing, couch co-op
@@ -450,7 +429,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Constraint-Based Creativity**
[[LLM: Use technical or design constraints as creative catalysts.]]
- One-button games
- Games without graphics
- Games that play in notification bars
@@ -496,19 +474,16 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
[[LLM: Guide the brainstorming session with appropriate pacing for game design exploration.]]
1. **Inspiration Phase** (10-15 min)
- Reference existing games and mechanics
- Explore player experiences and emotions
- Gather visual and thematic inspiration
2. **Divergent Exploration** (25-35 min)
- Generate many game concepts or mechanics
- Use expansion and fusion techniques
- Encourage wild and impossible ideas
3. **Player-Centered Filtering** (15-20 min)
- Consider target audience reactions
- Evaluate emotional impact and engagement
- Group ideas by player experience goals
@@ -629,63 +604,54 @@ CRITICAL: First, help the user select the most appropriate research focus based
Present these numbered options to the user:
1. **Product Validation Research**
- Validate product hypotheses and market fit
- Test assumptions about user needs and solutions
- Assess technical and business feasibility
- Identify risks and mitigation strategies
2. **Market Opportunity Research**
- Analyze market size and growth potential
- Identify market segments and dynamics
- Assess market entry strategies
- Evaluate timing and market readiness
3. **User & Customer Research**
- Deep dive into user personas and behaviors
- Understand jobs-to-be-done and pain points
- Map customer journeys and touchpoints
- Analyze willingness to pay and value perception
4. **Competitive Intelligence Research**
- Detailed competitor analysis and positioning
- Feature and capability comparisons
- Business model and strategy analysis
- Identify competitive advantages and gaps
5. **Technology & Innovation Research**
- Assess technology trends and possibilities
- Evaluate technical approaches and architectures
- Identify emerging technologies and disruptions
- Analyze build vs. buy vs. partner options
6. **Industry & Ecosystem Research**
- Map industry value chains and dynamics
- Identify key players and relationships
- Analyze regulatory and compliance factors
- Understand partnership opportunities
7. **Strategic Options Research**
- Evaluate different strategic directions
- Assess business model alternatives
- Analyze go-to-market strategies
- Consider expansion and scaling paths
8. **Risk & Feasibility Research**
- Identify and assess various risk factors
- Evaluate implementation challenges
- Analyze resource requirements
- Consider regulatory and legal implications
9. **Custom Research Focus**
- User-defined research objectives
- Specialized domain investigation
- Cross-functional research needs
@@ -854,13 +820,11 @@ CRITICAL: collaborate with the user to develop specific, actionable research que
### 5. Review and Refinement
1. **Present Complete Prompt**
- Show the full research prompt
- Explain key elements and rationale
- Highlight any assumptions made
2. **Gather Feedback**
- Are the objectives clear and correct?
- Do the questions address all concerns?
- Is the scope appropriate?
@@ -918,7 +882,6 @@ CRITICAL: collaborate with the user to develop specific, actionable research que
2. If the section contains game flow diagrams, level layouts, or system diagrams, explain each diagram briefly with game development context before offering elicitation options (e.g., "The gameplay loop diagram shows how player actions lead to rewards and progression. Notice how each step maintains player engagement and creates opportunities for skill development.")
3. If the section contains multiple game elements (like multiple mechanics, multiple levels, multiple systems, etc.), inform the user they can apply elicitation actions to:
- The entire section as a whole
- Individual game elements within the section (specify which element when selecting an action)
@@ -1018,8 +981,8 @@ template:
version: 2.0
output:
format: markdown
filename: "docs/{{game_name}}-game-design-document.md"
title: "{{game_title}} Game Design Document (GDD)"
filename: 'docs/{{game_name}}-game-design-document.md'
title: '{{game_title}} Game Design Document (GDD)'
workflow:
mode: interactive
@@ -1056,7 +1019,7 @@ sections:
title: Unique Selling Points
instruction: List 3-5 key features that differentiate this game from competitors
type: numbered-list
template: "{{usp}}"
template: '{{usp}}'
- id: core-gameplay
title: Core Gameplay
@@ -1101,7 +1064,7 @@ sections:
repeatable: true
sections:
- id: mechanic
title: "{{mechanic_name}}"
title: '{{mechanic_name}}'
template: |
**Description:** {{detailed_description}}
@@ -1166,7 +1129,7 @@ sections:
repeatable: true
sections:
- id: level-type
title: "{{level_type_name}}"
title: '{{level_type_name}}'
template: |
**Purpose:** {{gameplay_purpose}}
**Duration:** {{target_time}}
@@ -1267,10 +1230,10 @@ sections:
instruction: Break down the development into phases that can be converted to epics
sections:
- id: phase-1-core-systems
title: "Phase 1: Core Systems ({{duration}})"
title: 'Phase 1: Core Systems ({{duration}})'
sections:
- id: foundation-epic
title: "Epic: Foundation"
title: 'Epic: Foundation'
type: bullet-list
template: |
- Engine setup and configuration
@@ -1278,41 +1241,41 @@ sections:
- Core input handling
- Asset loading pipeline
- id: core-mechanics-epic
title: "Epic: Core Mechanics"
title: 'Epic: Core Mechanics'
type: bullet-list
template: |
- {{primary_mechanic}} implementation
- Basic physics and collision
- Player controller
- id: phase-2-gameplay-features
title: "Phase 2: Gameplay Features ({{duration}})"
title: 'Phase 2: Gameplay Features ({{duration}})'
sections:
- id: game-systems-epic
title: "Epic: Game Systems"
title: 'Epic: Game Systems'
type: bullet-list
template: |
- {{mechanic_2}} implementation
- {{mechanic_3}} implementation
- Game state management
- id: content-creation-epic
title: "Epic: Content Creation"
title: 'Epic: Content Creation'
type: bullet-list
template: |
- Level loading system
- First playable levels
- Basic UI implementation
- id: phase-3-polish-optimization
title: "Phase 3: Polish & Optimization ({{duration}})"
title: 'Phase 3: Polish & Optimization ({{duration}})'
sections:
- id: performance-epic
title: "Epic: Performance"
title: 'Epic: Performance'
type: bullet-list
template: |
- Optimization and profiling
- Mobile platform testing
- Memory management
- id: user-experience-epic
title: "Epic: User Experience"
title: 'Epic: User Experience'
type: bullet-list
template: |
- Audio implementation
@@ -1354,7 +1317,7 @@ sections:
title: References
instruction: List any competitive analysis, inspiration, or research sources
type: bullet-list
template: "{{reference}}"
template: '{{reference}}'
==================== END: .bmad-2d-phaser-game-dev/templates/game-design-doc-tmpl.yaml ====================
==================== START: .bmad-2d-phaser-game-dev/templates/level-design-doc-tmpl.yaml ====================
@@ -1364,8 +1327,8 @@ template:
version: 2.0
output:
format: markdown
filename: "docs/{{game_name}}-level-design-document.md"
title: "{{game_title}} Level Design Document"
filename: 'docs/{{game_name}}-level-design-document.md'
title: '{{game_title}} Level Design Document'
workflow:
mode: interactive
@@ -1426,7 +1389,7 @@ sections:
repeatable: true
sections:
- id: level-category
title: "{{category_name}} Levels"
title: '{{category_name}} Levels'
template: |
**Purpose:** {{gameplay_purpose}}
@@ -1731,19 +1694,19 @@ sections:
title: Playtesting Checklist
type: checklist
items:
- "Level completes within target time range"
- "All mechanics function correctly"
- "Difficulty feels appropriate for level category"
- "Player guidance is clear and effective"
- "No exploits or sequence breaks (unless intended)"
- 'Level completes within target time range'
- 'All mechanics function correctly'
- 'Difficulty feels appropriate for level category'
- 'Player guidance is clear and effective'
- 'No exploits or sequence breaks (unless intended)'
- id: player-experience-testing
title: Player Experience Testing
type: checklist
items:
- "Tutorial levels teach effectively"
- "Challenge feels fair and rewarding"
- "Flow and pacing maintain engagement"
- "Audio and visual feedback support gameplay"
- 'Tutorial levels teach effectively'
- 'Challenge feels fair and rewarding'
- 'Flow and pacing maintain engagement'
- 'Audio and visual feedback support gameplay'
- id: balance-validation
title: Balance Validation
template: |
@@ -1851,8 +1814,8 @@ template:
version: 2.0
output:
format: markdown
filename: "docs/{{game_name}}-game-brief.md"
title: "{{game_title}} Game Brief"
filename: 'docs/{{game_name}}-game-brief.md'
title: '{{game_title}} Game Brief'
workflow:
mode: interactive
@@ -2138,21 +2101,21 @@ sections:
title: Development Roadmap
sections:
- id: phase-1-preproduction
title: "Phase 1: Pre-Production ({{duration}})"
title: 'Phase 1: Pre-Production ({{duration}})'
type: bullet-list
template: |
- Detailed Game Design Document creation
- Technical architecture planning
- Art style exploration and pipeline setup
- id: phase-2-prototype
title: "Phase 2: Prototype ({{duration}})"
title: 'Phase 2: Prototype ({{duration}})'
type: bullet-list
template: |
- Core mechanic implementation
- Technical proof of concept
- Initial playtesting and iteration
- id: phase-3-production
title: "Phase 3: Production ({{duration}})"
title: 'Phase 3: Production ({{duration}})'
type: bullet-list
template: |
- Full feature development

@@ -113,7 +113,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
## Instructions
1. **Initial Assessment**
- If user or the task being run provides a checklist name:
- Try fuzzy matching (e.g. "architecture checklist" -> "architect-checklist")
- If multiple matches found, ask user to clarify
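A minimal sketch of how the fuzzy matching step above could work, assuming a simple normalize-and-substring approach; the checklist ids and normalization rules here are illustrative, not part of the task spec:

```typescript
// Illustrative only: resolve a user-supplied checklist name against known checklist ids.
// The example ids and normalization rules are assumptions, not part of the task spec.
const KNOWN_CHECKLISTS = ['architect-checklist', 'game-design-checklist', 'story-dod-checklist'];

function normalize(name: string): string {
  return name
    .toLowerCase()
    .replace(/checklist/g, '')
    .replace(/[^a-z0-9]+/g, ' ')
    .trim();
}

function resolveChecklist(input: string): string[] {
  const needle = normalize(input);
  return KNOWN_CHECKLISTS.filter(
    (id) => normalize(id).includes(needle) || needle.includes(normalize(id)),
  );
}

// resolveChecklist('architecture checklist') -> ['architect-checklist']  (single match: proceed)
// More than one match -> ask the user to clarify; no match -> list the available checklists.
```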
@@ -126,14 +125,12 @@ If the user asks or does not specify a specific checklist, list the checklists a
- All at once (YOLO mode - recommended for checklists; a summary of each section will be presented at the end for discussion)
2. **Document and Artifact Gathering**
- Each checklist will specify its required documents/artifacts at the beginning
- Follow the checklist's specific instructions for what to gather; generally a file can be resolved in the docs folder. If it cannot be found, or you are unsure, halt and confirm with the user.
3. **Checklist Processing**
If in interactive mode:
- Work through each section of the checklist one at a time
- For each section:
- Review all items in the section following instructions for that section embedded in the checklist
@@ -142,7 +139,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
- Get user confirmation before proceeding to the next section, and check whether anything major requires halting to take corrective action
If in YOLO mode:
- Process all sections at once
- Create a comprehensive report of all findings
- Present the complete analysis to the user
@@ -150,7 +146,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
4. **Validation Approach**
For each checklist item:
- Read and understand the requirement
- Look for evidence in the documentation that satisfies the requirement
- Consider both explicit mentions and implicit coverage
@@ -164,7 +159,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
5. **Section Analysis**
For each section:
- Think step by step to calculate the pass rate
- Identify common themes in failed items
- Provide specific recommendations for improvement
@@ -174,7 +168,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
6. **Final Report**
Prepare a summary that includes:
- Overall checklist completion status
- Pass rates by section
- List of failed items with context
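The pass-rate arithmetic behind the section analysis and final report is simple; a minimal sketch, assuming items are tracked with a pass/fail/n-a status (the item shape below is an assumption, not prescribed by the task):

```typescript
// Illustrative sketch of the per-section pass-rate calculation described above.
// The item shape and statuses are assumed for this example.
type ItemStatus = 'pass' | 'fail' | 'n/a';

interface ChecklistItem {
  text: string;
  status: ItemStatus;
  note?: string; // context recorded for failed items
}

function sectionPassRate(items: ChecklistItem[]): number {
  const applicable = items.filter((i) => i.status !== 'n/a');
  if (applicable.length === 0) return 1; // nothing applicable counts as fully passed
  const passed = applicable.filter((i) => i.status === 'pass').length;
  return passed / applicable.length;
}

// Example: 3 pass, 1 fail, 1 n/a -> 3 / 4 = 0.75, reported as a 75% pass rate for the section.
```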
@@ -204,8 +197,8 @@ template:
version: 2.0
output:
format: markdown
filename: "docs/{{game_name}}-game-architecture.md"
title: "{{game_title}} Game Architecture Document"
filename: 'docs/{{game_name}}-game-architecture.md'
title: '{{game_title}} Game Architecture Document'
workflow:
mode: interactive
@@ -429,7 +422,7 @@ sections:
repeatable: true
sections:
- id: mechanic-system
title: "{{mechanic_name}} System"
title: '{{mechanic_name}} System'
template: |
**Purpose:** {{system_purpose}}
@@ -726,7 +719,7 @@ sections:
instruction: Break down the architecture implementation into phases that align with the GDD development phases
sections:
- id: phase-1-foundation
title: "Phase 1: Foundation ({{duration}})"
title: 'Phase 1: Foundation ({{duration}})'
sections:
- id: phase-1-core
title: Core Systems
@@ -744,7 +737,7 @@ sections:
- "Basic Scene Management System"
- "Asset Loading Foundation"
- id: phase-2-game-systems
title: "Phase 2: Game Systems ({{duration}})"
title: 'Phase 2: Game Systems ({{duration}})'
sections:
- id: phase-2-gameplay
title: Gameplay Systems
@@ -762,7 +755,7 @@ sections:
- "Physics and Collision Framework"
- "Game State Management System"
- id: phase-3-content-polish
title: "Phase 3: Content & Polish ({{duration}})"
title: 'Phase 3: Content & Polish ({{duration}})'
sections:
- id: phase-3-content
title: Content Systems
@@ -1052,7 +1045,7 @@ interface GameState {
interface GameSettings {
musicVolume: number;
sfxVolume: number;
difficulty: "easy" | "normal" | "hard";
difficulty: 'easy' | 'normal' | 'hard';
controls: ControlScheme;
}
```
@@ -1093,12 +1086,12 @@ class GameScene extends Phaser.Scene {
private inputManager!: InputManager;
constructor() {
super({ key: "GameScene" });
super({ key: 'GameScene' });
}
preload(): void {
// Load only scene-specific assets
this.load.image("player", "assets/player.png");
this.load.image('player', 'assets/player.png');
}
create(data: SceneData): void {
@@ -1123,7 +1116,7 @@ class GameScene extends Phaser.Scene {
this.inputManager.destroy();
// Remove event listeners
this.events.off("*");
this.events.off('*');
}
}
```
@@ -1132,13 +1125,13 @@ class GameScene extends Phaser.Scene {
```typescript
// Proper scene transitions with data
this.scene.start("NextScene", {
this.scene.start('NextScene', {
playerScore: this.playerScore,
currentLevel: this.currentLevel + 1,
});
// Scene overlays for UI
this.scene.launch("PauseMenuScene");
this.scene.launch('PauseMenuScene');
this.scene.pause();
```
@@ -1182,7 +1175,7 @@ class Player extends GameEntity {
private health!: HealthComponent;
constructor(scene: Phaser.Scene, x: number, y: number) {
super(scene, x, y, "player");
super(scene, x, y, 'player');
this.movement = this.addComponent(new MovementComponent(this));
this.health = this.addComponent(new HealthComponent(this, 100));
@@ -1202,7 +1195,7 @@ class GameManager {
constructor(scene: Phaser.Scene) {
if (GameManager.instance) {
throw new Error("GameManager already exists!");
throw new Error('GameManager already exists!');
}
this.scene = scene;
@@ -1212,7 +1205,7 @@ class GameManager {
static getInstance(): GameManager {
if (!GameManager.instance) {
throw new Error("GameManager not initialized!");
throw new Error('GameManager not initialized!');
}
return GameManager.instance;
}
@@ -1259,7 +1252,7 @@ class BulletPool {
}
// Pool exhausted - create new bullet
console.warn("Bullet pool exhausted, creating new bullet");
console.warn('Bullet pool exhausted, creating new bullet');
return new Bullet(this.scene, 0, 0);
}
@@ -1359,12 +1352,12 @@ class InputManager {
}
private setupKeyboard(): void {
this.keys = this.scene.input.keyboard.addKeys("W,A,S,D,SPACE,ESC,UP,DOWN,LEFT,RIGHT");
this.keys = this.scene.input.keyboard.addKeys('W,A,S,D,SPACE,ESC,UP,DOWN,LEFT,RIGHT');
}
private setupTouch(): void {
this.scene.input.on("pointerdown", this.handlePointerDown, this);
this.scene.input.on("pointerup", this.handlePointerUp, this);
this.scene.input.on('pointerdown', this.handlePointerDown, this);
this.scene.input.on('pointerup', this.handlePointerUp, this);
}
update(): void {
@@ -1391,9 +1384,9 @@ class InputManager {
class AssetManager {
loadAssets(): Promise<void> {
return new Promise((resolve, reject) => {
this.scene.load.on("filecomplete", this.handleFileComplete, this);
this.scene.load.on("loaderror", this.handleLoadError, this);
this.scene.load.on("complete", () => resolve());
this.scene.load.on('filecomplete', this.handleFileComplete, this);
this.scene.load.on('loaderror', this.handleLoadError, this);
this.scene.load.on('complete', () => resolve());
this.scene.load.start();
});
@@ -1409,8 +1402,8 @@ class AssetManager {
private loadFallbackAsset(key: string): void {
// Load placeholder or default assets
switch (key) {
case "player":
this.scene.load.image("player", "assets/defaults/default-player.png");
case 'player':
this.scene.load.image('player', 'assets/defaults/default-player.png');
break;
default:
console.warn(`No fallback for asset: ${key}`);
@@ -1437,11 +1430,11 @@ class GameSystem {
private attemptRecovery(context: string): void {
switch (context) {
case "update":
case 'update':
// Reset system state
this.reset();
break;
case "render":
case 'render':
// Disable visual effects
this.disableEffects();
break;
@@ -1461,7 +1454,7 @@ class GameSystem {
```typescript
// Example test for game mechanics
describe("HealthComponent", () => {
describe('HealthComponent', () => {
let healthComponent: HealthComponent;
beforeEach(() => {
@@ -1469,18 +1462,18 @@ describe("HealthComponent", () => {
healthComponent = new HealthComponent(mockEntity, 100);
});
test("should initialize with correct health", () => {
test('should initialize with correct health', () => {
expect(healthComponent.currentHealth).toBe(100);
expect(healthComponent.maxHealth).toBe(100);
});
test("should handle damage correctly", () => {
test('should handle damage correctly', () => {
healthComponent.takeDamage(25);
expect(healthComponent.currentHealth).toBe(75);
expect(healthComponent.isAlive()).toBe(true);
});
test("should handle death correctly", () => {
test('should handle death correctly', () => {
healthComponent.takeDamage(150);
expect(healthComponent.currentHealth).toBe(0);
expect(healthComponent.isAlive()).toBe(false);
@@ -1493,7 +1486,7 @@ describe("HealthComponent", () => {
**Scene Testing:**
```typescript
describe("GameScene Integration", () => {
describe('GameScene Integration', () => {
let scene: GameScene;
let mockGame: Phaser.Game;
@@ -1503,7 +1496,7 @@ describe("GameScene Integration", () => {
scene = new GameScene();
});
test("should initialize all systems", () => {
test('should initialize all systems', () => {
scene.create({});
expect(scene.gameManager).toBeDefined();
@@ -1564,25 +1557,21 @@ src/
### Story Implementation Process
1. **Read Story Requirements:**
- Understand acceptance criteria
- Identify technical requirements
- Review performance constraints
2. **Plan Implementation:**
- Identify files to create/modify
- Consider component architecture
- Plan testing approach
3. **Implement Feature:**
- Follow TypeScript strict mode
- Use established patterns
- Maintain 60 FPS performance
4. **Test Implementation:**
- Write unit tests for game logic
- Test cross-platform functionality
- Validate performance targets
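One lightweight way to validate the 60 FPS target during playtesting is to sample Phaser's measured frame rate. The helper below is a sketch; the function name, sampling interval, and threshold are assumptions, not part of the guideline:

```typescript
import Phaser from 'phaser';

// Illustrative only: periodically sample the engine's measured FPS inside a scene
// and warn when it drops below the target named in the performance requirements.
export function watchFrameRate(scene: Phaser.Scene, targetFps = 60, intervalMs = 1000): void {
  scene.time.addEvent({
    delay: intervalMs,
    loop: true,
    callback: () => {
      const fps = scene.game.loop.actualFps;
      if (fps < targetFps) {
        console.warn(`FPS below target: ${fps.toFixed(1)} < ${targetFps}`);
      }
    },
  });
}
```

Calling `watchFrameRate(this)` from a scene's `create()` surfaces drops during manual playtesting; automated performance checks would still belong in the test suite.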

@@ -318,7 +318,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
## Instructions
1. **Initial Assessment**
- If user or the task being run provides a checklist name:
- Try fuzzy matching (e.g. "architecture checklist" -> "architect-checklist")
- If multiple matches found, ask user to clarify
@@ -331,14 +330,12 @@ If the user asks or does not specify a specific checklist, list the checklists a
- All at once (YOLO mode - recommended for checklists; a summary of each section will be presented at the end for discussion)
2. **Document and Artifact Gathering**
- Each checklist will specify its required documents/artifacts at the beginning
- Follow the checklist's specific instructions for what to gather; generally a file can be resolved in the docs folder. If it cannot be found, or you are unsure, halt and confirm with the user.
3. **Checklist Processing**
If in interactive mode:
- Work through each section of the checklist one at a time
- For each section:
- Review all items in the section following instructions for that section embedded in the checklist
@@ -347,7 +344,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
- Get user confirmation before proceeding to the next section, and check whether anything major requires halting to take corrective action
If in YOLO mode:
- Process all sections at once
- Create a comprehensive report of all findings
- Present the complete analysis to the user
@@ -355,7 +351,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
4. **Validation Approach**
For each checklist item:
- Read and understand the requirement
- Look for evidence in the documentation that satisfies the requirement
- Consider both explicit mentions and implicit coverage
@@ -369,7 +364,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
5. **Section Analysis**
For each section:
- Think step by step to calculate the pass rate
- Identify common themes in failed items
- Provide specific recommendations for improvement
@@ -379,7 +373,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
6. **Final Report**
Prepare a summary that includes:
- Overall checklist completion status
- Pass rates by section
- List of failed items with context
@@ -409,8 +402,8 @@ template:
version: 2.0
output:
format: markdown
filename: "stories/{{epic_name}}/{{story_id}}-{{story_name}}.md"
title: "Story: {{story_title}}"
filename: 'stories/{{epic_name}}/{{story_id}}-{{story_name}}.md'
title: 'Story: {{story_title}}'
workflow:
mode: interactive
@@ -439,7 +432,7 @@ sections:
- id: description
title: Description
instruction: Provide a clear, concise description of what this story implements. Focus on the specific game feature or system being built. Reference the GDD section that defines this feature.
template: "{{clear_description_of_what_needs_to_be_implemented}}"
template: '{{clear_description_of_what_needs_to_be_implemented}}'
- id: acceptance-criteria
title: Acceptance Criteria
@@ -449,22 +442,22 @@ sections:
title: Functional Requirements
type: checklist
items:
- "{{specific_functional_requirement}}"
- '{{specific_functional_requirement}}'
- id: technical-requirements
title: Technical Requirements
type: checklist
items:
- "Code follows TypeScript strict mode standards"
- "Maintains 60 FPS on target devices"
- "No memory leaks or performance degradation"
- "{{specific_technical_requirement}}"
- 'Code follows TypeScript strict mode standards'
- 'Maintains 60 FPS on target devices'
- 'No memory leaks or performance degradation'
- '{{specific_technical_requirement}}'
- id: game-design-requirements
title: Game Design Requirements
type: checklist
items:
- "{{gameplay_requirement_from_gdd}}"
- "{{balance_requirement_if_applicable}}"
- "{{player_experience_requirement}}"
- '{{gameplay_requirement_from_gdd}}'
- '{{balance_requirement_if_applicable}}'
- '{{player_experience_requirement}}'
- id: technical-specifications
title: Technical Specifications
@@ -629,14 +622,14 @@ sections:
instruction: Checklist that must be completed before the story is considered finished
type: checklist
items:
- "All acceptance criteria met"
- "Code reviewed and approved"
- "Unit tests written and passing"
- "Integration tests passing"
- "Performance targets met"
- "No linting errors"
- "Documentation updated"
- "{{game_specific_dod_item}}"
- 'All acceptance criteria met'
- 'Code reviewed and approved'
- 'Unit tests written and passing'
- 'Integration tests passing'
- 'Performance targets met'
- 'No linting errors'
- 'Documentation updated'
- '{{game_specific_dod_item}}'
- id: notes
title: Notes

@@ -230,63 +230,54 @@ CRITICAL: First, help the user select the most appropriate research focus based
Present these numbered options to the user:
1. **Product Validation Research**
- Validate product hypotheses and market fit
- Test assumptions about user needs and solutions
- Assess technical and business feasibility
- Identify risks and mitigation strategies
2. **Market Opportunity Research**
- Analyze market size and growth potential
- Identify market segments and dynamics
- Assess market entry strategies
- Evaluate timing and market readiness
3. **User & Customer Research**
- Deep dive into user personas and behaviors
- Understand jobs-to-be-done and pain points
- Map customer journeys and touchpoints
- Analyze willingness to pay and value perception
4. **Competitive Intelligence Research**
- Detailed competitor analysis and positioning
- Feature and capability comparisons
- Business model and strategy analysis
- Identify competitive advantages and gaps
5. **Technology & Innovation Research**
- Assess technology trends and possibilities
- Evaluate technical approaches and architectures
- Identify emerging technologies and disruptions
- Analyze build vs. buy vs. partner options
6. **Industry & Ecosystem Research**
- Map industry value chains and dynamics
- Identify key players and relationships
- Analyze regulatory and compliance factors
- Understand partnership opportunities
7. **Strategic Options Research**
- Evaluate different strategic directions
- Assess business model alternatives
- Analyze go-to-market strategies
- Consider expansion and scaling paths
8. **Risk & Feasibility Research**
- Identify and assess various risk factors
- Evaluate implementation challenges
- Analyze resource requirements
- Consider regulatory and legal implications
9. **Custom Research Focus**
- User-defined research objectives
- Specialized domain investigation
- Cross-functional research needs
@@ -455,13 +446,11 @@ CRITICAL: collaborate with the user to develop specific, actionable research que
### 5. Review and Refinement
1. **Present Complete Prompt**
- Show the full research prompt
- Explain key elements and rationale
- Highlight any assumptions made
2. **Gather Feedback**
- Are the objectives clear and correct?
- Do the questions address all concerns?
- Is the scope appropriate?
@@ -592,13 +581,11 @@ CRITICAL: Use proper parsing that understands markdown context. A ## inside a co
For each extracted section:
1. **Generate filename**: Convert the section heading to lowercase-dash-case
- Remove special characters
- Replace spaces with dashes
- Example: "## Tech Stack" → `tech-stack.md`
2. **Adjust heading levels**:
- The level 2 heading becomes level 1 (# instead of ##) in the sharded new document
- All subsection levels decrease by 1:
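A minimal sketch of the two transformations described above; the function names are illustrative, and the heading regex is deliberately naive (real sharding must skip `#` characters inside fenced code blocks, as the task warns):

```typescript
// Illustrative only: filename generation and heading-level adjustment for sharded sections.
function headingToFilename(heading: string): string {
  return (
    heading
      .replace(/^#+\s*/, '') // drop the leading '## '
      .toLowerCase()
      .replace(/[^a-z0-9\s-]/g, '') // remove special characters
      .trim()
      .replace(/\s+/g, '-') + '.md' // replace spaces with dashes
  );
}

function decreaseHeadingLevels(section: string): string {
  // '## Tech Stack' -> '# Tech Stack', '### Subsection' -> '## Subsection', and so on.
  // Naive: does not account for '#' inside fenced code blocks.
  return section.replace(/^(#{2,6})\s/gm, (_match, hashes: string) => hashes.slice(1) + ' ');
}

// headingToFilename('## Tech Stack') -> 'tech-stack.md'
```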
@@ -803,7 +790,7 @@ This document captures the CURRENT STATE of the [Project Name] codebase, includi
### Change Log
| Date | Version | Description | Author |
|------|---------|-------------|--------|
| ------ | ------- | --------------------------- | --------- |
| [Date] | 1.0 | Initial brownfield analysis | [Analyst] |
## Quick Reference - Key Files and Entry Points
@@ -828,7 +815,7 @@ This document captures the CURRENT STATE of the [Project Name] codebase, includi
### Actual Tech Stack (from package.json/requirements.txt)
| Category | Technology | Version | Notes |
|----------|------------|---------|--------|
| --------- | ---------- | ------- | -------------------------- |
| Runtime | Node.js | 16.x | [Any constraints] |
| Framework | Express | 4.18.2 | [Custom middleware?] |
| Database | PostgreSQL | 13 | [Connection pooling setup] |
@@ -870,6 +857,7 @@ project-root/
### Data Models
Instead of duplicating, reference actual model files:
- **User Model**: See `src/models/User.js`
- **Order Model**: See `src/models/Order.js`
- **Related Types**: TypeScript definitions in `src/types/`
@@ -900,7 +888,7 @@ Instead of duplicating, reference actual model files:
### External Services
| Service | Purpose | Integration Type | Key Files |
|---------|---------|------------------|-----------|
| -------- | -------- | ---------------- | ------------------------------ |
| Stripe | Payments | REST API | `src/integrations/stripe/` |
| SendGrid | Emails | SDK | `src/services/emailService.js` |
@@ -947,6 +935,7 @@ npm run test:integration # Runs integration tests (requires local DB)
### Files That Will Need Modification
Based on the enhancement requirements, these files will be affected:
- `src/services/userService.js` - Add new user fields
- `src/models/User.js` - Update schema
- `src/routes/userRoutes.js` - New endpoints
@@ -1044,7 +1033,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
## Instructions
1. **Initial Assessment**
- If user or the task being run provides a checklist name:
- Try fuzzy matching (e.g. "architecture checklist" -> "architect-checklist")
- If multiple matches found, ask user to clarify
@@ -1057,14 +1045,12 @@ If the user asks or does not specify a specific checklist, list the checklists a
- All at once (YOLO mode - recommended for checklists; a summary of each section will be presented at the end for discussion)
2. **Document and Artifact Gathering**
- Each checklist will specify its required documents/artifacts at the beginning
- Follow the checklist's specific instructions for what to gather; generally a file can be resolved in the docs folder. If it cannot be found, or you are unsure, halt and confirm with the user.
3. **Checklist Processing**
If in interactive mode:
- Work through each section of the checklist one at a time
- For each section:
- Review all items in the section following instructions for that section embedded in the checklist
@@ -1073,7 +1059,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
- Get user confirmation before proceeding to the next section, and check whether anything major requires halting to take corrective action
If in YOLO mode:
- Process all sections at once
- Create a comprehensive report of all findings
- Present the complete analysis to the user
@@ -1081,7 +1066,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
4. **Validation Approach**
For each checklist item:
- Read and understand the requirement
- Look for evidence in the documentation that satisfies the requirement
- Consider both explicit mentions and implicit coverage
@@ -1095,7 +1079,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
5. **Section Analysis**
For each section:
- Think step by step to calculate the pass rate
- Identify common themes in failed items
- Provide specific recommendations for improvement
@@ -1105,7 +1088,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
6. **Final Report**
Prepare a summary that includes:
- Overall checklist completion status
- Pass rates by section
- List of failed items with context
@@ -1149,7 +1131,6 @@ The LLM will:
2. If the section contains game flow diagrams, level layouts, or system diagrams, explain each diagram briefly with game development context before offering elicitation options (e.g., "The gameplay loop diagram shows how player actions lead to rewards and progression. Notice how each step maintains player engagement and creates opportunities for skill development.")
3. If the section contains multiple game elements (like multiple mechanics, multiple levels, multiple systems, etc.), inform the user they can apply elicitation actions to:
- The entire section as a whole
- Individual game elements within the section (specify which element when selecting an action)
@@ -1250,7 +1231,7 @@ template:
output:
format: markdown
filename: docs/game-architecture.md
title: "{{project_name}} Game Architecture Document"
title: '{{project_name}} Game Architecture Document'
workflow:
mode: interactive
@@ -1360,11 +1341,11 @@ sections:
- Game management patterns (Singleton managers, Event systems, State machines)
- Data patterns (ScriptableObject configuration, Save/Load systems)
- Unity-specific patterns (Object pooling, Coroutines, Unity Events)
template: "- **{{pattern_name}}:** {{pattern_description}} - _Rationale:_ {{rationale}}"
template: '- **{{pattern_name}}:** {{pattern_description}} - _Rationale:_ {{rationale}}'
examples:
- "**Component-Based Architecture:** Using MonoBehaviour components for game logic - _Rationale:_ Aligns with Unity's design philosophy and enables reusable, testable game systems"
- "**ScriptableObject Data:** Using ScriptableObjects for game configuration - _Rationale:_ Enables data-driven design and easy balancing without code changes"
- "**Event-Driven Communication:** Using Unity Events and C# events for system decoupling - _Rationale:_ Supports modular architecture and easier testing"
- '**ScriptableObject Data:** Using ScriptableObjects for game configuration - _Rationale:_ Enables data-driven design and easy balancing without code changes'
- '**Event-Driven Communication:** Using Unity Events and C# events for system decoupling - _Rationale:_ Supports modular architecture and easier testing'
- id: tech-stack
title: Tech Stack
@@ -1403,13 +1384,13 @@ sections:
columns: [Category, Technology, Version, Purpose, Rationale]
instruction: Populate the technology stack table with all relevant Unity technologies
examples:
- "| **Game Engine** | Unity | 2022.3.21f1 | Core game development platform | Latest LTS version, stable 2D tooling, comprehensive package ecosystem |"
- '| **Game Engine** | Unity | 2022.3.21f1 | Core game development platform | Latest LTS version, stable 2D tooling, comprehensive package ecosystem |'
- "| **Language** | C# | 10.0 | Primary scripting language | Unity's native language, strong typing, excellent tooling |"
- "| **Render Pipeline** | Universal Render Pipeline (URP) | 14.0.10 | 2D/3D rendering | Optimized for mobile, excellent 2D features, future-proof |"
- "| **Input System** | Unity Input System | 1.7.0 | Cross-platform input handling | Modern input system, supports multiple devices, rebindable controls |"
- "| **Physics** | Unity 2D Physics | Built-in | 2D collision and physics | Integrated Box2D, optimized for 2D games |"
- "| **Audio** | Unity Audio | Built-in | Audio playback and mixing | Built-in audio system with mixer support |"
- "| **Testing** | Unity Test Framework | 1.1.33 | Unit and integration testing | Built-in testing framework based on NUnit |"
- '| **Render Pipeline** | Universal Render Pipeline (URP) | 14.0.10 | 2D/3D rendering | Optimized for mobile, excellent 2D features, future-proof |'
- '| **Input System** | Unity Input System | 1.7.0 | Cross-platform input handling | Modern input system, supports multiple devices, rebindable controls |'
- '| **Physics** | Unity 2D Physics | Built-in | 2D collision and physics | Integrated Box2D, optimized for 2D games |'
- '| **Audio** | Unity Audio | Built-in | Audio playback and mixing | Built-in audio system with mixer support |'
- '| **Testing** | Unity Test Framework | 1.1.33 | Unit and integration testing | Built-in testing framework based on NUnit |'
- id: data-models
title: Game Data Models
@@ -1427,7 +1408,7 @@ sections:
repeatable: true
sections:
- id: model
title: "{{model_name}}"
title: '{{model_name}}'
template: |
**Purpose:** {{model_purpose}}
@@ -1462,7 +1443,7 @@ sections:
sections:
- id: system-list
repeatable: true
title: "{{system_name}} System"
title: '{{system_name}} System'
template: |
**Responsibility:** {{system_description}}
@@ -1986,7 +1967,7 @@ sections:
repeatable: true
sections:
- id: integration
title: "{{service_name}} Integration"
title: '{{service_name}} Integration'
template: |
- **Purpose:** {{service_purpose}}
- **Documentation:** {{service_docs_url}}
@@ -2098,12 +2079,12 @@ sections:
- id: environments
title: Build Environments
repeatable: true
template: "- **{{env_name}}:** {{env_purpose}} - {{platform_settings}}"
template: '- **{{env_name}}:** {{env_purpose}} - {{platform_settings}}'
- id: platform-specific-builds
title: Platform-Specific Build Settings
type: code
language: text
template: "{{platform_build_configurations}}"
template: '{{platform_build_configurations}}'
- id: coding-standards
title: Coding Standards
@@ -2132,9 +2113,9 @@ sections:
columns: [Element, Convention, Example]
instruction: Only include if deviating from Unity defaults
examples:
- "| MonoBehaviour | PascalCase + Component suffix | PlayerController, HealthSystem |"
- "| ScriptableObject | PascalCase + Data/Config suffix | PlayerData, GameConfig |"
- "| Prefab | PascalCase descriptive | PlayerCharacter, EnvironmentTile |"
- '| MonoBehaviour | PascalCase + Component suffix | PlayerController, HealthSystem |'
- '| ScriptableObject | PascalCase + Data/Config suffix | PlayerData, GameConfig |'
- '| Prefab | PascalCase descriptive | PlayerCharacter, EnvironmentTile |'
- id: critical-rules
title: Critical Unity Rules
instruction: |
@@ -2146,7 +2127,7 @@ sections:
Avoid obvious rules like "follow SOLID principles" or "optimize performance"
repeatable: true
template: "- **{{rule_name}}:** {{rule_description}}"
template: '- **{{rule_name}}:** {{rule_description}}'
- id: unity-specifics
title: Unity-Specific Guidelines
condition: Critical Unity-specific rules needed
@@ -2155,7 +2136,7 @@ sections:
- id: unity-lifecycle
title: Unity Lifecycle Rules
repeatable: true
template: "- **{{lifecycle_method}}:** {{usage_rule}}"
template: '- **{{lifecycle_method}}:** {{usage_rule}}'
- id: test-strategy
title: Test Strategy and Standards
@@ -2633,34 +2614,29 @@ Ask the user if they want to work through the checklist:
Generate a comprehensive validation report that includes:
1. Executive Summary
- Overall game architecture readiness (High/Medium/Low)
- Critical risks for game development
- Key strengths of the game architecture
- Unity-specific assessment
2. Game Systems Analysis
- Pass rate for each major system section
- Most concerning gaps in game architecture
- Systems requiring immediate attention
- Unity integration completeness
3. Performance Risk Assessment
- Top 5 performance risks for the game
- Mobile platform specific concerns
- Frame rate stability risks
- Memory usage concerns
4. Implementation Recommendations
- Must-fix items before development
- Unity-specific improvements needed
- Game development workflow enhancements
5. AI Agent Implementation Readiness
- Game-specific concerns for AI implementation
- Unity component complexity assessment
- Areas needing additional clarification
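For illustration, the report described above could be captured with a shape like the following; the field names are assumptions, since the checklist prescribes content rather than a data format:

```typescript
// Illustrative only: one possible typed shape for the validation report outlined above.
type Readiness = 'High' | 'Medium' | 'Low';

interface GameArchitectureValidationReport {
  executiveSummary: {
    readiness: Readiness;
    criticalRisks: string[];
    keyStrengths: string[];
    unityAssessment: string;
  };
  gameSystemsAnalysis: Array<{ section: string; passRate: number; gaps: string[] }>;
  performanceRisks: string[]; // top 5, including mobile, frame-rate, and memory concerns
  implementationRecommendations: string[];
  aiAgentReadiness: {
    concerns: string[];
    componentComplexity: string;
    clarificationsNeeded: string[];
  };
}
```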
@@ -3208,25 +3184,21 @@ Assets/
### Story Implementation Process
1. **Read Story Requirements:**
- Understand acceptance criteria
- Identify technical requirements
- Review performance constraints
2. **Plan Implementation:**
- Identify files to create/modify
- Consider Unity's component-based architecture
- Plan testing approach
3. **Implement Feature:**
- Write clean C# code following all guidelines
- Use established patterns
- Maintain stable FPS performance
4. **Test Implementation:**
- Write edit mode tests for game logic
- Write play mode tests for integration testing
- Test cross-platform functionality
@@ -3540,7 +3512,6 @@ that can handle [specific game requirements] with stable performance."
**Prerequisites**: Game planning documents must exist in `docs/` folder of Unity project
1. **Document Sharding** (CRITICAL STEP for Game Development):
- Documents created by Game Designer/Architect (in Web or IDE) MUST be sharded for development
- Use core BMad agents or tools to shard:
a) **Manual**: Use core BMad `shard-doc` task if available
@@ -3563,20 +3534,17 @@ Resulting Unity Project Folder Structure:
3. **Game Development Cycle** (Sequential, one game story at a time):
**CRITICAL CONTEXT MANAGEMENT for Unity Development**:
- **Context windows matter!** Always use fresh, clean context windows
- **Model selection matters!** Use most powerful thinking model for Game SM story creation
- **ALWAYS start new chat between Game SM, Game Dev, and QA work**
**Step 1 - Game Story Creation**:
- **NEW CLEAN CHAT** → Select powerful model → `/bmad2du/game-sm` → `*draft`
- Game SM executes create-game-story task using `game-story-tmpl`
- Review generated story in `docs/game-stories/`
- Update status from "Draft" to "Approved"
**Step 2 - Unity Game Story Implementation**:
- **NEW CLEAN CHAT** → `/bmad2du/game-developer`
- Agent asks which game story to implement
- Include story file content to save game dev agent lookup time
@@ -3585,7 +3553,6 @@ Resulting Unity Project Folder Structure:
- Game Dev marks story as "Review" when complete with all Unity tests passing
**Step 3 - Game QA Review**:
- **NEW CLEAN CHAT** → Use core `@qa` agent → execute review-story task
- QA performs senior Unity developer code review
- QA can refactor and improve Unity code directly
@@ -3625,14 +3592,12 @@ Since this expansion pack doesn't include specific brownfield templates, you'll
1. **Upload Unity project to Web UI** (GitHub URL, files, or zip)
2. **Create adapted Game Design Document**: `/bmad2du/game-designer` - Modify `game-design-doc-tmpl` to include:
- Analysis of existing game systems
- Integration points for new features
- Compatibility requirements
- Risk assessment for changes
3. **Game Architecture Planning**:
- Use `/bmad2du/game-architect` with `game-architecture-tmpl`
- Focus on how new features integrate with existing Unity systems
- Plan for gradual rollout and testing
@@ -3733,7 +3698,7 @@ Use the `shard-doc` task or `@kayvan/markdown-tree-parser` tool for automatic ga
- **Claude Code**: `/bmad2du/game-designer`, `/bmad2du/game-developer`, `/bmad2du/game-sm`, `/bmad2du/game-architect`
- **Cursor**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Windsurf**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Windsurf**: `/bmad2du/game-designer`, `/bmad2du/game-developer`, `/bmad2du/game-sm`, `/bmad2du/game-architect`
- **Trae**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Roo Code**: Select mode from mode selector with bmad2du prefix
- **GitHub Copilot**: Open the Chat view (`⌃⌘I` on Mac, `Ctrl+Alt+I` on Windows/Linux) and select the appropriate game agent.

@@ -215,7 +215,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
## Instructions
1. **Initial Assessment**
- If user or the task being run provides a checklist name:
- Try fuzzy matching (e.g. "architecture checklist" -> "architect-checklist")
- If multiple matches found, ask user to clarify
@@ -228,14 +227,12 @@ If the user asks or does not specify a specific checklist, list the checklists a
- All at once (YOLO mode - recommended for checklists; a summary of each section will be presented at the end for discussion)
2. **Document and Artifact Gathering**
- Each checklist will specify its required documents/artifacts at the beginning
- Follow the checklist's specific instructions for what to gather; generally a file can be resolved in the docs folder. If it cannot be found, or you are unsure, halt and confirm with the user.
3. **Checklist Processing**
If in interactive mode:
- Work through each section of the checklist one at a time
- For each section:
- Review all items in the section following instructions for that section embedded in the checklist
@@ -244,7 +241,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
- Get user confirmation before proceeding to the next section, and check whether anything major requires halting to take corrective action
If in YOLO mode:
- Process all sections at once
- Create a comprehensive report of all findings
- Present the complete analysis to the user
@@ -252,7 +248,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
4. **Validation Approach**
For each checklist item:
- Read and understand the requirement
- Look for evidence in the documentation that satisfies the requirement
- Consider both explicit mentions and implicit coverage
@@ -266,7 +261,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
5. **Section Analysis**
For each section:
- Think step by step to calculate the pass rate
- Identify common themes in failed items
- Provide specific recommendations for improvement
@@ -276,7 +270,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
6. **Final Report**
Prepare a summary that includes:
- Overall checklist completion status
- Pass rates by section
- List of failed items with context
@@ -393,13 +386,11 @@ CRITICAL: Use proper parsing that understands markdown context. A ## inside a co
For each extracted section:
1. **Generate filename**: Convert the section heading to lowercase-dash-case
- Remove special characters
- Replace spaces with dashes
- Example: "## Tech Stack" → `tech-stack.md`
2. **Adjust heading levels**:
- The level 2 heading becomes level 1 (# instead of ##) in the sharded new document
- All subsection levels decrease by 1:
@@ -501,7 +492,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
[[LLM: Begin by understanding the game design context and goals. Ask clarifying questions if needed to determine the best approach for game-specific ideation.]]
1. **Establish Game Context**
- Understand the game genre or opportunity area
- Identify target audience and platform constraints
- Determine session goals (concept exploration vs. mechanic refinement)
@@ -519,7 +509,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **"What If" Game Scenarios**
[[LLM: Generate provocative what-if questions that challenge game design assumptions and expand thinking beyond current genre limitations.]]
- What if players could rewind time in any genre?
- What if the game world reacted to the player's real-world location?
- What if failure was more rewarding than success?
@@ -528,7 +517,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Cross-Genre Fusion**
[[LLM: Help user combine unexpected game genres and mechanics to create unique experiences.]]
- "How might [genre A] mechanics work in [genre B]?"
- Puzzle mechanics in action games
- Dating sim elements in strategy games
@@ -537,7 +525,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
3. **Player Motivation Reversal**
[[LLM: Flip traditional player motivations to reveal new gameplay possibilities.]]
- What if losing was the goal?
- What if cooperation was forced in competitive games?
- What if players had to help their enemies?
@@ -554,7 +541,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **SCAMPER for Game Mechanics**
[[LLM: Guide through each SCAMPER prompt specifically for game design.]]
- **S** = Substitute: What mechanics can be substituted? (walking → flying → swimming)
- **C** = Combine: What systems can be merged? (inventory + character growth)
- **A** = Adapt: What mechanics from other media? (books, movies, sports)
@@ -565,7 +551,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Player Agency Spectrum**
[[LLM: Explore different levels of player control and agency across game systems.]]
- Full Control: Direct character movement, combat, building
- Indirect Control: Setting rules, giving commands, environmental changes
- Influence Only: Suggestions, preferences, emotional reactions
@@ -573,7 +558,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
3. **Temporal Game Design**
[[LLM: Explore how time affects gameplay and player experience.]]
- Real-time vs. turn-based mechanics
- Time travel and manipulation
- Persistent vs. session-based progress
@@ -584,7 +568,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **Emotion-First Design**
[[LLM: Start with target emotions and work backward to mechanics that create them.]]
- Target Emotion: Wonder → Mechanics: Discovery, mystery, scale
- Target Emotion: Triumph → Mechanics: Challenge, skill growth, recognition
- Target Emotion: Connection → Mechanics: Cooperation, shared goals, communication
@@ -592,7 +575,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Player Archetype Brainstorming**
[[LLM: Design for different player types and motivations.]]
- Achievers: Progression, completion, mastery
- Explorers: Discovery, secrets, world-building
- Socializers: Interaction, cooperation, community
@@ -601,7 +583,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
3. **Accessibility-First Innovation**
[[LLM: Generate ideas that make games more accessible while creating new gameplay.]]
- Visual impairment considerations leading to audio-focused mechanics
- Motor accessibility inspiring one-handed or simplified controls
- Cognitive accessibility driving clear feedback and pacing
@@ -611,7 +592,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **Environmental Storytelling**
[[LLM: Brainstorm ways the game world itself tells stories without explicit narrative.]]
- How does the environment show history?
- What do interactive objects reveal about characters?
- How can level design communicate mood?
@@ -619,7 +599,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Player-Generated Narrative**
[[LLM: Explore ways players create their own stories through gameplay.]]
- Emergent storytelling through player choices
- Procedural narrative generation
- Player-to-player story sharing
@@ -627,7 +606,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
3. **Genre Expectation Subversion**
[[LLM: Identify and deliberately subvert player expectations within genres.]]
- Fantasy RPG where magic is mundane
- Horror game where monsters are friendly
- Racing game where going slow is optimal
@@ -637,7 +615,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **Platform-Specific Design**
[[LLM: Generate ideas that leverage unique platform capabilities.]]
- Mobile: GPS, accelerometer, camera, always-connected
- Web: URLs, tabs, social sharing, real-time collaboration
- Console: Controllers, TV viewing, couch co-op
@@ -645,7 +622,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Constraint-Based Creativity**
[[LLM: Use technical or design constraints as creative catalysts.]]
- One-button games
- Games without graphics
- Games that play in notification bars
@@ -691,19 +667,16 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
[[LLM: Guide the brainstorming session with appropriate pacing for game design exploration.]]
1. **Inspiration Phase** (10-15 min)
- Reference existing games and mechanics
- Explore player experiences and emotions
- Gather visual and thematic inspiration
2. **Divergent Exploration** (25-35 min)
- Generate many game concepts or mechanics
- Use expansion and fusion techniques
- Encourage wild and impossible ideas
3. **Player-Centered Filtering** (15-20 min)
- Consider target audience reactions
- Evaluate emotional impact and engagement
- Group ideas by player experience goals
@@ -824,63 +797,54 @@ CRITICAL: First, help the user select the most appropriate research focus based
Present these numbered options to the user:
1. **Product Validation Research**
- Validate product hypotheses and market fit
- Test assumptions about user needs and solutions
- Assess technical and business feasibility
- Identify risks and mitigation strategies
2. **Market Opportunity Research**
- Analyze market size and growth potential
- Identify market segments and dynamics
- Assess market entry strategies
- Evaluate timing and market readiness
3. **User & Customer Research**
- Deep dive into user personas and behaviors
- Understand jobs-to-be-done and pain points
- Map customer journeys and touchpoints
- Analyze willingness to pay and value perception
4. **Competitive Intelligence Research**
- Detailed competitor analysis and positioning
- Feature and capability comparisons
- Business model and strategy analysis
- Identify competitive advantages and gaps
5. **Technology & Innovation Research**
- Assess technology trends and possibilities
- Evaluate technical approaches and architectures
- Identify emerging technologies and disruptions
- Analyze build vs. buy vs. partner options
6. **Industry & Ecosystem Research**
- Map industry value chains and dynamics
- Identify key players and relationships
- Analyze regulatory and compliance factors
- Understand partnership opportunities
7. **Strategic Options Research**
- Evaluate different strategic directions
- Assess business model alternatives
- Analyze go-to-market strategies
- Consider expansion and scaling paths
8. **Risk & Feasibility Research**
- Identify and assess various risk factors
- Evaluate implementation challenges
- Analyze resource requirements
- Consider regulatory and legal implications
9. **Custom Research Focus**
- User-defined research objectives
- Specialized domain investigation
- Cross-functional research needs
@@ -1049,13 +1013,11 @@ CRITICAL: collaborate with the user to develop specific, actionable research que
### 5. Review and Refinement
1. **Present Complete Prompt**
- Show the full research prompt
- Explain key elements and rationale
- Highlight any assumptions made
2. **Gather Feedback**
- Are the objectives clear and correct?
- Do the questions address all concerns?
- Is the scope appropriate?
@@ -1113,7 +1075,6 @@ CRITICAL: collaborate with the user to develop specific, actionable research que
2. If the section contains game flow diagrams, level layouts, or system diagrams, explain each diagram briefly with game development context before offering elicitation options (e.g., "The gameplay loop diagram shows how player actions lead to rewards and progression. Notice how each step maintains player engagement and creates opportunities for skill development.")
3. If the section contains multiple game elements (like multiple mechanics, multiple levels, multiple systems, etc.), inform the user they can apply elicitation actions to:
- The entire section as a whole
- Individual game elements within the section (specify which element when selecting an action)
@@ -1214,7 +1175,7 @@ template:
output:
format: markdown
filename: docs/game-design-document.md
title: "{{game_title}} Game Design Document (GDD)"
title: '{{game_title}} Game Design Document (GDD)'
workflow:
mode: interactive
@@ -1262,8 +1223,8 @@ sections:
**Primary:** {{age_range}}, {{player_type}}, {{platform_preference}}
**Secondary:** {{secondary_audience}}
examples:
- "Primary: Ages 8-16, casual mobile gamers, prefer short play sessions"
- "Secondary: Adult puzzle enthusiasts, educators looking for teaching tools"
- 'Primary: Ages 8-16, casual mobile gamers, prefer short play sessions'
- 'Secondary: Adult puzzle enthusiasts, educators looking for teaching tools'
- id: platform-technical
title: Platform & Technical Requirements
instruction: Based on the technical preferences or user input, define the target platforms and Unity-specific requirements
@@ -1274,7 +1235,7 @@ sections:
**Screen Support:** {{resolution_range}}
**Build Targets:** {{build_targets}}
examples:
- "Primary Platform: Mobile (iOS/Android), Engine: Unity 2022.3 LTS & C#, Performance: 60 FPS on iPhone 8/Galaxy S8"
- 'Primary Platform: Mobile (iOS/Android), Engine: Unity 2022.3 LTS & C#, Performance: 60 FPS on iPhone 8/Galaxy S8'
- id: unique-selling-points
title: Unique Selling Points
instruction: List 3-5 key features that differentiate this game from competitors
@@ -1325,8 +1286,8 @@ sections:
- {{loss_condition_1}} - Trigger: {{unity_trigger}}
- {{loss_condition_2}} - Trigger: {{unity_trigger}}
examples:
- "Victory: Player reaches exit portal - Unity Event: OnTriggerEnter2D with Portal tag"
- "Failure: Health reaches zero - Trigger: Health component value <= 0"
- 'Victory: Player reaches exit portal - Unity Event: OnTriggerEnter2D with Portal tag'
- 'Failure: Health reaches zero - Trigger: Health component value <= 0'
- id: game-mechanics
title: Game Mechanics
@@ -1338,7 +1299,7 @@ sections:
repeatable: true
sections:
- id: mechanic
title: "{{mechanic_name}}"
title: '{{mechanic_name}}'
template: |
**Description:** {{detailed_description}}
@@ -1360,8 +1321,8 @@ sections:
- {{script_name}}.cs - {{responsibility}}
- {{manager_script}}.cs - {{management_role}}
examples:
- "Components Needed: Rigidbody2D, BoxCollider2D, PlayerMovement script"
- "Physics Requirements: 2D Physics material for ground friction, Gravity scale 3"
- 'Components Needed: Rigidbody2D, BoxCollider2D, PlayerMovement script'
- 'Physics Requirements: 2D Physics material for ground friction, Gravity scale 3'
- id: controls
title: Controls
instruction: Define all input methods for different platforms using Unity's Input System
@@ -1416,7 +1377,7 @@ sections:
**Late Game:** {{duration}} - {{difficulty_description}}
- Unity Config: {{scriptable_object_values}}
examples:
- "enemy speed: 2.0f, jump height: 4.5f, obstacle density: 0.3f"
- 'enemy speed: 2.0f, jump height: 4.5f, obstacle density: 0.3f'
- id: economy-resources
title: Economy & Resources
condition: has_economy
@@ -1439,7 +1400,7 @@ sections:
repeatable: true
sections:
- id: level-type
title: "{{level_type_name}}"
title: '{{level_type_name}}'
template: |
**Purpose:** {{gameplay_purpose}}
**Target Duration:** {{target_time}}
@@ -1463,7 +1424,7 @@ sections:
- {{prefab_name}} - {{prefab_purpose}}
examples:
- "Environment: TilemapRenderer with Platform tileset, Lighting: 2D Global Light + Point Lights"
- 'Environment: TilemapRenderer with Platform tileset, Lighting: 2D Global Light + Point Lights'
- id: level-progression
title: Level Progression
template: |
@@ -1478,7 +1439,7 @@ sections:
- Addressable Assets: {{addressable_groups}}
- Loading Screens: {{loading_implementation}}
examples:
- "Scene Naming: World{X}_Level{Y}_Name, Addressable Groups: Levels_World1, World_Environments"
- 'Scene Naming: World{X}_Level{Y}_Name, Addressable Groups: Levels_World1, World_Environments'
- id: technical-specifications
title: Technical Specifications
@@ -1510,7 +1471,7 @@ sections:
- Physics Settings: {{physics_config}}
examples:
- com.unity.addressables 1.20.5 - Asset loading and memory management
- "Color Space: Linear, Quality: Mobile/Desktop presets, Gravity: -20"
- 'Color Space: Linear, Quality: Mobile/Desktop presets, Gravity: -20'
- id: performance-requirements
title: Performance Requirements
template: |
@@ -1526,7 +1487,7 @@ sections:
- GC Allocs: <{{gc_limit}}KB per frame
- Draw Calls: <{{draw_calls}} per frame
examples:
- "60 FPS (minimum 30), CPU: <16.67ms, GPU: <16.67ms, GC: <4KB, Draws: <50"
- '60 FPS (minimum 30), CPU: <16.67ms, GPU: <16.67ms, GC: <4KB, Draws: <50'
- id: platform-specific
title: Platform Specific Requirements
template: |
@@ -1549,7 +1510,7 @@ sections:
- Browser Support: {{browser_list}}
- Compression: {{compression_format}}
examples:
- "Resolution: 1280x720 - 4K, Gamepad: Xbox/PlayStation controllers via Input System"
- 'Resolution: 1280x720 - 4K, Gamepad: Xbox/PlayStation controllers via Input System'
- id: asset-requirements
title: Asset Requirements
instruction: Define asset specifications for Unity pipeline optimization
@@ -1575,7 +1536,7 @@ sections:
- Font: {{font_requirements}}
- Icon Sizes: {{icon_specifications}}
examples:
- "Sprites: 32x32 to 256x256 at 16 PPU, Format: RGBA32 for quality/RGBA16 for performance"
- 'Sprites: 32x32 to 256x256 at 16 PPU, Format: RGBA32 for quality/RGBA16 for performance'
- id: technical-architecture-requirements
title: Technical Architecture Requirements
@@ -1617,8 +1578,8 @@ sections:
- Prefabs: {{prefab_naming}}
- Scenes: {{scene_naming}}
examples:
- "Architecture: Component-Based with ScriptableObject data containers"
- "Scripts: PascalCase (PlayerController), Prefabs: Player_Prefab, Scenes: Level_01_Forest"
- 'Architecture: Component-Based with ScriptableObject data containers'
- 'Scripts: PascalCase (PlayerController), Prefabs: Player_Prefab, Scenes: Level_01_Forest'
- id: unity-systems-integration
title: Unity Systems Integration
template: |
@@ -1640,8 +1601,8 @@ sections:
- **Memory Management:** {{memory_strategy}}
- **Build Pipeline:** {{build_automation}}
examples:
- "Input System: Action Maps for Menu/Gameplay contexts with device switching"
- "DOTween: Smooth UI transitions and gameplay animations"
- 'Input System: Action Maps for Menu/Gameplay contexts with device switching'
- 'DOTween: Smooth UI transitions and gameplay animations'
- id: data-management
title: Data Management
template: |
@@ -1664,8 +1625,8 @@ sections:
- **Memory Pools:** {{pooling_objects}}
- **Asset References:** {{asset_reference_system}}
examples:
- "Save Data: JSON format with AES encryption, stored in persistent data path"
- "ScriptableObjects: Game settings, level configurations, character data"
- 'Save Data: JSON format with AES encryption, stored in persistent data path'
- 'ScriptableObjects: Game settings, level configurations, character data'
- id: development-phases
title: Development Phases & Epic Planning
@@ -1677,15 +1638,15 @@ sections:
instruction: Present a high-level list of all phases for user approval. Each phase's design should deliver significant Unity functionality.
type: numbered-list
examples:
- "Phase 1: Unity Foundation & Core Systems: Project setup, input handling, basic scene management"
- "Phase 2: Core Game Mechanics: Player controller, physics systems, basic gameplay loop"
- "Phase 3: Level Systems & Content Pipeline: Scene loading, prefab systems, level progression"
- "Phase 4: Polish & Platform Optimization: Performance tuning, platform-specific features, deployment"
- 'Phase 1: Unity Foundation & Core Systems: Project setup, input handling, basic scene management'
- 'Phase 2: Core Game Mechanics: Player controller, physics systems, basic gameplay loop'
- 'Phase 3: Level Systems & Content Pipeline: Scene loading, prefab systems, level progression'
- 'Phase 4: Polish & Platform Optimization: Performance tuning, platform-specific features, deployment'
- id: phase-1-foundation
title: "Phase 1: Unity Foundation & Core Systems ({{duration}})"
title: 'Phase 1: Unity Foundation & Core Systems ({{duration}})'
sections:
- id: foundation-design
title: "Design: Unity Project Foundation"
title: 'Design: Unity Project Foundation'
type: bullet-list
template: |
- Unity project setup with proper folder structure and naming conventions
@@ -1695,9 +1656,9 @@ sections:
- Development tools setup (debugging, profiling integration)
- Initial build pipeline and platform configuration
examples:
- "Input System: Configure PlayerInput component with Action Maps for movement and UI"
- 'Input System: Configure PlayerInput component with Action Maps for movement and UI'
- id: core-systems-design
title: "Design: Essential Game Systems"
title: 'Design: Essential Game Systems'
type: bullet-list
template: |
- Save/Load system implementation with {{save_format}} format
@@ -1707,10 +1668,10 @@ sections:
- Basic UI framework and canvas configuration
- Settings and configuration management with ScriptableObjects
- id: phase-2-gameplay
title: "Phase 2: Core Gameplay Implementation ({{duration}})"
title: 'Phase 2: Core Gameplay Implementation ({{duration}})'
sections:
- id: gameplay-mechanics-design
title: "Design: Primary Game Mechanics"
title: 'Design: Primary Game Mechanics'
type: bullet-list
template: |
- Player controller with {{movement_type}} movement system
@@ -1720,7 +1681,7 @@ sections:
- Basic collision detection and response systems
- Animation system integration with Animator controllers
- id: level-systems-design
title: "Design: Level & Content Systems"
title: 'Design: Level & Content Systems'
type: bullet-list
template: |
- Scene loading and transition system
@@ -1730,10 +1691,10 @@ sections:
- Collectibles and pickup systems
- Victory/defeat condition implementation
- id: phase-3-polish
title: "Phase 3: Polish & Optimization ({{duration}})"
title: 'Phase 3: Polish & Optimization ({{duration}})'
sections:
- id: performance-design
title: "Design: Performance & Platform Optimization"
title: 'Design: Performance & Platform Optimization'
type: bullet-list
template: |
- Unity Profiler analysis and optimization passes
@@ -1743,7 +1704,7 @@ sections:
- Build size optimization and asset bundling
- Quality settings configuration for different device tiers
- id: user-experience-design
title: "Design: User Experience & Polish"
title: 'Design: User Experience & Polish'
type: bullet-list
template: |
- Complete UI/UX implementation with responsive design
@@ -1768,10 +1729,10 @@ sections:
- Cross Cutting Concerns should flow through epics and stories and not be final stories. For example, adding a logging framework as a last story of an epic, or at the end of a project as a final epic or story would be terrible as we would not have logging from the beginning.
elicit: true
examples:
- "Epic 1: Unity Foundation & Core Systems: Project setup, input handling, basic scene management"
- "Epic 2: Core Game Mechanics: Player controller, physics systems, basic gameplay loop"
- "Epic 3: Level Systems & Content Pipeline: Scene loading, prefab systems, level progression"
- "Epic 4: Polish & Platform Optimization: Performance tuning, platform-specific features, deployment"
- 'Epic 1: Unity Foundation & Core Systems: Project setup, input handling, basic scene management'
- 'Epic 2: Core Game Mechanics: Player controller, physics systems, basic gameplay loop'
- 'Epic 3: Level Systems & Content Pipeline: Scene loading, prefab systems, level progression'
- 'Epic 4: Polish & Platform Optimization: Performance tuning, platform-specific features, deployment'
- id: epic-details
title: Epic {{epic_number}} {{epic_title}}
@@ -1793,13 +1754,13 @@ sections:
- Think "junior developer working for 2-4 hours" - stories must be small, focused, and self-contained
- If a story seems complex, break it down further as long as it can deliver a vertical slice
elicit: true
template: "{{epic_goal}}"
template: '{{epic_goal}}'
sections:
- id: story
title: Story {{epic_number}}.{{story_number}} {{story_title}}
repeatable: true
instruction: Provide a clear, concise description of what this story implements. Focus on the specific game feature or system being built. Reference the GDD section that defines this feature and reference the gamearchitecture section for additional implementation and integration specifics.
template: "{{clear_description_of_what_needs_to_be_implemented}}"
template: '{{clear_description_of_what_needs_to_be_implemented}}'
sections:
- id: acceptance-criteria
title: Acceptance Criteria
@@ -1809,7 +1770,7 @@ sections:
title: Functional Requirements
type: checklist
items:
- "{{specific_functional_requirement}}"
- '{{specific_functional_requirement}}'
- id: technical-requirements
title: Technical Requirements
type: checklist
@@ -1817,14 +1778,14 @@ sections:
- Code follows C# best practices
- Maintains stable frame rate on target devices
- No memory leaks or performance degradation
- "{{specific_technical_requirement}}"
- '{{specific_technical_requirement}}'
- id: game-design-requirements
title: Game Design Requirements
type: checklist
items:
- "{{gameplay_requirement_from_gdd}}"
- "{{balance_requirement_if_applicable}}"
- "{{player_experience_requirement}}"
- '{{gameplay_requirement_from_gdd}}'
- '{{balance_requirement_if_applicable}}'
- '{{player_experience_requirement}}'
- id: success-metrics
title: Success Metrics & Quality Assurance
@@ -1842,8 +1803,8 @@ sections:
- **Build Size:** Final build <{{size_limit}}MB for mobile, <{{desktop_limit}}MB for desktop
- **Battery Life:** Mobile gameplay sessions >{{battery_target}} hours on average device
examples:
- "Frame Rate: Consistent 60 FPS with <5% drops below 45 FPS on target hardware"
- "Crash Rate: <0.5% across iOS/Android, <0.1% on desktop platforms"
- 'Frame Rate: Consistent 60 FPS with <5% drops below 45 FPS on target hardware'
- 'Crash Rate: <0.5% across iOS/Android, <0.1% on desktop platforms'
- id: gameplay-metrics
title: Gameplay & User Engagement Metrics
type: bullet-list
@@ -1855,8 +1816,8 @@ sections:
- **Gameplay Completion:** {{completion_rate}}% complete main game content
- **Control Responsiveness:** Input lag <{{input_lag}}ms on all platforms
examples:
- "Tutorial Completion: 85% of players complete movement and basic mechanics tutorial"
- "Session Duration: Average 15-20 minutes per session for mobile, 30-45 minutes for desktop"
- 'Tutorial Completion: 85% of players complete movement and basic mechanics tutorial'
- 'Session Duration: Average 15-20 minutes per session for mobile, 30-45 minutes for desktop'
- id: platform-specific-metrics
title: Platform-Specific Quality Metrics
type: table
@@ -1901,17 +1862,17 @@ sections:
- Consider cross-platform testing requirements
- Account for Unity build and deployment steps
examples:
- "Foundation stories: Individual Unity systems (Input, Audio, Scene Management) - 1-2 days each"
- "Feature stories: Complete gameplay mechanics with UI and feedback - 2-4 days each"
- 'Foundation stories: Individual Unity systems (Input, Audio, Scene Management) - 1-2 days each'
- 'Feature stories: Complete gameplay mechanics with UI and feedback - 2-4 days each'
- id: recommended-agents
title: Recommended BMad Agent Sequence
type: numbered-list
template: |
1. **{{agent_name}}**: {{agent_responsibility}}
examples:
- "Unity Architect: Create detailed technical architecture document with specific Unity implementation patterns"
- "Unity Developer: Implement core systems and gameplay mechanics according to architecture"
- "QA Tester: Validate performance metrics and cross-platform functionality"
- 'Unity Architect: Create detailed technical architecture document with specific Unity implementation patterns'
- 'Unity Developer: Implement core systems and gameplay mechanics according to architecture'
- 'QA Tester: Validate performance metrics and cross-platform functionality'
==================== END: .bmad-2d-unity-game-dev/templates/game-design-doc-tmpl.yaml ====================
==================== START: .bmad-2d-unity-game-dev/templates/level-design-doc-tmpl.yaml ====================
@@ -1922,7 +1883,7 @@ template:
output:
format: markdown
filename: docs/level-design-document.md
title: "{{game_title}} Level Design Document"
title: '{{game_title}} Level Design Document'
workflow:
mode: interactive
@@ -1983,7 +1944,7 @@ sections:
repeatable: true
sections:
- id: level-category
title: "{{category_name}} Levels"
title: '{{category_name}} Levels'
template: |
**Purpose:** {{gameplay_purpose}}
@@ -2409,7 +2370,7 @@ template:
output:
format: markdown
filename: docs/game-brief.md
title: "{{game_title}} Game Brief"
title: '{{game_title}} Game Brief'
workflow:
mode: interactive
@@ -2695,21 +2656,21 @@ sections:
title: Development Roadmap
sections:
- id: phase-1-preproduction
title: "Phase 1: Pre-Production ({{duration}})"
title: 'Phase 1: Pre-Production ({{duration}})'
type: bullet-list
template: |
- Detailed Game Design Document creation
- Technical architecture planning
- Art style exploration and pipeline setup
- id: phase-2-prototype
title: "Phase 2: Prototype ({{duration}})"
title: 'Phase 2: Prototype ({{duration}})'
type: bullet-list
template: |
- Core mechanic implementation
- Technical proof of concept
- Initial playtesting and iteration
- id: phase-3-production
title: "Phase 3: Production ({{duration}})"
title: 'Phase 3: Production ({{duration}})'
type: bullet-list
template: |
- Full feature development
@@ -3237,7 +3198,6 @@ that can handle [specific game requirements] with stable performance."
**Prerequisites**: Game planning documents must exist in `docs/` folder of Unity project
1. **Document Sharding** (CRITICAL STEP for Game Development):
- Documents created by Game Designer/Architect (in Web or IDE) MUST be sharded for development
- Use core BMad agents or tools to shard:
a) **Manual**: Use core BMad `shard-doc` task if available
@@ -3260,20 +3220,17 @@ Resulting Unity Project Folder Structure:
3. **Game Development Cycle** (Sequential, one game story at a time):
**CRITICAL CONTEXT MANAGEMENT for Unity Development**:
- **Context windows matter!** Always use fresh, clean context windows
- **Model selection matters!** Use most powerful thinking model for Game SM story creation
- **ALWAYS start new chat between Game SM, Game Dev, and QA work**
**Step 1 - Game Story Creation**:
- **NEW CLEAN CHAT** → Select powerful model → `/bmad2du/game-sm` → `*draft`
- Game SM executes create-game-story task using `game-story-tmpl`
- Review generated story in `docs/game-stories/`
- Update status from "Draft" to "Approved"
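As a concrete illustration of Step 1 above, in an IDE that uses slash commands (Claude Code shown here; Cursor and Windsurf use the `@` prefix instead), the kickoff might look like this minimal sketch; the story filename is hypothetical:

```bash
# New clean chat with a powerful model selected
/bmad2du/game-sm
*draft
# Review the generated story, e.g. docs/game-stories/1-1-player-movement.md (name hypothetical),
# then change its status from "Draft" to "Approved"
```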
**Step 2 - Unity Game Story Implementation**:
- **NEW CLEAN CHAT** → `/bmad2du/game-developer`
- Agent asks which game story to implement
- Include story file content to save game dev agent lookup time
@@ -3282,7 +3239,6 @@ Resulting Unity Project Folder Structure:
- Game Dev marks story as "Review" when complete with all Unity tests passing
**Step 3 - Game QA Review**:
- **NEW CLEAN CHAT** → Use core `@qa` agent → execute review-story task
- QA performs senior Unity developer code review
- QA can refactor and improve Unity code directly
@@ -3322,14 +3278,12 @@ Since this expansion pack doesn't include specific brownfield templates, you'll
1. **Upload Unity project to Web UI** (GitHub URL, files, or zip)
2. **Create adapted Game Design Document**: `/bmad2du/game-designer` - Modify `game-design-doc-tmpl` to include:
- Analysis of existing game systems
- Integration points for new features
- Compatibility requirements
- Risk assessment for changes
3. **Game Architecture Planning**:
- Use `/bmad2du/game-architect` with `game-architecture-tmpl`
- Focus on how new features integrate with existing Unity systems
- Plan for gradual rollout and testing
@@ -3430,7 +3384,7 @@ Use the `shard-doc` task or `@kayvan/markdown-tree-parser` tool for automatic ga
- **Claude Code**: `/bmad2du/game-designer`, `/bmad2du/game-developer`, `/bmad2du/game-sm`, `/bmad2du/game-architect`
- **Cursor**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Windsurf**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Windsurf**: `/bmad2du/game-designer`, `/bmad2du/game-developer`, `/bmad2du/game-sm`, `/bmad2du/game-architect`
- **Trae**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Roo Code**: Select mode from mode selector with bmad2du prefix
- **GitHub Copilot**: Open the Chat view (`⌃⌘I` on Mac, `Ctrl+Alt+I` on Windows/Linux) and select the appropriate game agent.


@@ -108,7 +108,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
## Instructions
1. **Initial Assessment**
- If user or the task being run provides a checklist name:
- Try fuzzy matching (e.g. "architecture checklist" -> "architect-checklist")
- If multiple matches found, ask user to clarify
@@ -121,14 +120,12 @@ If the user asks or does not specify a specific checklist, list the checklists a
- All at once (YOLO mode - recommended for checklists, there will be a summary of sections at the end to discuss)
2. **Document and Artifact Gathering**
- Each checklist will specify its required documents/artifacts at the beginning
   - Follow the checklist's specific instructions for what to gather; generally a file can be resolved in the docs folder. If not, or if unsure, halt and ask or confirm with the user.
3. **Checklist Processing**
If in interactive mode:
- Work through each section of the checklist one at a time
- For each section:
- Review all items in the section following instructions for that section embedded in the checklist
@@ -137,7 +134,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
     - Get user confirmation before proceeding to the next section, or halt and take corrective action if anything major is found
If in YOLO mode:
- Process all sections at once
- Create a comprehensive report of all findings
- Present the complete analysis to the user
@@ -145,7 +141,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
4. **Validation Approach**
For each checklist item:
- Read and understand the requirement
- Look for evidence in the documentation that satisfies the requirement
- Consider both explicit mentions and implicit coverage
@@ -159,7 +154,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
5. **Section Analysis**
For each section:
- think step by step to calculate pass rate
- Identify common themes in failed items
- Provide specific recommendations for improvement
@@ -169,7 +163,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
6. **Final Report**
Prepare a summary that includes:
- Overall checklist completion status
- Pass rates by section
- List of failed items with context
@@ -357,7 +350,6 @@ The goal is quality delivery, not just checking boxes.]]
1. **Requirements Met:**
[[LLM: Be specific - list each requirement and whether it's complete. Include game-specific requirements from GDD]]
- [ ] All functional requirements specified in the story are implemented.
- [ ] All acceptance criteria defined in the story are met.
- [ ] Game Design Document (GDD) requirements referenced in the story are implemented.
@@ -366,7 +358,6 @@ The goal is quality delivery, not just checking boxes.]]
2. **Coding Standards & Project Structure:**
[[LLM: Code quality matters for maintainability. Check Unity-specific patterns and C# standards]]
- [ ] All new/modified code strictly adheres to `Operational Guidelines`.
- [ ] All new/modified code aligns with `Project Structure` (Scripts/, Prefabs/, Scenes/, etc.).
- [ ] Adherence to `Tech Stack` for Unity version and packages used.
@@ -380,7 +371,6 @@ The goal is quality delivery, not just checking boxes.]]
3. **Testing:**
[[LLM: Testing proves your code works. Include Unity-specific testing with NUnit and manual testing]]
- [ ] All required unit tests (NUnit) as per the story and testing strategy are implemented.
- [ ] All required integration tests (if applicable) are implemented.
- [ ] Manual testing performed in Unity Editor for all game functionality.
@@ -392,7 +382,6 @@ The goal is quality delivery, not just checking boxes.]]
4. **Functionality & Verification:**
[[LLM: Did you actually run and test your code in Unity? Be specific about game mechanics tested]]
- [ ] Functionality has been manually verified in Unity Editor and play mode.
- [ ] Game mechanics work as specified in the GDD.
- [ ] Player controls and input handling work correctly.
@@ -405,7 +394,6 @@ The goal is quality delivery, not just checking boxes.]]
5. **Story Administration:**
[[LLM: Documentation helps the next developer. Include Unity-specific implementation notes]]
- [ ] All tasks within the story file are marked as complete.
- [ ] Any clarifications or decisions made during development are documented.
- [ ] Unity-specific implementation details documented (scene changes, prefab modifications).
@@ -415,7 +403,6 @@ The goal is quality delivery, not just checking boxes.]]
6. **Dependencies, Build & Configuration:**
[[LLM: Build issues block everyone. Ensure Unity project builds for all target platforms]]
- [ ] Unity project builds successfully without errors.
- [ ] Project builds for all target platforms (desktop/mobile as specified).
- [ ] Any new Unity packages or Asset Store items were pre-approved OR approved by user.
@@ -427,7 +414,6 @@ The goal is quality delivery, not just checking boxes.]]
7. **Game-Specific Quality:**
[[LLM: Game quality matters. Check performance, game feel, and player experience]]
- [ ] Frame rate meets target (30/60 FPS) on all platforms.
- [ ] Memory usage within acceptable limits.
- [ ] Game feel and responsiveness meet design requirements.
@@ -439,7 +425,6 @@ The goal is quality delivery, not just checking boxes.]]
8. **Documentation (If Applicable):**
[[LLM: Good documentation prevents future confusion. Include Unity-specific docs]]
- [ ] Code documentation (XML comments) for public APIs complete.
- [ ] Unity component documentation in Inspector updated.
- [ ] User-facing documentation updated, if changes impact players.


@@ -286,7 +286,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
## Instructions
1. **Initial Assessment**
- If user or the task being run provides a checklist name:
- Try fuzzy matching (e.g. "architecture checklist" -> "architect-checklist")
- If multiple matches found, ask user to clarify
@@ -299,14 +298,12 @@ If the user asks or does not specify a specific checklist, list the checklists a
- All at once (YOLO mode - recommended for checklists, there will be a summary of sections at the end to discuss)
2. **Document and Artifact Gathering**
- Each checklist will specify its required documents/artifacts at the beginning
   - Follow the checklist's specific instructions for what to gather; generally a file can be resolved in the docs folder. If not, or if unsure, halt and ask or confirm with the user.
3. **Checklist Processing**
If in interactive mode:
- Work through each section of the checklist one at a time
- For each section:
- Review all items in the section following instructions for that section embedded in the checklist
@@ -315,7 +312,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
     - Get user confirmation before proceeding to the next section, or halt and take corrective action if anything major is found
If in YOLO mode:
- Process all sections at once
- Create a comprehensive report of all findings
- Present the complete analysis to the user
@@ -323,7 +319,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
4. **Validation Approach**
For each checklist item:
- Read and understand the requirement
- Look for evidence in the documentation that satisfies the requirement
- Consider both explicit mentions and implicit coverage
@@ -337,7 +332,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
5. **Section Analysis**
For each section:
- think step by step to calculate pass rate
- Identify common themes in failed items
- Provide specific recommendations for improvement
@@ -347,7 +341,6 @@ If the user asks or does not specify a specific checklist, list the checklists a
6. **Final Report**
Prepare a summary that includes:
- Overall checklist completion status
- Pass rates by section
- List of failed items with context
@@ -387,7 +380,6 @@ The LLM will:
### 1. Initial Setup & Mode Selection
- **Acknowledge Task & Inputs:**
- Confirm with the user that the "Game Development Correct Course Task" is being initiated.
- Verify the change trigger (e.g., performance issue, platform constraint, gameplay feedback, technical blocker).
- Confirm access to relevant game artifacts:
@@ -408,7 +400,6 @@ The LLM will:
### 2. Execute Game Development Checklist Analysis
- Systematically work through the game-change-checklist sections:
1. **Change Context & Game Impact**
2. **Feature/System Impact Analysis**
3. **Technical Artifact Conflict Resolution**
@@ -433,7 +424,6 @@ The LLM will:
Based on the analysis and agreed path forward:
- **Identify affected game artifacts requiring updates:**
- GDD sections (mechanics, systems, progression)
- Technical specifications (architecture, performance targets)
- Unity-specific configurations (build settings, quality settings)
@@ -442,7 +432,6 @@ Based on the analysis and agreed path forward:
- Platform-specific adaptations
- **Draft explicit changes for each artifact:**
- **Game Stories:** Revise story text, Unity-specific acceptance criteria, technical constraints
- **Technical Specs:** Update architecture diagrams, component hierarchies, performance budgets
- **Unity Configurations:** Propose settings changes, optimization strategies, platform variants
@@ -462,14 +451,12 @@ Based on the analysis and agreed path forward:
- Create a comprehensive proposal document containing:
**A. Change Summary:**
- Original issue (performance, gameplay, technical constraint)
- Game systems affected
- Platform/performance implications
- Chosen solution approach
**B. Technical Impact Analysis:**
- Unity architecture changes needed
- Performance implications (with metrics)
- Platform compatibility effects
@@ -477,14 +464,12 @@ Based on the analysis and agreed path forward:
- Third-party dependency impacts
**C. Specific Proposed Edits:**
- For each game story: "Change Story GS-X.Y from: [old] To: [new]"
- For technical specs: "Update Unity Architecture Section X: [changes]"
- For GDD: "Modify [Feature] in Section Y: [updates]"
- For configurations: "Change [Setting] from [old_value] to [new_value]"
**D. Implementation Considerations:**
- Required Unity version updates
- Asset reimport needs
- Shader recompilation requirements
@@ -496,7 +481,6 @@ Based on the analysis and agreed path forward:
- Provide the finalized document to the user
- **Based on change scope:**
- **Minor adjustments (can be handled in current sprint):**
- Confirm task completion
- Suggest handoff to game-dev agent for implementation
@@ -510,7 +494,6 @@ Based on the analysis and agreed path forward:
## Output Deliverables
- **Primary:** "Game Development Change Proposal" document containing:
- Game-specific change analysis
- Technical impact assessment with Unity context
- Platform and performance considerations
@@ -531,8 +514,8 @@ template:
version: 3.0
output:
format: markdown
filename: "stories/{{epic_name}}/{{story_id}}-{{story_name}}.md"
title: "Story: {{story_title}}"
filename: 'stories/{{epic_name}}/{{story_id}}-{{story_name}}.md'
title: 'Story: {{story_title}}'
workflow:
mode: interactive
@@ -561,7 +544,7 @@ sections:
- id: description
title: Description
instruction: Provide a clear, concise description of what this story implements. Focus on the specific game feature or system being built. Reference the GDD section that defines this feature.
template: "{{clear_description_of_what_needs_to_be_implemented}}"
template: '{{clear_description_of_what_needs_to_be_implemented}}'
- id: acceptance-criteria
title: Acceptance Criteria
@@ -571,7 +554,7 @@ sections:
title: Functional Requirements
type: checklist
items:
- "{{specific_functional_requirement}}"
- '{{specific_functional_requirement}}'
- id: technical-requirements
title: Technical Requirements
type: checklist
@@ -579,14 +562,14 @@ sections:
- Code follows C# best practices
- Maintains stable frame rate on target devices
- No memory leaks or performance degradation
- "{{specific_technical_requirement}}"
- '{{specific_technical_requirement}}'
- id: game-design-requirements
title: Game Design Requirements
type: checklist
items:
- "{{gameplay_requirement_from_gdd}}"
- "{{balance_requirement_if_applicable}}"
- "{{player_experience_requirement}}"
- '{{gameplay_requirement_from_gdd}}'
- '{{balance_requirement_if_applicable}}'
- '{{player_experience_requirement}}'
- id: technical-specifications
title: Technical Specifications
@@ -761,7 +744,7 @@ sections:
- Performance targets met
- No C# compiler errors or warnings
- Documentation updated
- "{{game_specific_dod_item}}"
- '{{game_specific_dod_item}}'
- id: notes
title: Notes

File diff suppressed because it is too large


@@ -239,7 +239,6 @@ To conduct a thorough review of existing infrastructure to identify improvement
### 3. Conduct Systematic Review
- **If "Incremental Mode" was selected:**
- For each section of the infrastructure checklist:
- **a. Present Section Focus:** Explain what aspects of infrastructure this section reviews
- **b. Work Through Items:** Examine each checklist item against current infrastructure
@@ -425,7 +424,6 @@ To comprehensively validate platform infrastructure changes against security, re
### 4. Execute Comprehensive Platform Validation Process
- **If "Incremental Mode" was selected:**
- For each section of the infrastructure checklist (Sections 1-16):
- **a. Present Section Purpose:** Explain what this section validates and why it's important for platform operations
- **b. Work Through Items:** Present each checklist item, guide the user through validation, and document compliance or gaps
@@ -532,23 +530,23 @@ template:
output:
format: markdown
filename: docs/infrastructure-architecture.md
title: "{{project_name}} Infrastructure Architecture"
title: '{{project_name}} Infrastructure Architecture'
workflow:
mode: interactive
elicitation: advanced-elicitation
custom_elicitation:
title: "Infrastructure Architecture Elicitation Actions"
title: 'Infrastructure Architecture Elicitation Actions'
sections:
- id: infrastructure-overview
options:
- "Multi-Cloud Strategy Analysis - Evaluate cloud provider options and vendor lock-in considerations"
- "Regional Distribution Planning - Analyze latency requirements and data residency needs"
- "Environment Isolation Strategy - Design security boundaries and resource segregation"
- "Scalability Patterns Review - Assess auto-scaling needs and traffic patterns"
- "Compliance Requirements Analysis - Review regulatory and security compliance needs"
- "Cost-Benefit Analysis - Compare infrastructure options and TCO"
- "Proceed to next section"
- 'Multi-Cloud Strategy Analysis - Evaluate cloud provider options and vendor lock-in considerations'
- 'Regional Distribution Planning - Analyze latency requirements and data residency needs'
- 'Environment Isolation Strategy - Design security boundaries and resource segregation'
- 'Scalability Patterns Review - Assess auto-scaling needs and traffic patterns'
- 'Compliance Requirements Analysis - Review regulatory and security compliance needs'
- 'Cost-Benefit Analysis - Compare infrastructure options and TCO'
- 'Proceed to next section'
sections:
- id: initial-setup
@@ -608,7 +606,7 @@ sections:
sections:
- id: environments
repeatable: true
title: "{{environment_name}} Environment"
title: '{{environment_name}} Environment'
template: |
- **Purpose:** {{environment_purpose}}
- **Resources:** {{environment_resources}}
@@ -959,24 +957,24 @@ template:
output:
format: markdown
filename: docs/platform-infrastructure/platform-implementation.md
title: "{{project_name}} Platform Infrastructure Implementation"
title: '{{project_name}} Platform Infrastructure Implementation'
workflow:
mode: interactive
elicitation: advanced-elicitation
custom_elicitation:
title: "Platform Implementation Elicitation Actions"
title: 'Platform Implementation Elicitation Actions'
sections:
- id: foundation-infrastructure
options:
- "Platform Layer Security Hardening - Additional security controls and compliance validation"
- "Performance Optimization - Network and resource optimization"
- "Operational Excellence Enhancement - Automation and monitoring improvements"
- "Platform Integration Validation - Verify foundation supports upper layers"
- "Developer Experience Analysis - Foundation impact on developer workflows"
- "Disaster Recovery Testing - Foundation resilience validation"
- "BMAD Workflow Integration - Cross-agent support verification"
- "Finalize and Proceed to Container Platform"
- 'Platform Layer Security Hardening - Additional security controls and compliance validation'
- 'Performance Optimization - Network and resource optimization'
- 'Operational Excellence Enhancement - Automation and monitoring improvements'
- 'Platform Integration Validation - Verify foundation supports upper layers'
- 'Developer Experience Analysis - Foundation impact on developer workflows'
- 'Disaster Recovery Testing - Foundation resilience validation'
- 'BMAD Workflow Integration - Cross-agent support verification'
- 'Finalize and Proceed to Container Platform'
sections:
- id: initial-setup

dist/teams/team-all.txt (vendored, 2684 lines changed): diff suppressed because it is too large, along with three other large bundled team files whose diffs are also suppressed.

@@ -1,8 +1,8 @@
# Enhanced Development Workflow
# Enhanced IDE Development Workflow
This is a simple step-by-step guide to help you efficiently manage your development workflow using the BMad Method. Refer to the **[<ins>User Guide</ins>](user-guide.md)** for any scenario that is not covered here.
This is a simple step-by-step guide to help you efficiently manage your development workflow using the BMad Method. The workflow integrates the Test Architect (QA agent) throughout the development lifecycle to ensure quality, prevent regressions, and maintain high standards. Refer to the **[<ins>User Guide</ins>](user-guide.md)** for any scenario that is not covered here.
## Create new Branch
## Create New Branch
1. **Start new branch**
@@ -21,23 +21,228 @@ This is a simple step-by-step guide to help you efficiently manage your developm
3. **Execute**: `*develop-story {selected-story}` (runs execute-checklist task)
4. **Review generated report** in `{selected-story}`
## Story Review (Quality Assurance)
## Test Architect Integration Throughout Workflow
1. **Start new chat/conversation**
2. **Load QA agent**
3. **Execute**: `*review {selected-story}` (runs review-story task)
4. **Review generated report** in `{selected-story}`
The Test Architect (Quinn) provides comprehensive quality assurance throughout the development lifecycle. Here's how to leverage each capability at the right time.
**Command Aliases:** Documentation uses short forms (`*risk`, `*design`, `*nfr`, `*trace`) for the full commands (`*risk-profile`, `*test-design`, `*nfr-assess`, `*trace-requirements`).
### Quick Command Reference
| **Stage** | **Command** | **Purpose** | **Output** | **Priority** |
| ------------------------ | ----------- | --------------------------------------- | --------------------------------------------------------------- | --------------------------- |
| **After Story Approval** | `*risk` | Identify integration & regression risks | `docs/qa/assessments/{epic}.{story}-risk-{YYYYMMDD}.md` | High for complex/brownfield |
| | `*design` | Create test strategy for dev | `docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md` | High for new features |
| **During Development** | `*trace` | Verify test coverage | `docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md` | Medium |
| | `*nfr` | Validate quality attributes | `docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md` | High for critical features |
| **After Development** | `*review` | Comprehensive assessment | QA Results in story + `docs/qa/gates/{epic}.{story}-{slug}.yml` | **Required** |
| **Post-Review** | `*gate` | Update quality decision | Updated `docs/qa/gates/{epic}.{story}-{slug}.yml` | As needed |
### Stage 1: After Story Creation (Before Dev Starts)
**RECOMMENDED - Set Developer Up for Success:**
```bash
# 1. RISK ASSESSMENT (Run FIRST for complex stories)
@qa *risk {approved-story}
# Identifies:
# - Technical debt impact
# - Integration complexity
# - Regression potential (1-9 scoring)
# - Mitigation strategies
# Critical for: Brownfield, API changes, data migrations
# 2. TEST DESIGN (Run SECOND to guide implementation)
@qa *design {approved-story}
# Provides:
# - Test scenarios per acceptance criterion
# - Test level recommendations (unit/integration/E2E)
# - Risk-based priorities (P0/P1/P2)
# - Test data requirements
# Share with Dev: Include in story comments or attach to ticket
```
### Stage 2: During Development (Mid-Implementation Checkpoints)
**Developer Self-Service Quality Checks:**
```bash
# 3. REQUIREMENTS TRACING (Verify coverage mid-development)
@qa *trace {story-in-progress}
# Validates:
# - All acceptance criteria have tests
# - No missing test scenarios
# - Appropriate test levels
# - Given-When-Then documentation clarity
# Run when: After writing initial tests
# 4. NFR VALIDATION (Check quality attributes)
@qa *nfr {story-in-progress}
# Assesses:
# - Security: Authentication, authorization, data protection
# - Performance: Response times, resource usage
# - Reliability: Error handling, recovery
# - Maintainability: Code quality, documentation
# Run when: Before marking "Ready for Review"
```
### Stage 3: Story Review (Quality Gate Assessment)
**REQUIRED - Comprehensive Test Architecture Review:**
**Prerequisite:** All tests green locally; lint & type checks pass.
```bash
# 5. FULL REVIEW (Standard review process)
@qa *review {completed-story}
```
**What Happens During Review:**
1. **Deep Code Analysis**
- Architecture pattern compliance
- Code quality and maintainability
- Security vulnerability scanning
- Performance bottleneck detection
2. **Active Refactoring**
- Improves code directly when safe
- Fixes obvious issues immediately
- Suggests complex refactoring for dev
3. **Test Validation**
- Coverage at all levels (unit/integration/E2E)
- Test quality (no flaky tests, proper assertions)
- Regression test adequacy
4. **Gate Decision**
- Creates: `docs/qa/gates/{epic}.{story}-{slug}.yml`
- Adds: QA Results section to story file
- Status: PASS/CONCERNS/FAIL/WAIVED
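To illustrate the gate decision step above, the QA Results section appended to the story might read roughly like this; the exact headings and fields come from the review task, so treat this as an assumed sketch with a hypothetical story slug:

```markdown
## QA Results

- Gate: CONCERNS -> docs/qa/gates/2.3-checkout-flow.yml   <!-- slug hypothetical -->
- Traceability: AC1-AC4 covered; AC5 missing an integration test
- Refactoring performed: extracted duplicated price-rounding logic into a helper
- Follow-ups for dev: add a regression test for the discount edge case
```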
### Stage 4: Post-Review (After Addressing Issues)
**Update Gate Status After Fixes:**
```bash
# 6. GATE UPDATE (Document final decision)
@qa *gate {reviewed-story}
# Updates: Quality gate with new status
# Use when: After addressing review feedback
# Documents: What was fixed, what was waived
```
### Understanding Gate Decisions
| **Status** | **Meaning** | **Action Required** | **Can Proceed?** |
| ------------ | -------------------------------------------- | ----------------------- | ---------------- |
| **PASS** | All critical requirements met | None | ✅ Yes |
| **CONCERNS** | Non-critical issues found | Team review recommended | ⚠️ With caution |
| **FAIL** | Critical issues (security, missing P0 tests) | Must fix | ❌ No |
| **WAIVED** | Issues acknowledged and accepted | Document reasoning | ✅ With approval |
### Risk-Based Testing Strategy
The Test Architect uses risk scoring to prioritize testing:
| **Risk Score** | **Calculation** | **Testing Priority** | **Gate Impact** |
| -------------- | ------------------------------ | ------------------------- | ------------------------ |
| **9** | High probability × High impact | P0 - Must test thoroughly | FAIL if untested |
| **6** | Medium-high combinations | P1 - Should test well | CONCERNS if gaps |
| **4** | Medium combinations | P1 - Should test | CONCERNS if notable gaps |
| **2-3** | Low-medium combinations | P2 - Nice to have | Note in review |
| **1** | Minimal risk | P2 - Minimal | Note in review |
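To make the scoring concrete, here is a hypothetical risk entry showing how probability × impact maps to a priority and a gate impact; the field names are illustrative, not the exact `*risk` output schema:

```yaml
# Hypothetical entry - field names are illustrative
risk:
  id: DATA-001
  description: Migration rewrites the orders table used by legacy reports
  probability: 3        # high
  impact: 3             # high
  score: 9              # 3 x 3 -> P0, gate FAIL if left untested
  mitigation: Reversible migration plus regression tests on report queries
```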
### Special Situations & Best Practices
#### High-Risk or Brownfield Stories
```bash
# ALWAYS run this sequence:
@qa *risk {story} # First - identify dangers
@qa *design {story} # Second - plan defense
# Then during dev:
@qa *trace {story} # Verify regression coverage
@qa *nfr {story} # Check performance impact
# Finally:
@qa *review {story} # Deep integration analysis
```
#### Complex Integrations
- Run `*trace` multiple times during development
- Focus on integration test coverage
- Use `*nfr` to validate cross-system performance
- Review with extra attention to API contracts
#### Performance-Critical Features
- Run `*nfr` early and often (not just at review)
- Establish performance baselines before changes
- Document acceptable performance degradation
- Consider load testing requirements in `*design`
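BMad does not prescribe a load tool, but as one way to capture a baseline before changes (assuming you use k6; the script name and scenario are hypothetical):

```bash
# Record a pre-change baseline, then rerun the same scenario after the change
k6 run --vus 10 --duration 30s load/checkout-baseline.js
# Compare p95 latency and error rate against the documented acceptable degradation
```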
### Test Quality Standards Enforced
Quinn ensures all tests meet these standards:
- **No Flaky Tests**: Proper async handling, explicit waits
- **No Hard Waits**: Dynamic strategies only (polling, events)
- **Stateless**: Tests run independently and in parallel
- **Self-Cleaning**: Tests manage their own test data
- **Appropriate Levels**: Unit for logic, integration for interactions, E2E for journeys
- **Clear Assertions**: Keep assertions in tests, not buried in helpers
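For example, a minimal sketch of the "No Hard Waits" and "Clear Assertions" rules in a Playwright-style E2E test (Playwright is an assumption here, not a BMad requirement):

```typescript
import { test, expect } from '@playwright/test';

test('order confirmation appears after submit', async ({ page }) => {
  await page.goto('/checkout');            // route is hypothetical
  await page.click('#submit-order');
  // Avoid: await page.waitForTimeout(5000);  // hard wait -> flaky
  // Prefer an explicit condition with a timeout (dynamic wait), asserted in the test itself:
  await expect(page.locator('#order-status')).toHaveText('Confirmed', { timeout: 10_000 });
});
```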
### Documentation & Audit Trail
All Test Architect activities create permanent records:
- **Assessment Reports**: Timestamped analysis in `docs/qa/assessments/`
- **Gate Files**: Decision records in `docs/qa/gates/`
- **Story Updates**: QA Results sections in story files
- **Traceability**: Requirements to test mapping maintained
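Put together, the audit trail for a single story might look like this on disk (story 2.3, the slug, and the date are hypothetical; the filename patterns are the ones listed above):

```text
docs/qa/
├── assessments/
│   ├── 2.3-risk-20250816.md
│   ├── 2.3-test-design-20250816.md
│   ├── 2.3-trace-20250816.md
│   └── 2.3-nfr-20250816.md
└── gates/
    └── 2.3-checkout-flow.yml
```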
## Commit Changes and Push
1. **Commit changes**
2. **Push to remote**
## Repeat Until Complete
## Complete Development Cycle Flow
- **SM**: Create next story → Review → Approve
- **Dev**: Implement story → Complete → Mark Ready for Review
- **QA**: Review story → Mark done
- **Commit**: All changes
- **Push**: To remote
- **Continue**: Until all features implemented
### The Full Workflow with Test Architect
1. **SM**: Create next story → Review → Approve
2. **QA (Optional)**: Risk assessment (`*risk`) → Test design (`*design`)
3. **Dev**: Implement story → Write tests → Complete
4. **QA (Optional)**: Mid-dev checks (`*trace`, `*nfr`)
5. **Dev**: Mark Ready for Review
6. **QA (Required)**: Review story (`*review`) → Gate decision
7. **Dev (If needed)**: Address issues
8. **QA (If needed)**: Update gate (`*gate`)
9. **Commit**: All changes
10. **Push**: To remote
11. **Continue**: Until all features implemented
### Quick Decision Guide
**Should I run Test Architect commands?**
| **Scenario** | **Before Dev** | **During Dev** | **After Dev** |
| ------------------------ | ------------------------------- | ---------------------------- | ---------------------------- |
| **Simple bug fix** | Optional | Optional | Required `*review` |
| **New feature** | Recommended `*risk`, `*design` | Optional `*trace` | Required `*review` |
| **Brownfield change** | **Required** `*risk`, `*design` | Recommended `*trace`, `*nfr` | Required `*review` |
| **API modification** | **Required** `*risk`, `*design` | **Required** `*trace` | Required `*review` |
| **Performance-critical** | Recommended `*design` | **Required** `*nfr` | Required `*review` |
| **Data migration** | **Required** `*risk`, `*design` | **Required** `*trace` | Required `*review` + `*gate` |
### Success Metrics
The Test Architect helps achieve:
- **Zero regression defects** in production
- **100% requirements coverage** with tests
- **Clear quality gates** for go/no-go decisions
- **Documented risk acceptance** for technical debt
- **Consistent test quality** across the team
- **Shift-left testing** with early risk identification


@@ -1,6 +1,6 @@
# BMad-Method BMAd Code User Guide
# BMad Method User Guide
This guide will help you understand and effectively use the BMad Method for agile AI driven planning and development.
This guide will help you understand and effectively use the BMad Method for agile AI-driven planning and development.
## The BMad Plan and Execute Workflow
@@ -8,7 +8,7 @@ First, here is the full standard Greenfield Planning + Execution Workflow. Brown
If you are going to use the BMad Method with a Brownfield project (an existing project), review **[Working in the Brownfield](./working-in-the-brownfield.md)**.
If you do not see the diagrams that following rendering, you can install Markdown All in One along with the Markdown Preview Mermaid Support plugins to VSCode (or one of the forked clones). With these plugin's, if you right click on the tab when open, there should be a Open Preview option, or check the IDE documentation.
If the diagrams below don't render, install Markdown All in One along with the Markdown Preview Mermaid Support plugins to VSCode (or one of the forked clones). With these plugins, if you right click on the tab when open, there should be an Open Preview option, or check the IDE documentation.
### The Planning Workflow (Web UI or Powerful IDE Agents)
@@ -32,8 +32,11 @@ graph TD
F2 -->|No| H["Architect: Create Architecture from PRD"]
F3 --> F4["UX Expert: Generate UI Prompt for Lovable/V0 (Optional)"]
F4 --> H2["Architect: Create Architecture from PRD + UX Spec"]
H --> I["PO: Run Master Checklist"]
H2 --> I
H --> Q{"Early Test Strategy? (Optional)"}
H2 --> Q
Q -->|Yes| R["QA: Early Test Architecture Input on High-Risk Areas"]
Q -->|No| I
R --> I["PO: Run Master Checklist"]
I --> J{"Documents Aligned?"}
J -->|Yes| K["Planning Complete"]
J -->|No| L["PO: Update Epics & Stories"]
@@ -58,6 +61,8 @@ graph TD
style G fill:#e3f2fd,color:#000
style H fill:#f3e5f5,color:#000
style H2 fill:#f3e5f5,color:#000
style Q fill:#e3f2fd,color:#000
style R fill:#ffd54f,color:#000
style I fill:#f9ab00,color:#fff
style J fill:#e3f2fd,color:#000
style K fill:#34a853,color:#fff
@@ -77,6 +82,17 @@ graph TD
3. **Document Sharding**: Use the PO agent to shard the PRD and then the Architecture
4. **Begin Development**: Start the Core Development Cycle that follows
#### Planning Artifacts (Standard Paths)
```text
PRD → docs/prd.md
Architecture → docs/architecture.md
Sharded Epics → docs/epics/
Sharded Stories → docs/stories/
QA Assessments → docs/qa/assessments/
QA Gates → docs/qa/gates/
```
### The Core Development Cycle (IDE)
Once planning is complete and documents are sharded, BMad follows a structured development workflow:
@@ -85,35 +101,52 @@ Once planning is complete and documents are sharded, BMad follows a structured d
graph TD
A["Development Phase Start"] --> B["SM: Reviews Previous Story Dev/QA Notes"]
B --> B2["SM: Drafts Next Story from Sharded Epic + Architecture"]
B2 --> B3{"PO: Validate Story Draft (Optional)"}
B2 --> S{"High-Risk Story? (Optional)"}
S -->|Yes| T["QA: *risk + *design on Draft Story"]
S -->|No| B3
T --> U["Test Strategy & Risk Profile Created"]
U --> B3{"PO: Validate Story Draft (Optional)"}
B3 -->|Validation Requested| B4["PO: Validate Story Against Artifacts"]
B3 -->|Skip Validation| C{"User Approval"}
B4 --> C
C -->|Approved| D["Dev: Sequential Task Execution"]
C -->|Needs Changes| B2
D --> E["Dev: Implement Tasks + Tests"]
E --> F["Dev: Run All Validations"]
E --> V{"Mid-Dev QA Check? (Optional)"}
V -->|Yes| W["QA: *trace or *nfr for Early Validation"]
V -->|No| F
W --> X["Dev: Address Coverage/NFR Gaps"]
X --> F["Dev: Run All Validations"]
F --> G["Dev: Mark Ready for Review + Add Notes"]
G --> H{"User Verification"}
H -->|Request QA Review| I["QA: Senior Dev Review + Active Refactoring"]
H -->|Request QA Review| I["QA: Test Architect Review + Quality Gate"]
H -->|Approve Without QA| M["IMPORTANT: Verify All Regression Tests and Linting are Passing"]
I --> J["QA: Review, Refactor Code, Add Tests, Document Notes"]
I --> J["QA: Test Architecture Analysis + Active Refactoring"]
J --> L{"QA Decision"}
L -->|Needs Dev Work| D
L -->|Approved| M
H -->|Needs Fixes| D
M --> N["IMPORTANT: COMMIT YOUR CHANGES BEFORE PROCEEDING!"]
N --> K["Mark Story as Done"]
N --> Y{"Gate Update Needed?"}
Y -->|Yes| Z["QA: *gate to Update Status"]
Y -->|No| K
Z --> K["Mark Story as Done"]
K --> B
style A fill:#f5f5f5,color:#000
style B fill:#e8f5e9,color:#000
style B2 fill:#e8f5e9,color:#000
style S fill:#e3f2fd,color:#000
style T fill:#ffd54f,color:#000
style U fill:#ffd54f,color:#000
style B3 fill:#e3f2fd,color:#000
style B4 fill:#fce4ec,color:#000
style C fill:#e3f2fd,color:#000
style D fill:#e3f2fd,color:#000
style E fill:#e3f2fd,color:#000
style V fill:#e3f2fd,color:#000
style W fill:#ffd54f,color:#000
style X fill:#e3f2fd,color:#000
style F fill:#e3f2fd,color:#000
style G fill:#e3f2fd,color:#000
style H fill:#e3f2fd,color:#000
@@ -123,13 +156,23 @@ graph TD
style L fill:#e3f2fd,color:#000
style M fill:#ff5722,color:#fff
style N fill:#d32f2f,color:#fff
style Y fill:#e3f2fd,color:#000
style Z fill:#ffd54f,color:#000
```
## Prerequisites
Before installing BMad Method, ensure you have:
- **Node.js** ≥ 18, **npm** ≥ 9
- **Git** installed and configured
- **(Optional)** VS Code with "Markdown All in One" + "Markdown Preview Mermaid Support" extensions
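A quick way to confirm the prerequisites from your terminal:

```bash
node -v        # should print v18.x or newer
npm -v         # should print 9.x or newer
git --version
```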
## Installation
### Optional
If you want to do the planning in the Web with Claude (Sonnet 4 or Opus), Gemini Gem (2.5 Pro), or Custom GPT's:
If you want to do the planning on the web with Claude (Sonnet 4 or Opus), Gemini Gem (2.5 Pro), or Custom GPTs:
1. Navigate to `dist/teams/`
2. Copy `team-fullstack.txt`
@@ -146,17 +189,17 @@ npx bmad-method install
## Special Agents
There are two bmad agents - in the future they will be consolidated into the single bmad-master.
There are two BMad agents; in the future they'll be consolidated into a single BMad-Master.
### BMad-Master
This agent can do any task or command that all other agents can do, aside from actual story implementation. Additionally, this agent can help explain the BMad Method when in the web by accessing the knowledge base and explaining anything to you about the process.
This agent can do any task or command that all other agents can do, aside from actual story implementation. Additionally, this agent can help explain the BMad Method when on the web by accessing the knowledge base and explaining anything to you about the process.
If you don't want to bother switching between different agents aside from the dev, this is the agent for you. Just remember that as the context grows, the performance of the agent degrades, so it is important to instruct the agent to compact the conversation and start a new conversation with the compacted conversation as the initial message. Do this often, preferably after each story is implemented.
### BMad-Orchestrator
This agent should NOT be used within the IDE, it is a heavy weight special purpose agent that utilizes a lot of context and can morph into any other agent. This exists solely to facilitate the team's within the web bundles. If you use a web bundle you will be greeted by the BMad Orchestrator.
This agent should NOT be used within the IDE, it is a heavyweight, special-purpose agent that utilizes a lot of context and can morph into any other agent. This exists solely to facilitate the teams within the web bundles. If you use a web bundle you will be greeted by the BMad Orchestrator.
### How Agents Work
@@ -187,12 +230,12 @@ dependencies:
**In IDE:**
```bash
# Some Ide's, like Cursor or Windsurf for example, utilize manual rules so interaction is done with the '@' symbol
# Some IDEs, like Cursor or Windsurf for example, utilize manual rules so interaction is done with the '@' symbol
@pm Create a PRD for a task management app
@architect Design the system architecture
@dev Implement the user authentication
# Some, like Claude Code use slash commands instead
# Some IDEs, like Claude Code, use slash commands instead
/pm Create user stories
/dev Fix the login bug
```
@@ -212,6 +255,216 @@ dependencies:
- **File Organization**: Maintain clean project structure
- **Commit Regularly**: Save your work frequently
## The Test Architect (QA Agent)
### Overview
The QA agent in BMad is not just a "senior developer reviewer" - it's a **Test Architect** with deep expertise in test strategy, quality gates, and risk-based testing. Named Quinn, this agent provides advisory authority on quality matters while actively improving code when safe to do so.
#### Quick Start (Essential Commands)
```bash
@qa *risk {story} # Assess risks before development
@qa *design {story} # Create test strategy
@qa *trace {story} # Verify test coverage during dev
@qa *nfr {story} # Check quality attributes
@qa *review {story} # Full assessment → writes gate
```
#### Command Aliases (Test Architect)
The documentation uses short forms for convenience. Both styles are valid:
```text
*risk → *risk-profile
*design → *test-design
*nfr → *nfr-assess
*trace → *trace-requirements (or just *trace)
*review → *review
*gate → *gate
```
### Core Capabilities
#### 1. Risk Profiling (`*risk`)
**When:** After story draft, before development begins (earliest intervention point)
Identifies and assesses implementation risks:
- **Categories**: Technical, Security, Performance, Data, Business, Operational
- **Scoring**: Probability × Impact analysis (1-9 scale)
- **Mitigation**: Specific strategies for each identified risk
- **Gate Impact**: Risks ≥9 trigger FAIL, ≥6 trigger CONCERNS (see `tasks/risk-profile.md` for authoritative rules)
#### 2. Test Design (`*design`)
**When:** After story draft, before development begins (guides what tests to write)
Creates comprehensive test strategies including:
- Test scenarios for each acceptance criterion
- Appropriate test level recommendations (unit vs integration vs E2E)
- Risk-based prioritization (P0/P1/P2)
- Test data requirements and mock strategies
- Execution strategies for CI/CD integration
**Example output:**
```yaml
test_summary:
total: 24
by_level:
unit: 15
integration: 7
e2e: 2
by_priority:
P0: 8 # Must have - linked to critical risks
P1: 10 # Should have - medium risks
P2: 6 # Nice to have - low risks
```
#### 3. Requirements Tracing (`*trace`)
**When:** During development (mid-implementation checkpoint)
Maps requirements to test coverage:
- Documents which tests validate each acceptance criterion
- Uses Given-When-Then for clarity (documentation only, not BDD code)
- Identifies coverage gaps with severity ratings
- Creates traceability matrix for audit purposes
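For example, a traced acceptance criterion might be documented like this (the criterion and test name are hypothetical; Given-When-Then is used purely as documentation, not as BDD code):

```text
AC3: Locked accounts cannot log in
  Given a user whose account has been locked by an admin
  When they submit otherwise valid credentials
  Then the login is rejected with a "locked account" message
  Coverage: integration test auth/login-locked.test.ts (P0)
```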
#### 4. NFR Assessment (`*nfr`)
**When:** During development or early review (validate quality attributes)
Validates non-functional requirements:
- **Core Four**: Security, Performance, Reliability, Maintainability
- **Evidence-Based**: Looks for actual implementation proof
- **Gate Integration**: NFR failures directly impact quality gates
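An NFR assessment summary might read like this sketch (statuses and findings are illustrative, not the exact output format):

```text
security:        CONCERNS - no rate limiting on the login endpoint
performance:     PASS     - p95 180ms against a 200ms target
reliability:     PASS     - retries and error handling verified
maintainability: CONCERNS - new config flags are undocumented
```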
#### 5. Comprehensive Test Architecture Review (`*review`)
**When:** After development complete, story marked "Ready for Review"
When you run `@qa *review {story}`, Quinn performs:
- **Requirements Traceability**: Maps every acceptance criterion to its validating tests
- **Test Level Analysis**: Ensures appropriate testing at unit, integration, and E2E levels
- **Coverage Assessment**: Identifies gaps and redundant test coverage
- **Active Refactoring**: Improves code quality directly when safe
- **Quality Gate Decision**: Issues PASS/CONCERNS/FAIL status based on findings
#### 6. Quality Gates (`*gate`)
**When:** After review fixes or when gate status needs updating
Manages quality gate decisions:
- **Deterministic Rules**: Clear criteria for PASS/CONCERNS/FAIL
- **Parallel Authority**: QA owns gate files in `docs/qa/gates/`
- **Advisory Nature**: Provides recommendations, not blocks
- **Waiver Support**: Documents accepted risks when needed
**Note:** Gates are advisory; teams choose their quality bar. WAIVED requires a reason, an approver, and an expiry date. See `templates/qa-gate-tmpl.yaml` for the schema, `tasks/review-story.md` for the gate rules, and `tasks/risk-profile.md` for the scoring.
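As an illustration only (the authoritative schema lives in `templates/qa-gate-tmpl.yaml`), a gate file might look roughly like this; the story, slug, and field names are assumed:

```yaml
# docs/qa/gates/2.3-checkout-flow.yml - illustrative sketch, not the real schema
story: '2.3'
gate: CONCERNS
status_reason: AC5 lacks an integration test; performance within target
waiver:
  active: false        # if WAIVED: requires reason, approver, and expiry date
top_issues:
  - id: TEST-001
    severity: medium
    finding: Missing integration test for discount edge case
```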
### Working with the Test Architect
#### Integration with BMad Workflow
The Test Architect provides value throughout the entire development lifecycle. Here's when and how to leverage each capability:
| **Stage** | **Command** | **When to Use** | **Value** | **Output** |
| ------------------ | ----------- | ----------------------- | -------------------------- | -------------------------------------------------------------- |
| **Story Drafting** | `*risk` | After SM drafts story | Identify pitfalls early | `docs/qa/assessments/{epic}.{story}-risk-{YYYYMMDD}.md` |
| | `*design` | After risk assessment | Guide dev on test strategy | `docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md` |
| **Development** | `*trace` | Mid-implementation | Verify test coverage | `docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md` |
| | `*nfr` | While building features | Catch quality issues early | `docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md` |
| **Review** | `*review` | Story marked complete | Full quality assessment | QA Results in story + gate file |
| **Post-Review** | `*gate` | After fixing issues | Update quality decision | Updated `docs/qa/gates/{epic}.{story}-{slug}.yml` |
#### Example Commands
```bash
# Planning Stage - Run these BEFORE development starts
@qa *risk {draft-story} # What could go wrong?
@qa *design {draft-story} # What tests should we write?
# Development Stage - Run these DURING coding
@qa *trace {story} # Are we testing everything?
@qa *nfr {story} # Are we meeting quality standards?
# Review Stage - Run when development complete
@qa *review {story} # Comprehensive assessment + refactoring
# Post-Review - Run after addressing issues
@qa *gate {story} # Update gate status
```
### Quality Standards Enforced
Quinn enforces these test quality principles:
- **No Flaky Tests**: Ensures reliability through proper async handling
- **No Hard Waits**: Dynamic waiting strategies only
- **Stateless & Parallel-Safe**: Tests run independently
- **Self-Cleaning**: Tests manage their own test data
- **Appropriate Test Levels**: Unit for logic, integration for interactions, E2E for journeys
- **Explicit Assertions**: Keep assertions in tests, not helpers
### Gate Status Meanings
- **PASS**: All critical requirements met, no blocking issues
- **CONCERNS**: Non-critical issues found, team should review
- **FAIL**: Critical issues that should be addressed (security risks, missing P0 tests)
- **WAIVED**: Issues acknowledged but explicitly accepted by team
### Special Situations
**High-Risk Stories:**
- Always run `*risk` and `*design` before development starts
- Consider mid-development `*trace` and `*nfr` checkpoints
**Complex Integrations:**
- Run `*trace` during development to ensure all integration points tested
- Follow up with `*nfr` to validate performance across integrations
**Performance-Critical:**
- Run `*nfr` early and often during development
- Don't wait until review to discover performance issues
**Brownfield/Legacy Code:**
- Start with `*risk` to identify regression dangers
- Use `*review` with extra focus on backward compatibility
### Best Practices
- **Early Engagement**: Run `*design` and `*risk` during story drafting
- **Risk-Based Focus**: Let risk scores drive test prioritization
- **Iterative Improvement**: Use QA feedback to improve future stories
- **Gate Transparency**: Share gate decisions with the team
- **Continuous Learning**: QA documents patterns for team knowledge sharing
- **Brownfield Care**: Pay extra attention to regression risks in existing systems
### Output Paths Reference
Quick reference for where Test Architect outputs are stored:
```text
*risk-profile → docs/qa/assessments/{epic}.{story}-risk-{YYYYMMDD}.md
*test-design → docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md
*trace → docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md
*nfr-assess → docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md
*review → QA Results section in story + gate file reference
*gate → docs/qa/gates/{epic}.{story}-{slug}.yml
```
## Technical Preferences System
BMad includes a personalization system through the `technical-preferences.md` file located in `.bmad-core/data/` - this can help bias the PM and Architect to recommend your preferences for design patterns, technology selection, or anything else you would like to put in here.
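For example, a minimal `technical-preferences.md` might contain entries like the following (the structure is entirely up to you; these picks are placeholders, not recommendations):

```markdown
# Technical Preferences

- Frontend: React + TypeScript
- Backend: Node.js (Express) with PostgreSQL
- Testing: unit tests first; E2E only for critical journeys
- Patterns: prefer composition over inheritance; avoid premature microservices
```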
@@ -235,9 +488,9 @@ devLoadAlwaysFiles:
- docs/architecture/project-structure.md
```
You will want to verify from sharding your architecture that these documents exist, that they are as lean as possible, and contain exactly the information you want your dev agent to ALWAYS load into it's context. These are the rules the agent will follow.
You will want to verify from sharding your architecture that these documents exist, that they are as lean as possible, and contain exactly the information you want your dev agent to ALWAYS load into its context. These are the rules the agent will follow.
As your project grows and the code starts to build consistent patterns, coding standards should be reduced to include only the standards that the agent still makes with. The agent will look at surrounding code in files to infer the coding standards that are relevant to the current task.
As your project grows and the code starts to build consistent patterns, coding standards should be reduced to include only the standards the agent still needs enforced. The agent will look at surrounding code in files to infer the coding standards that are relevant to the current task.
## Getting Help


@@ -27,7 +27,7 @@ If you have just completed an MVP with BMad, and you want to continue with post-
## The Complete Brownfield Workflow
1. **Follow the [<ins>User Guide - Installation</ins>](user-guide.md#installation) steps to setup your agent in the web.**
2. **Generate a 'flattened' single file of your entire codebase** run: ```npx bmad-method flatten```
2. **Generate a 'flattened' single file of your entire codebase** run: `npx bmad-method flatten`
### Choose Your Approach
@@ -76,7 +76,7 @@ The PM will:
*document-project
```
The analyst will:
The architect will:
- **Ask about your focus** if no PRD was provided
- **Offer options**: Create PRD, provide requirements, or describe the enhancement
@@ -85,11 +85,11 @@ The analyst will:
- **Skip unrelated areas** to keep docs lean
- **Generate ONE architecture document** for all environments
The analyst creates:
The architect creates:
- **One comprehensive architecture document** following fullstack-architecture template
- **Covers all system aspects** in a single file
- **Easy to copy and save** as `docs/project-architecture.md`
- **Easy to copy and save** as `docs/architecture.md`
- **Can be sharded later** in IDE if desired
For example, if you say "Add payment processing to user service":
@@ -108,10 +108,10 @@ For example, if you say "Add payment processing to user service":
2. **Upload your project**:
- **Option A**: Paste your GitHub repository URL directly
- **Option B**: Upload your flattened-codebase.xml file
3. **Load the analyst agent**: Upload `dist/agents/architect.txt`
3. **Load the architect agent**: Upload `dist/agents/architect.txt`
4. **Run documentation**: Type `*document-project`
The analyst will generate comprehensive documentation of everything.
The architect will generate comprehensive documentation of everything.
#### Phase 2: Plan Your Enhancement
@@ -206,19 +206,20 @@ The PO ensures:
### Phase 4: Save and Shard Documents
1. Save your PRD and Architecture as:
docs/brownfield-prd.md
docs/brownfield-architecture.md
docs/prd.md
docs/architecture.md
(Note: You can optionally prefix with 'brownfield-' if managing multiple versions)
2. Shard your docs:
In your IDE
```bash
@po
shard docs/brownfield-prd.md
shard docs/prd.md
```
```bash
@po
shard docs/brownfield-architecture.md
shard docs/architecture.md
```
### Phase 5: Transition to Development
@@ -255,12 +256,172 @@ Brownfield changes should:
### 4. Test Integration Thoroughly
Focus testing on:
- Integration points
- Existing functionality (regression)
- Performance impact
- Data migrations
#### Why the Test Architect is Critical for Brownfield
In brownfield projects, the Test Architect (Quinn) becomes your safety net against breaking existing functionality. Unlike greenfield where you're building fresh, brownfield requires careful validation that new changes don't destabilize what already works.
#### Brownfield-Specific Testing Challenges
The Test Architect addresses unique brownfield complexities:
| **Challenge** | **How Test Architect Helps** | **Command** |
| --------------------------- | ------------------------------------------------- | ------------------- |
| **Regression Risks** | Identifies which existing features might break | `*risk` |
| **Legacy Dependencies** | Maps integration points and hidden dependencies | `*trace` |
| **Performance Degradation** | Validates no slowdown in existing flows | `*nfr` |
| **Coverage Gaps** | Finds untested legacy code that new changes touch | `*design` |
| **Breaking Changes** | Detects API/contract violations | `*review` |
| **Migration Safety** | Validates data transformations and rollback plans | `*risk` + `*review` |
#### Complete Test Architect Workflow for Brownfield
##### Stage 1: Before Development (Risk & Strategy)
**CRITICAL FOR BROWNFIELD - Run These First:**
```bash
# 1. RISK ASSESSMENT (Run IMMEDIATELY after story creation)
@qa *risk {brownfield-story}
# Identifies: Legacy dependencies, breaking changes, integration points
# Output: docs/qa/assessments/{epic}.{story}-risk-{YYYYMMDD}.md
# Brownfield Focus:
# - Regression probability scoring
# - Affected downstream systems
# - Data migration risks
# - Rollback complexity
# 2. TEST DESIGN (After risk assessment)
@qa *design {brownfield-story}
# Creates: Regression test strategy + new feature tests
# Output: docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md
# Brownfield Focus:
# - Existing functionality that needs regression tests
# - Integration test requirements
# - Performance benchmarks to maintain
# - Feature flag test scenarios
```
##### Stage 2: During Development (Continuous Validation)
**Monitor Integration Health While Coding:**
```bash
# 3. REQUIREMENTS TRACING (Mid-development checkpoint)
@qa *trace {brownfield-story}
# Maps: New requirements + existing functionality preservation
# Output: docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md
# Brownfield Focus:
# - Existing features that must still work
# - New/old feature interactions
# - API contract preservation
# - Missing regression test coverage
# 4. NFR VALIDATION (Before considering "done")
@qa *nfr {brownfield-story}
# Validates: Performance, security, reliability unchanged
# Output: docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md
# Brownfield Focus:
# - Performance regression detection
# - Security implications of integrations
# - Backward compatibility validation
# - Load/stress on legacy components
```
##### Stage 3: Code Review (Deep Integration Analysis)
**Comprehensive Brownfield Review:**
```bash
# 5. FULL REVIEW (When development complete)
@qa *review {brownfield-story}
# Performs: Deep analysis + active refactoring
# Outputs:
# - QA Results in story file
# - Gate file: docs/qa/gates/{epic}.{story}-{slug}.yml
```
The review specifically analyzes:
- **API Breaking Changes**: Validates all existing contracts maintained
- **Data Migration Safety**: Checks transformation logic and rollback procedures
- **Performance Regression**: Compares against baseline metrics
- **Integration Points**: Validates all touchpoints with legacy code
- **Feature Flag Logic**: Ensures proper toggle behavior
- **Dependency Impacts**: Maps affected downstream systems
##### Stage 4: Post-Review (Gate Updates)
```bash
# 6. GATE STATUS UPDATE (After addressing issues)
@qa *gate {brownfield-story}
# Updates: Quality gate decision after fixes
# Output: docs/qa/gates/{epic}.{story}-{slug}.yml
# Brownfield Considerations:
# - May WAIVE certain legacy code issues
# - Documents technical debt acceptance
# - Tracks migration progress
```
#### Brownfield-Specific Risk Scoring
The Test Architect uses enhanced risk scoring for brownfield:
| **Risk Category** | **Brownfield Factors** | **Impact on Gate** |
| ---------------------- | ------------------------------------------ | ------------------- |
| **Regression Risk** | Number of integration points × Age of code | Score ≥9 = FAIL |
| **Data Risk** | Migration complexity × Data volume | Score ≥6 = CONCERNS |
| **Performance Risk** | Current load × Added complexity | Score ≥6 = CONCERNS |
| **Compatibility Risk** | API consumers × Contract changes | Score ≥9 = FAIL |
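As a rough illustration of how those thresholds combine — assuming a 1-3 probability × 1-3 impact scale, which is an inference from the ≥9/≥6 cut-offs rather than something stated here:

```bash
# Hypothetical scoring walk-through (scale assumed: probability 1-3 x impact 1-3)
probability=3   # e.g. the change touches several integration points in old, untested code
impact=3        # e.g. a regression would break existing API consumers
score=$((probability * impact))

if [ "$score" -ge 9 ]; then
  echo "regression risk $score -> gate FAIL"
elif [ "$score" -ge 6 ]; then
  echo "regression risk $score -> gate CONCERNS"
else
  echo "regression risk $score -> below the CONCERNS threshold"
fi
```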
#### Brownfield Testing Standards
Quinn enforces additional standards for brownfield:
- **Regression Test Coverage**: Every touched legacy module needs tests
- **Performance Baselines**: Must maintain or improve current metrics
- **Rollback Procedures**: Every change needs a rollback plan
- **Feature Flags**: All risky changes behind toggles
- **Integration Tests**: Cover all legacy touchpoints
- **Contract Tests**: Validate API compatibility
- **Data Validation**: Migration correctness checks
#### Quick Reference: Brownfield Test Commands
| **Scenario** | **Commands to Run** | **Order** | **Why Critical** |
| --------------------------------- | ---------------------------------------------------- | ---------- | ----------------------------- |
| **Adding Feature to Legacy Code** | `*risk` → `*design` → `*trace` → `*review` | Sequential | Map all dependencies first |
| **API Modification** | `*risk` → `*design` → `*nfr` → `*review` | Sequential | Prevent breaking consumers |
| **Performance-Critical Change** | `*nfr` early and often → `*review` | Continuous | Catch degradation immediately |
| **Data Migration** | `*risk` → `*design` → `*trace` → `*review` → `*gate` | Full cycle | Ensure data integrity |
| **Bug Fix in Complex System** | `*risk` → `*trace` → `*review` | Focused | Prevent side effects |
#### Integration with Brownfield Scenarios
**Scenario-Specific Guidance:**
1. **Legacy Code Modernization**
- Start with `*risk` to map all dependencies
- Use `*design` to plan strangler fig approach
- Run `*trace` frequently to ensure nothing breaks
- `*review` with focus on gradual migration
2. **Adding Features to Monolith**
- `*risk` identifies integration complexity
- `*design` plans isolation strategies
- `*nfr` monitors performance impact
- `*review` validates no monolith degradation
3. **Microservice Extraction**
- `*risk` maps service boundaries
- `*trace` ensures functionality preservation
- `*nfr` validates network overhead acceptable
- `*gate` documents accepted trade-offs
4. **Database Schema Changes**
- `*risk` assesses migration complexity
- `*design` plans backward-compatible approach
- `*trace` maps all affected queries
- `*review` validates migration safety
### 5. Communicate Changes
@@ -277,29 +438,63 @@ Document:
1. Document existing system
2. Create brownfield PRD focusing on integration
3. Architecture emphasizes compatibility
4. Stories include integration tasks
3. **Test Architect Early Involvement**:
- Run `@qa *risk` on draft stories to identify integration risks
- Use `@qa *design` to plan regression test strategy
4. Architecture emphasizes compatibility
5. Stories include integration tasks with test requirements
6. **During Development**:
- Developer runs `@qa *trace` to verify coverage
- Use `@qa *nfr` to monitor performance impact
7. **Review Stage**: `@qa *review` validates integration safety
### Scenario 2: Modernizing Legacy Code
1. Extensive documentation phase
2. PRD includes migration strategy
3. Architecture plans gradual transition
4. Stories follow strangler fig pattern
3. **Test Architect Strategy Planning**:
- `@qa *risk` assesses modernization complexity
- `@qa *design` plans parallel testing approach
4. Architecture plans gradual transition (strangler fig pattern)
5. Stories follow incremental modernization with:
- Regression tests for untouched legacy code
- Integration tests for new/old boundaries
- Performance benchmarks at each stage
6. **Continuous Validation**: Run `@qa *trace` after each increment
7. **Gate Management**: Use `@qa *gate` to track technical debt acceptance
### Scenario 3: Bug Fix in Complex System
1. Document relevant subsystems
2. Use `create-brownfield-story` for focused fix
3. Include regression test requirements
4. QA validates no side effects
3. **Test Architect Risk Assessment**: Run `@qa *risk` to identify side effect potential
4. Include regression test requirements from `@qa *design` output
5. **During Fix**: Use `@qa *trace` to map affected functionality
6. **Before Commit**: Run `@qa *review` for comprehensive validation
7. Test Architect validates no side effects using:
- Risk profiling for side effect analysis (probability × impact scoring)
- Trace matrix to ensure fix doesn't break related features
- NFR assessment to verify performance/security unchanged
- Gate decision documents fix safety
### Scenario 4: API Integration
1. Document existing API patterns
2. PRD defines integration requirements
3. Architecture ensures consistent patterns
4. Stories include API documentation updates
3. **Test Architect Contract Analysis**:
- `@qa *risk` identifies breaking change potential
- `@qa *design` creates contract test strategy
4. Architecture ensures consistent patterns
5. **API Testing Focus**:
- Contract tests for backward compatibility
- Integration tests for new endpoints
- Performance tests for added load
6. Stories include API documentation updates
7. **Validation Checkpoints**:
- `@qa *trace` maps all API consumers
- `@qa *nfr` validates response times
- `@qa *review` ensures no breaking changes
8. **Gate Decision**: Document any accepted breaking changes with migration path
## Troubleshooting
@@ -325,19 +520,37 @@ Document:
```bash
# Document existing project
@architect *document-project
@architect *document-project
# Create enhancement PRD
@pm *create-brownfield-prd
@pm *create-brownfield-prd
# Create architecture with integration focus
@architect *create-brownfield-architecture
@architect *create-brownfield-architecture
# Quick epic creation
@pm *create-brownfield-epic
@pm *create-brownfield-epic
# Single story creation
@pm *create-brownfield-story
@pm *create-brownfield-story
```
### Test Architect Commands for Brownfield
Note: Short forms shown below. Full commands: `*risk-profile`, `*test-design`, `*nfr-assess`, `*trace-requirements`
```bash
# BEFORE DEVELOPMENT (Planning)
@qa *risk {story} # Assess regression & integration risks
@qa *design {story} # Plan regression + new feature tests
# DURING DEVELOPMENT (Validation)
@qa *trace {story} # Verify coverage of old + new
@qa *nfr {story} # Check performance degradation
# AFTER DEVELOPMENT (Review)
@qa *review {story} # Deep integration analysis
@qa *gate {story} # Update quality decision
```
### Decision Tree
@@ -352,13 +565,33 @@ Do you have a large codebase or monorepo?
Is this a major enhancement affecting multiple systems?
├─ Yes → Full Brownfield Workflow
│ └─ ALWAYS run Test Architect *risk + *design first
└─ No → Is this more than a simple bug fix?
├─ Yes → brownfield-create-epic
└─ No → brownfield-create-story
├─ Yes → *create-brownfield-epic
│ └─ Run Test Architect *risk for integration points
└─ No → *create-brownfield-story
└─ Still run *risk if touching critical paths
Does the change touch legacy code?
├─ Yes → Test Architect is MANDATORY
│ ├─ *risk → Identify regression potential
│ ├─ *design → Plan test coverage
│ └─ *review → Validate no breakage
└─ No → Test Architect is RECOMMENDED
└─ *review → Ensure quality standards
```
## Conclusion
Brownfield development with BMad-Method provides structure and safety when modifying existing systems. The key is providing comprehensive context through documentation, using specialized templates that consider integration requirements, and following workflows that respect existing constraints while enabling progress.
Brownfield development with BMad Method provides structure and safety when modifying existing systems. The Test Architect becomes your critical safety net, using risk assessment, regression testing, and continuous validation to ensure new changes don't destabilize existing functionality.
Remember: **Document First, Plan Carefully, Integrate Safely**
**The Brownfield Success Formula:**
1. **Document First** - Understand what exists
2. **Assess Risk Early** - Use Test Architect `*risk` before coding
3. **Plan Test Strategy** - Design regression + new feature tests
4. **Validate Continuously** - Check integration health during development
5. **Review Comprehensively** - Deep analysis before committing
6. **Gate Decisively** - Document quality decisions
Remember: **In brownfield, the Test Architect isn't optional - it's your insurance policy against breaking production.**

119
eslint.config.mjs Normal file
View File

@@ -0,0 +1,119 @@
import js from '@eslint/js';
import eslintConfigPrettier from 'eslint-config-prettier/flat';
import nodePlugin from 'eslint-plugin-n';
import unicorn from 'eslint-plugin-unicorn';
import yml from 'eslint-plugin-yml';
export default [
// Global ignores for files/folders that should not be linted
{
ignores: ['dist/**', 'coverage/**', '**/*.min.js'],
},
// Base JavaScript recommended rules
js.configs.recommended,
// Node.js rules
...nodePlugin.configs['flat/mixed-esm-and-cjs'],
// Unicorn rules (modern best practices)
unicorn.configs.recommended,
// YAML linting
...yml.configs['flat/recommended'],
// Place Prettier last to disable conflicting stylistic rules
eslintConfigPrettier,
// Project-specific tweaks
{
rules: {
// Allow console for CLI tools in this repo
'no-console': 'off',
// Enforce .yaml file extension for consistency
'yml/file-extension': [
'error',
{
extension: 'yaml',
caseSensitive: true,
},
],
// Prefer double quotes in YAML wherever quoting is used, but allow the other to avoid escapes
'yml/quotes': [
'error',
{
prefer: 'double',
avoidEscape: true,
},
],
// Relax some Unicorn rules that are too opinionated for this codebase
'unicorn/prevent-abbreviations': 'off',
'unicorn/no-null': 'off',
},
},
// CLI/CommonJS scripts under tools/**
{
files: ['tools/**/*.js'],
rules: {
// Allow CommonJS patterns for Node CLI scripts
'unicorn/prefer-module': 'off',
'unicorn/import-style': 'off',
'unicorn/no-process-exit': 'off',
'n/no-process-exit': 'off',
'unicorn/no-await-expression-member': 'off',
'unicorn/prefer-top-level-await': 'off',
// Avoid failing CI on incidental unused vars in internal scripts
'no-unused-vars': 'off',
// Reduce style-only churn in internal tools
'unicorn/prefer-ternary': 'off',
'unicorn/filename-case': 'off',
'unicorn/no-array-reduce': 'off',
'unicorn/no-array-callback-reference': 'off',
'unicorn/consistent-function-scoping': 'off',
'n/no-extraneous-require': 'off',
'n/no-extraneous-import': 'off',
'n/no-unpublished-require': 'off',
'n/no-unpublished-import': 'off',
// Some scripts intentionally use globals provided at runtime
'no-undef': 'off',
// Additional relaxed rules for legacy/internal scripts
'no-useless-catch': 'off',
'unicorn/prefer-number-properties': 'off',
'no-unreachable': 'off',
},
},
// ESLint config file should not be checked for publish-related Node rules
{
files: ['eslint.config.mjs'],
rules: {
'n/no-unpublished-import': 'off',
},
},
// YAML workflow templates allow empty mapping values intentionally
{
files: ['bmad-core/workflows/**/*.yaml'],
rules: {
'yml/no-empty-mapping-value': 'off',
},
},
// GitHub workflow files in this repo may use empty mapping values
{
files: ['.github/workflows/**/*.yaml'],
rules: {
'yml/no-empty-mapping-value': 'off',
},
},
// Other GitHub YAML files may intentionally use empty values and reserved filenames
{
files: ['.github/**/*.yaml'],
rules: {
'yml/no-empty-mapping-value': 'off',
'unicorn/filename-case': 'off',
},
},
];
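With the plugins above installed as devDependencies, local runs would presumably look like the following (a sketch; any repo-specific npm scripts may differ):

```bash
# Check everything the flat config covers; --fix applies safe autofixes
npx eslint .
npx eslint . --fix
```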

View File

@@ -1,26 +1,26 @@
steps:
# Build the container image
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/{{PROJECT_ID}}/{{COMPANY_NAME}}-ai-agents:$COMMIT_SHA', '.']
- name: "gcr.io/cloud-builders/docker"
args: ["build", "-t", "gcr.io/{{PROJECT_ID}}/{{COMPANY_NAME}}-ai-agents:$COMMIT_SHA", "."]
# Push the container image to Container Registry
- name: 'gcr.io/cloud-builders/docker'
args: ['push', 'gcr.io/{{PROJECT_ID}}/{{COMPANY_NAME}}-ai-agents:$COMMIT_SHA']
- name: "gcr.io/cloud-builders/docker"
args: ["push", "gcr.io/{{PROJECT_ID}}/{{COMPANY_NAME}}-ai-agents:$COMMIT_SHA"]
# Deploy container image to Cloud Run
- name: 'gcr.io/google.com/cloudsdktool/cloud-sdk'
- name: "gcr.io/google.com/cloudsdktool/cloud-sdk"
entrypoint: gcloud
args:
- 'run'
- 'deploy'
- '{{COMPANY_NAME}}-ai-agents'
- '--image'
- 'gcr.io/{{PROJECT_ID}}/{{COMPANY_NAME}}-ai-agents:$COMMIT_SHA'
- '--region'
- '{{LOCATION}}'
- '--platform'
- 'managed'
- '--allow-unauthenticated'
- "run"
- "deploy"
- "{{COMPANY_NAME}}-ai-agents"
- "--image"
- "gcr.io/{{PROJECT_ID}}/{{COMPANY_NAME}}-ai-agents:$COMMIT_SHA"
- "--region"
- "{{LOCATION}}"
- "--platform"
- "managed"
- "--allow-unauthenticated"
images:
- 'gcr.io/{{PROJECT_ID}}/{{COMPANY_NAME}}-ai-agents:$COMMIT_SHA'
- "gcr.io/{{PROJECT_ID}}/{{COMPANY_NAME}}-ai-agents:$COMMIT_SHA"

View File

@@ -8,21 +8,21 @@ This expansion pack provides a complete, deployable starter kit for building and
## Features
* **Automated GCP Setup**: `gcloud` scripts to configure your project, service accounts, and required APIs in minutes.
* **Production-Ready Deployment**: Includes a `Dockerfile` and `cloudbuild.yaml` for easy, repeatable deployments to Google Cloud Run.
* **Rich Template Library**: A comprehensive set of BMad-compatible templates for Teams, Agents, Tasks, Workflows, Documents, and Checklists.
* **Pre-configured Agent Roles**: Includes powerful master templates for key agent archetypes like Orchestrators and Specialists.
* **Highly Customizable**: Easily adapt the entire system with company-specific variables and industry-specific configurations.
* **Powered by Google ADK**: Built on the official Google Agent Development Kit for robust and native integration with Vertex AI services.
- **Automated GCP Setup**: `gcloud` scripts to configure your project, service accounts, and required APIs in minutes.
- **Production-Ready Deployment**: Includes a `Dockerfile` and `cloudbuild.yaml` for easy, repeatable deployments to Google Cloud Run.
- **Rich Template Library**: A comprehensive set of BMad-compatible templates for Teams, Agents, Tasks, Workflows, Documents, and Checklists.
- **Pre-configured Agent Roles**: Includes powerful master templates for key agent archetypes like Orchestrators and Specialists.
- **Highly Customizable**: Easily adapt the entire system with company-specific variables and industry-specific configurations.
- **Powered by Google ADK**: Built on the official Google Agent Development Kit for robust and native integration with Vertex AI services.
## Prerequisites
Before you begin, ensure you have the following installed and configured:
* A Google Cloud Platform (GCP) Account with an active billing account.
* The [Google Cloud SDK (`gcloud` CLI)](https://www.google.com/search?q=%5Bhttps://cloud.google.com/sdk/docs/install%5D\(https://cloud.google.com/sdk/docs/install\)) installed and authenticated.
* [Docker](https://www.docker.com/products/docker-desktop/) installed on your local machine.
* Python 3.11+
- A Google Cloud Platform (GCP) Account with an active billing account.
- The [Google Cloud SDK (`gcloud` CLI)](<https://www.google.com/search?q=%5Bhttps://cloud.google.com/sdk/docs/install%5D(https://cloud.google.com/sdk/docs/install)>) installed and authenticated.
- [Docker](https://www.docker.com/products/docker-desktop/) installed on your local machine.
- Python 3.11+
## Quick Start Guide
@@ -32,9 +32,9 @@ Follow these steps to get your own AI agent system running on Google Cloud.
The setup scripts use placeholder variables. Before running them, open the files in the `/scripts` directory and replace the following placeholders with your own values:
* `{{PROJECT_ID}}`: Your unique Google Cloud project ID.
* `{{COMPANY_NAME}}`: Your company or project name (used for naming resources).
* `{{LOCATION}}`: The GCP region you want to deploy to (e.g., `us-central1`).
- `{{PROJECT_ID}}`: Your unique Google Cloud project ID.
- `{{COMPANY_NAME}}`: Your company or project name (used for naming resources).
- `{{LOCATION}}`: The GCP region you want to deploy to (e.g., `us-central1`).
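If you would rather not edit each file by hand, a substitution pass along these lines should work — a sketch that assumes the placeholders appear verbatim as `{{...}}` in the files under `scripts/`:

```bash
# Illustrative values - replace with your own project details
PROJECT_ID="my-gcp-project"
COMPANY_NAME="acme"
LOCATION="us-central1"

# GNU sed shown; on macOS use `sed -i ''` instead of `sed -i`
for f in scripts/*; do
  sed -i \
    -e "s/{{PROJECT_ID}}/${PROJECT_ID}/g" \
    -e "s/{{COMPANY_NAME}}/${COMPANY_NAME}/g" \
    -e "s/{{LOCATION}}/${LOCATION}/g" \
    "$f"
done
```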
### 2\. Run the GCP Setup Scripts

View File

@@ -60,10 +60,10 @@ commands:
task-execution:
flow: Read story → Implement game feature → Write tests → Pass tests → Update [x] → Next task
updates-ONLY:
- "Checkboxes: [ ] not started | [-] in progress | [x] complete"
- "Debug Log: | Task | File | Change | Reverted? |"
- "Completion Notes: Deviations only, <50 words"
- "Change Log: Requirement changes only"
- 'Checkboxes: [ ] not started | [-] in progress | [x] complete'
- 'Debug Log: | Task | File | Change | Reverted? |'
- 'Completion Notes: Deviations only, <50 words'
- 'Change Log: Requirement changes only'
blocking: Unapproved deps | Ambiguous after story check | 3 failures | Missing game config
done: Game feature works + Tests pass + 60 FPS + No lint errors + Follows Phaser 3 best practices
dependencies:

View File

@@ -27,7 +27,7 @@ activation-instructions:
- When listing tasks/templates or presenting options during conversations, always show as numbered options list, allowing the user to type a number to select or execute
- STAY IN CHARACTER!
- CRITICAL: On activation, ONLY greet user and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
- "CRITICAL RULE: You are ONLY allowed to create/modify story files - NEVER implement! If asked to implement, tell user they MUST switch to Game Developer Agent"
- 'CRITICAL RULE: You are ONLY allowed to create/modify story files - NEVER implement! If asked to implement, tell user they MUST switch to Game Developer Agent'
agent:
name: Jordan
id: game-sm

View File

@@ -39,13 +39,11 @@ You are developing games as a "Player Experience CEO" - thinking like a game dir
### Phase 1: Game Concept and Design
1. **Game Designer**: Start with brainstorming and concept development
- Use \*brainstorm to explore game concepts and mechanics
- Create Game Brief using game-brief-tmpl
- Develop core game pillars and player experience goals
2. **Game Designer**: Create comprehensive Game Design Document
- Use game-design-doc-tmpl to create detailed GDD
- Define all game mechanics, progression, and balance
- Specify technical requirements and platform targets
@@ -65,13 +63,11 @@ You are developing games as a "Player Experience CEO" - thinking like a game dir
### Phase 3: Story-Driven Development
5. **Game Scrum Master**: Break down design into development stories
- Use create-game-story task to create detailed implementation stories
- Each story should be immediately actionable by game developers
- Apply game-story-dod-checklist to ensure story quality
6. **Game Developer**: Implement game features story by story
- Follow TypeScript strict mode and Phaser 3 best practices
- Maintain 60 FPS performance target throughout development
- Use test-driven development for game logic components

View File

@@ -73,7 +73,7 @@ interface GameState {
interface GameSettings {
musicVolume: number;
sfxVolume: number;
difficulty: "easy" | "normal" | "hard";
difficulty: 'easy' | 'normal' | 'hard';
controls: ControlScheme;
}
```
@@ -114,12 +114,12 @@ class GameScene extends Phaser.Scene {
private inputManager!: InputManager;
constructor() {
super({ key: "GameScene" });
super({ key: 'GameScene' });
}
preload(): void {
// Load only scene-specific assets
this.load.image("player", "assets/player.png");
this.load.image('player', 'assets/player.png');
}
create(data: SceneData): void {
@@ -144,7 +144,7 @@ class GameScene extends Phaser.Scene {
this.inputManager.destroy();
// Remove event listeners
this.events.off("*");
this.events.off('*');
}
}
```
@@ -153,13 +153,13 @@ class GameScene extends Phaser.Scene {
```typescript
// Proper scene transitions with data
this.scene.start("NextScene", {
this.scene.start('NextScene', {
playerScore: this.playerScore,
currentLevel: this.currentLevel + 1,
});
// Scene overlays for UI
this.scene.launch("PauseMenuScene");
this.scene.launch('PauseMenuScene');
this.scene.pause();
```
@@ -203,7 +203,7 @@ class Player extends GameEntity {
private health!: HealthComponent;
constructor(scene: Phaser.Scene, x: number, y: number) {
super(scene, x, y, "player");
super(scene, x, y, 'player');
this.movement = this.addComponent(new MovementComponent(this));
this.health = this.addComponent(new HealthComponent(this, 100));
@@ -223,7 +223,7 @@ class GameManager {
constructor(scene: Phaser.Scene) {
if (GameManager.instance) {
throw new Error("GameManager already exists!");
throw new Error('GameManager already exists!');
}
this.scene = scene;
@@ -233,7 +233,7 @@ class GameManager {
static getInstance(): GameManager {
if (!GameManager.instance) {
throw new Error("GameManager not initialized!");
throw new Error('GameManager not initialized!');
}
return GameManager.instance;
}
@@ -280,7 +280,7 @@ class BulletPool {
}
// Pool exhausted - create new bullet
console.warn("Bullet pool exhausted, creating new bullet");
console.warn('Bullet pool exhausted, creating new bullet');
return new Bullet(this.scene, 0, 0);
}
@@ -380,12 +380,12 @@ class InputManager {
}
private setupKeyboard(): void {
this.keys = this.scene.input.keyboard.addKeys("W,A,S,D,SPACE,ESC,UP,DOWN,LEFT,RIGHT");
this.keys = this.scene.input.keyboard.addKeys('W,A,S,D,SPACE,ESC,UP,DOWN,LEFT,RIGHT');
}
private setupTouch(): void {
this.scene.input.on("pointerdown", this.handlePointerDown, this);
this.scene.input.on("pointerup", this.handlePointerUp, this);
this.scene.input.on('pointerdown', this.handlePointerDown, this);
this.scene.input.on('pointerup', this.handlePointerUp, this);
}
update(): void {
@@ -412,9 +412,9 @@ class InputManager {
class AssetManager {
loadAssets(): Promise<void> {
return new Promise((resolve, reject) => {
this.scene.load.on("filecomplete", this.handleFileComplete, this);
this.scene.load.on("loaderror", this.handleLoadError, this);
this.scene.load.on("complete", () => resolve());
this.scene.load.on('filecomplete', this.handleFileComplete, this);
this.scene.load.on('loaderror', this.handleLoadError, this);
this.scene.load.on('complete', () => resolve());
this.scene.load.start();
});
@@ -430,8 +430,8 @@ class AssetManager {
private loadFallbackAsset(key: string): void {
// Load placeholder or default assets
switch (key) {
case "player":
this.scene.load.image("player", "assets/defaults/default-player.png");
case 'player':
this.scene.load.image('player', 'assets/defaults/default-player.png');
break;
default:
console.warn(`No fallback for asset: ${key}`);
@@ -458,11 +458,11 @@ class GameSystem {
private attemptRecovery(context: string): void {
switch (context) {
case "update":
case 'update':
// Reset system state
this.reset();
break;
case "render":
case 'render':
// Disable visual effects
this.disableEffects();
break;
@@ -482,7 +482,7 @@ class GameSystem {
```typescript
// Example test for game mechanics
describe("HealthComponent", () => {
describe('HealthComponent', () => {
let healthComponent: HealthComponent;
beforeEach(() => {
@@ -490,18 +490,18 @@ describe("HealthComponent", () => {
healthComponent = new HealthComponent(mockEntity, 100);
});
test("should initialize with correct health", () => {
test('should initialize with correct health', () => {
expect(healthComponent.currentHealth).toBe(100);
expect(healthComponent.maxHealth).toBe(100);
});
test("should handle damage correctly", () => {
test('should handle damage correctly', () => {
healthComponent.takeDamage(25);
expect(healthComponent.currentHealth).toBe(75);
expect(healthComponent.isAlive()).toBe(true);
});
test("should handle death correctly", () => {
test('should handle death correctly', () => {
healthComponent.takeDamage(150);
expect(healthComponent.currentHealth).toBe(0);
expect(healthComponent.isAlive()).toBe(false);
@@ -514,7 +514,7 @@ describe("HealthComponent", () => {
**Scene Testing:**
```typescript
describe("GameScene Integration", () => {
describe('GameScene Integration', () => {
let scene: GameScene;
let mockGame: Phaser.Game;
@@ -524,7 +524,7 @@ describe("GameScene Integration", () => {
scene = new GameScene();
});
test("should initialize all systems", () => {
test('should initialize all systems', () => {
scene.create({});
expect(scene.gameManager).toBeDefined();
@@ -585,25 +585,21 @@ src/
### Story Implementation Process
1. **Read Story Requirements:**
- Understand acceptance criteria
- Identify technical requirements
- Review performance constraints
2. **Plan Implementation:**
- Identify files to create/modify
- Consider component architecture
- Plan testing approach
3. **Implement Feature:**
- Follow TypeScript strict mode
- Use established patterns
- Maintain 60 FPS performance
4. **Test Implementation:**
- Write unit tests for game logic
- Test cross-platform functionality
- Validate performance targets

View File

@@ -18,7 +18,6 @@
2. If the section contains game flow diagrams, level layouts, or system diagrams, explain each diagram briefly with game development context before offering elicitation options (e.g., "The gameplay loop diagram shows how player actions lead to rewards and progression. Notice how each step maintains player engagement and creates opportunities for skill development.")
3. If the section contains multiple game elements (like multiple mechanics, multiple levels, multiple systems, etc.), inform the user they can apply elicitation actions to:
- The entire section as a whole
- Individual game elements within the section (specify which element when selecting an action)

View File

@@ -9,7 +9,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
[[LLM: Begin by understanding the game design context and goals. Ask clarifying questions if needed to determine the best approach for game-specific ideation.]]
1. **Establish Game Context**
- Understand the game genre or opportunity area
- Identify target audience and platform constraints
- Determine session goals (concept exploration vs. mechanic refinement)
@@ -27,7 +26,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **"What If" Game Scenarios**
[[LLM: Generate provocative what-if questions that challenge game design assumptions and expand thinking beyond current genre limitations.]]
- What if players could rewind time in any genre?
- What if the game world reacted to the player's real-world location?
- What if failure was more rewarding than success?
@@ -36,7 +34,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Cross-Genre Fusion**
[[LLM: Help user combine unexpected game genres and mechanics to create unique experiences.]]
- "How might [genre A] mechanics work in [genre B]?"
- Puzzle mechanics in action games
- Dating sim elements in strategy games
@@ -45,7 +42,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
3. **Player Motivation Reversal**
[[LLM: Flip traditional player motivations to reveal new gameplay possibilities.]]
- What if losing was the goal?
- What if cooperation was forced in competitive games?
- What if players had to help their enemies?
@@ -62,7 +58,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **SCAMPER for Game Mechanics**
[[LLM: Guide through each SCAMPER prompt specifically for game design.]]
- **S** = Substitute: What mechanics can be substituted? (walking → flying → swimming)
- **C** = Combine: What systems can be merged? (inventory + character growth)
- **A** = Adapt: What mechanics from other media? (books, movies, sports)
@@ -73,7 +68,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Player Agency Spectrum**
[[LLM: Explore different levels of player control and agency across game systems.]]
- Full Control: Direct character movement, combat, building
- Indirect Control: Setting rules, giving commands, environmental changes
- Influence Only: Suggestions, preferences, emotional reactions
@@ -81,7 +75,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
3. **Temporal Game Design**
[[LLM: Explore how time affects gameplay and player experience.]]
- Real-time vs. turn-based mechanics
- Time travel and manipulation
- Persistent vs. session-based progress
@@ -92,7 +85,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **Emotion-First Design**
[[LLM: Start with target emotions and work backward to mechanics that create them.]]
- Target Emotion: Wonder → Mechanics: Discovery, mystery, scale
- Target Emotion: Triumph → Mechanics: Challenge, skill growth, recognition
- Target Emotion: Connection → Mechanics: Cooperation, shared goals, communication
@@ -100,7 +92,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Player Archetype Brainstorming**
[[LLM: Design for different player types and motivations.]]
- Achievers: Progression, completion, mastery
- Explorers: Discovery, secrets, world-building
- Socializers: Interaction, cooperation, community
@@ -109,7 +100,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
3. **Accessibility-First Innovation**
[[LLM: Generate ideas that make games more accessible while creating new gameplay.]]
- Visual impairment considerations leading to audio-focused mechanics
- Motor accessibility inspiring one-handed or simplified controls
- Cognitive accessibility driving clear feedback and pacing
@@ -119,7 +109,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **Environmental Storytelling**
[[LLM: Brainstorm ways the game world itself tells stories without explicit narrative.]]
- How does the environment show history?
- What do interactive objects reveal about characters?
- How can level design communicate mood?
@@ -127,7 +116,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Player-Generated Narrative**
[[LLM: Explore ways players create their own stories through gameplay.]]
- Emergent storytelling through player choices
- Procedural narrative generation
- Player-to-player story sharing
@@ -135,7 +123,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
3. **Genre Expectation Subversion**
[[LLM: Identify and deliberately subvert player expectations within genres.]]
- Fantasy RPG where magic is mundane
- Horror game where monsters are friendly
- Racing game where going slow is optimal
@@ -145,7 +132,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **Platform-Specific Design**
[[LLM: Generate ideas that leverage unique platform capabilities.]]
- Mobile: GPS, accelerometer, camera, always-connected
- Web: URLs, tabs, social sharing, real-time collaboration
- Console: Controllers, TV viewing, couch co-op
@@ -153,7 +139,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Constraint-Based Creativity**
[[LLM: Use technical or design constraints as creative catalysts.]]
- One-button games
- Games without graphics
- Games that play in notification bars
@@ -199,19 +184,16 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
[[LLM: Guide the brainstorming session with appropriate pacing for game design exploration.]]
1. **Inspiration Phase** (10-15 min)
- Reference existing games and mechanics
- Explore player experiences and emotions
- Gather visual and thematic inspiration
2. **Divergent Exploration** (25-35 min)
- Generate many game concepts or mechanics
- Use expansion and fusion techniques
- Encourage wild and impossible ideas
3. **Player-Centered Filtering** (15-20 min)
- Consider target audience reactions
- Evaluate emotional impact and engagement
- Group ideas by player experience goals

View File

@@ -17,21 +17,21 @@ workflow:
- brainstorming_session
- game_research_prompt
- player_research
notes: 'Start with brainstorming game concepts, then create comprehensive game brief. SAVE OUTPUT: Copy final game-brief.md to your project''s docs/design/ folder.'
notes: "Start with brainstorming game concepts, then create comprehensive game brief. SAVE OUTPUT: Copy final game-brief.md to your project's docs/design/ folder."
- agent: game-designer
creates: game-design-doc.md
requires: game-brief.md
optional_steps:
- competitive_analysis
- technical_research
notes: 'Create detailed Game Design Document using game-design-doc-tmpl. Defines all gameplay mechanics, progression, and technical requirements. SAVE OUTPUT: Copy final game-design-doc.md to your project''s docs/design/ folder.'
notes: "Create detailed Game Design Document using game-design-doc-tmpl. Defines all gameplay mechanics, progression, and technical requirements. SAVE OUTPUT: Copy final game-design-doc.md to your project's docs/design/ folder."
- agent: game-designer
creates: level-design-doc.md
requires: game-design-doc.md
optional_steps:
- level_prototyping
- difficulty_analysis
notes: 'Create level design framework using level-design-doc-tmpl. Establishes content creation guidelines and performance requirements. SAVE OUTPUT: Copy final level-design-doc.md to your project''s docs/design/ folder.'
notes: "Create level design framework using level-design-doc-tmpl. Establishes content creation guidelines and performance requirements. SAVE OUTPUT: Copy final level-design-doc.md to your project's docs/design/ folder."
- agent: solution-architect
creates: game-architecture.md
requires:
@@ -41,7 +41,7 @@ workflow:
- technical_research_prompt
- performance_analysis
- platform_research
notes: 'Create comprehensive technical architecture using game-architecture-tmpl. Defines Phaser 3 systems, performance optimization, and code structure. SAVE OUTPUT: Copy final game-architecture.md to your project''s docs/architecture/ folder.'
notes: "Create comprehensive technical architecture using game-architecture-tmpl. Defines Phaser 3 systems, performance optimization, and code structure. SAVE OUTPUT: Copy final game-architecture.md to your project's docs/architecture/ folder."
- agent: game-designer
validates: design_consistency
requires: all_design_documents
@@ -66,7 +66,7 @@ workflow:
optional_steps:
- quick_brainstorming
- concept_validation
notes: 'Create focused game brief for prototype. Emphasize core mechanics and immediate playability. SAVE OUTPUT: Copy final game-brief.md to your project''s docs/ folder.'
notes: "Create focused game brief for prototype. Emphasize core mechanics and immediate playability. SAVE OUTPUT: Copy final game-brief.md to your project's docs/ folder."
- agent: game-designer
creates: prototype-design.md
uses: create-doc prototype-design OR create-game-story

View File

@@ -44,7 +44,7 @@ workflow:
notes: Implement stories in priority order. Test frequently and adjust design based on what feels fun. Document discoveries.
workflow_end:
action: prototype_evaluation
notes: 'Prototype complete. Evaluate core mechanics, gather feedback, and decide next steps: iterate, expand, or archive.'
notes: "Prototype complete. Evaluate core mechanics, gather feedback, and decide next steps: iterate, expand, or archive."
game_jam_sequence:
- step: jam_concept
agent: game-designer

View File

@@ -61,13 +61,13 @@ commands:
- explain: teach me what and why you did whatever you just did in detail so I can learn. Explain to me as if you were training a junior Unity developer.
- exit: Say goodbye as the Game Developer, and then abandon inhabiting this persona
develop-story:
order-of-execution: "Read (first or next) task→Implement Task and its subtasks→Write tests→Execute validations→Only if ALL pass, then update the task checkbox with [x]→Update story section File List to ensure it lists and new or modified or deleted source file→repeat order-of-execution until complete"
order-of-execution: 'Read (first or next) task→Implement Task and its subtasks→Write tests→Execute validations→Only if ALL pass, then update the task checkbox with [x]→Update story section File List to ensure it lists any new, modified, or deleted source files→repeat order-of-execution until complete'
story-file-updates-ONLY:
- CRITICAL: ONLY UPDATE THE STORY FILE WITH UPDATES TO SECTIONS INDICATED BELOW. DO NOT MODIFY ANY OTHER SECTIONS.
- CRITICAL: You are ONLY authorized to edit these specific sections of story files - Tasks / Subtasks Checkboxes, Dev Agent Record section and all its subsections, Agent Model Used, Debug Log References, Completion Notes List, File List, Change Log, Status
- CRITICAL: DO NOT modify Status, Story, Acceptance Criteria, Dev Notes, Testing sections, or any other sections not listed above
blocking: "HALT for: Unapproved deps needed, confirm with user | Ambiguous after story check | 3 failures attempting to implement or fix something repeatedly | Missing config | Failing regression"
ready-for-review: "Code matches requirements + All validations pass + Follows Unity & C# standards + File List complete + Stable FPS"
blocking: 'HALT for: Unapproved deps needed, confirm with user | Ambiguous after story check | 3 failures attempting to implement or fix something repeatedly | Missing config | Failing regression'
ready-for-review: 'Code matches requirements + All validations pass + Follows Unity & C# standards + File List complete + Stable FPS'
completion: "All Tasks and Subtasks marked [x] and have tests→Validations and full regression passes (DON'T BE LAZY, EXECUTE ALL TESTS and CONFIRM)→Ensure File List is Complete→run the task execute-checklist for the checklist game-story-dod-checklist→set story status: 'Ready for Review'→HALT"
dependencies:
tasks:

View File

@@ -355,34 +355,29 @@ Ask the user if they want to work through the checklist:
Generate a comprehensive validation report that includes:
1. Executive Summary
- Overall game architecture readiness (High/Medium/Low)
- Critical risks for game development
- Key strengths of the game architecture
- Unity-specific assessment
2. Game Systems Analysis
- Pass rate for each major system section
- Most concerning gaps in game architecture
- Systems requiring immediate attention
- Unity integration completeness
3. Performance Risk Assessment
- Top 5 performance risks for the game
- Mobile platform specific concerns
- Frame rate stability risks
- Memory usage concerns
4. Implementation Recommendations
- Must-fix items before development
- Unity-specific improvements needed
- Game development workflow enhancements
5. AI Agent Implementation Readiness
- Game-specific concerns for AI implementation
- Unity component complexity assessment
- Areas needing additional clarification

View File

@@ -25,7 +25,6 @@ The goal is quality delivery, not just checking boxes.]]
1. **Requirements Met:**
[[LLM: Be specific - list each requirement and whether it's complete. Include game-specific requirements from GDD]]
- [ ] All functional requirements specified in the story are implemented.
- [ ] All acceptance criteria defined in the story are met.
- [ ] Game Design Document (GDD) requirements referenced in the story are implemented.
@@ -34,7 +33,6 @@ The goal is quality delivery, not just checking boxes.]]
2. **Coding Standards & Project Structure:**
[[LLM: Code quality matters for maintainability. Check Unity-specific patterns and C# standards]]
- [ ] All new/modified code strictly adheres to `Operational Guidelines`.
- [ ] All new/modified code aligns with `Project Structure` (Scripts/, Prefabs/, Scenes/, etc.).
- [ ] Adherence to `Tech Stack` for Unity version and packages used.
@@ -48,7 +46,6 @@ The goal is quality delivery, not just checking boxes.]]
3. **Testing:**
[[LLM: Testing proves your code works. Include Unity-specific testing with NUnit and manual testing]]
- [ ] All required unit tests (NUnit) as per the story and testing strategy are implemented.
- [ ] All required integration tests (if applicable) are implemented.
- [ ] Manual testing performed in Unity Editor for all game functionality.
@@ -60,7 +57,6 @@ The goal is quality delivery, not just checking boxes.]]
4. **Functionality & Verification:**
[[LLM: Did you actually run and test your code in Unity? Be specific about game mechanics tested]]
- [ ] Functionality has been manually verified in Unity Editor and play mode.
- [ ] Game mechanics work as specified in the GDD.
- [ ] Player controls and input handling work correctly.
@@ -73,7 +69,6 @@ The goal is quality delivery, not just checking boxes.]]
5. **Story Administration:**
[[LLM: Documentation helps the next developer. Include Unity-specific implementation notes]]
- [ ] All tasks within the story file are marked as complete.
- [ ] Any clarifications or decisions made during development are documented.
- [ ] Unity-specific implementation details documented (scene changes, prefab modifications).
@@ -83,7 +78,6 @@ The goal is quality delivery, not just checking boxes.]]
6. **Dependencies, Build & Configuration:**
[[LLM: Build issues block everyone. Ensure Unity project builds for all target platforms]]
- [ ] Unity project builds successfully without errors.
- [ ] Project builds for all target platforms (desktop/mobile as specified).
- [ ] Any new Unity packages or Asset Store items were pre-approved OR approved by user.
@@ -95,7 +89,6 @@ The goal is quality delivery, not just checking boxes.]]
7. **Game-Specific Quality:**
[[LLM: Game quality matters. Check performance, game feel, and player experience]]
- [ ] Frame rate meets target (30/60 FPS) on all platforms.
- [ ] Memory usage within acceptable limits.
- [ ] Game feel and responsiveness meet design requirements.
@@ -107,7 +100,6 @@ The goal is quality delivery, not just checking boxes.]]
8. **Documentation (If Applicable):**
[[LLM: Good documentation prevents future confusion. Include Unity-specific docs]]
- [ ] Code documentation (XML comments) for public APIs complete.
- [ ] Unity component documentation in Inspector updated.
- [ ] User-facing documentation updated, if changes impact players.

View File

@@ -270,7 +270,6 @@ that can handle [specific game requirements] with stable performance."
**Prerequisites**: Game planning documents must exist in `docs/` folder of Unity project
1. **Document Sharding** (CRITICAL STEP for Game Development):
- Documents created by Game Designer/Architect (in Web or IDE) MUST be sharded for development
- Use core BMad agents or tools to shard:
a) **Manual**: Use core BMad `shard-doc` task if available
@@ -293,20 +292,17 @@ Resulting Unity Project Folder Structure:
3. **Game Development Cycle** (Sequential, one game story at a time):
**CRITICAL CONTEXT MANAGEMENT for Unity Development**:
- **Context windows matter!** Always use fresh, clean context windows
- **Model selection matters!** Use most powerful thinking model for Game SM story creation
- **ALWAYS start new chat between Game SM, Game Dev, and QA work**
**Step 1 - Game Story Creation**:
- **NEW CLEAN CHAT** → Select powerful model → `/bmad2du/game-sm` `*draft`
- Game SM executes create-game-story task using `game-story-tmpl`
- Review generated story in `docs/game-stories/`
- Update status from "Draft" to "Approved"
**Step 2 - Unity Game Story Implementation**:
- **NEW CLEAN CHAT** → `/bmad2du/game-developer`
- Agent asks which game story to implement
- Include story file content to save game dev agent lookup time
@@ -315,7 +311,6 @@ Resulting Unity Project Folder Structure:
- Game Dev marks story as "Review" when complete with all Unity tests passing
**Step 3 - Game QA Review**:
- **NEW CLEAN CHAT** → Use core `@qa` agent → execute review-story task
- QA performs senior Unity developer code review
- QA can refactor and improve Unity code directly
@@ -355,14 +350,12 @@ Since this expansion pack doesn't include specific brownfield templates, you'll
1. **Upload Unity project to Web UI** (GitHub URL, files, or zip)
2. **Create adapted Game Design Document**: `/bmad2du/game-designer` - Modify `game-design-doc-tmpl` to include:
- Analysis of existing game systems
- Integration points for new features
- Compatibility requirements
- Risk assessment for changes
3. **Game Architecture Planning**:
- Use `/bmad2du/game-architect` with `game-architecture-tmpl`
- Focus on how new features integrate with existing Unity systems
- Plan for gradual rollout and testing
@@ -463,7 +456,7 @@ Use the `shard-doc` task or `@kayvan/markdown-tree-parser` tool for automatic ga
- **Claude Code**: `/bmad2du/game-designer`, `/bmad2du/game-developer`, `/bmad2du/game-sm`, `/bmad2du/game-architect`
- **Cursor**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Windsurf**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Windsurf**: `/bmad2du/game-designer`, `/bmad2du/game-developer`, `/bmad2du/game-sm`, `/bmad2du/game-architect`
- **Trae**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Roo Code**: Select mode from mode selector with bmad2du prefix
- **GitHub Copilot**: Open the Chat view (`⌃⌘I` on Mac, `Ctrl+Alt+I` on Windows/Linux) and select the appropriate game agent.

View File

@@ -531,25 +531,21 @@ Assets/
### Story Implementation Process
1. **Read Story Requirements:**
- Understand acceptance criteria
- Identify technical requirements
- Review performance constraints
2. **Plan Implementation:**
- Identify files to create/modify
- Consider Unity's component-based architecture
- Plan testing approach
3. **Implement Feature:**
- Write clean C# code following all guidelines
- Use established patterns
- Maintain stable FPS performance
4. **Test Implementation:**
- Write edit mode tests for game logic
- Write play mode tests for integration testing
- Test cross-platform functionality

View File

@@ -18,7 +18,6 @@
2. If the section contains game flow diagrams, level layouts, or system diagrams, explain each diagram briefly with game development context before offering elicitation options (e.g., "The gameplay loop diagram shows how player actions lead to rewards and progression. Notice how each step maintains player engagement and creates opportunities for skill development.")
3. If the section contains multiple game elements (like multiple mechanics, multiple levels, multiple systems, etc.), inform the user they can apply elicitation actions to:
- The entire section as a whole
- Individual game elements within the section (specify which element when selecting an action)

View File

@@ -14,7 +14,6 @@
### 1. Initial Setup & Mode Selection
- **Acknowledge Task & Inputs:**
- Confirm with the user that the "Game Development Correct Course Task" is being initiated.
- Verify the change trigger (e.g., performance issue, platform constraint, gameplay feedback, technical blocker).
- Confirm access to relevant game artifacts:
@@ -35,7 +34,6 @@
### 2. Execute Game Development Checklist Analysis
- Systematically work through the game-change-checklist sections:
1. **Change Context & Game Impact**
2. **Feature/System Impact Analysis**
3. **Technical Artifact Conflict Resolution**
@@ -60,7 +58,6 @@
Based on the analysis and agreed path forward:
- **Identify affected game artifacts requiring updates:**
- GDD sections (mechanics, systems, progression)
- Technical specifications (architecture, performance targets)
- Unity-specific configurations (build settings, quality settings)
@@ -69,7 +66,6 @@ Based on the analysis and agreed path forward:
- Platform-specific adaptations
- **Draft explicit changes for each artifact:**
- **Game Stories:** Revise story text, Unity-specific acceptance criteria, technical constraints
- **Technical Specs:** Update architecture diagrams, component hierarchies, performance budgets
- **Unity Configurations:** Propose settings changes, optimization strategies, platform variants
@@ -89,14 +85,12 @@ Based on the analysis and agreed path forward:
- Create a comprehensive proposal document containing:
**A. Change Summary:**
- Original issue (performance, gameplay, technical constraint)
- Game systems affected
- Platform/performance implications
- Chosen solution approach
**B. Technical Impact Analysis:**
- Unity architecture changes needed
- Performance implications (with metrics)
- Platform compatibility effects
@@ -104,14 +98,12 @@ Based on the analysis and agreed path forward:
- Third-party dependency impacts
**C. Specific Proposed Edits:**
- For each game story: "Change Story GS-X.Y from: [old] To: [new]"
- For technical specs: "Update Unity Architecture Section X: [changes]"
- For GDD: "Modify [Feature] in Section Y: [updates]"
- For configurations: "Change [Setting] from [old_value] to [new_value]"
**D. Implementation Considerations:**
- Required Unity version updates
- Asset reimport needs
- Shader recompilation requirements
@@ -123,7 +115,6 @@ Based on the analysis and agreed path forward:
- Provide the finalized document to the user
- **Based on change scope:**
- **Minor adjustments (can be handled in current sprint):**
- Confirm task completion
- Suggest handoff to game-dev agent for implementation
@@ -137,7 +128,6 @@ Based on the analysis and agreed path forward:
## Output Deliverables
- **Primary:** "Game Development Change Proposal" document containing:
- Game-specific change analysis
- Technical impact assessment with Unity context
- Platform and performance considerations

View File

@@ -9,7 +9,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
[[LLM: Begin by understanding the game design context and goals. Ask clarifying questions if needed to determine the best approach for game-specific ideation.]]
1. **Establish Game Context**
- Understand the game genre or opportunity area
- Identify target audience and platform constraints
- Determine session goals (concept exploration vs. mechanic refinement)
@@ -27,7 +26,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **"What If" Game Scenarios**
[[LLM: Generate provocative what-if questions that challenge game design assumptions and expand thinking beyond current genre limitations.]]
- What if players could rewind time in any genre?
- What if the game world reacted to the player's real-world location?
- What if failure was more rewarding than success?
@@ -36,7 +34,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Cross-Genre Fusion**
[[LLM: Help user combine unexpected game genres and mechanics to create unique experiences.]]
- "How might [genre A] mechanics work in [genre B]?"
- Puzzle mechanics in action games
- Dating sim elements in strategy games
@@ -45,7 +42,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
3. **Player Motivation Reversal**
[[LLM: Flip traditional player motivations to reveal new gameplay possibilities.]]
- What if losing was the goal?
- What if cooperation was forced in competitive games?
- What if players had to help their enemies?
@@ -62,7 +58,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **SCAMPER for Game Mechanics**
[[LLM: Guide through each SCAMPER prompt specifically for game design.]]
- **S** = Substitute: What mechanics can be substituted? (walking → flying → swimming)
- **C** = Combine: What systems can be merged? (inventory + character growth)
- **A** = Adapt: What mechanics from other media? (books, movies, sports)
@@ -73,7 +68,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Player Agency Spectrum**
[[LLM: Explore different levels of player control and agency across game systems.]]
- Full Control: Direct character movement, combat, building
- Indirect Control: Setting rules, giving commands, environmental changes
- Influence Only: Suggestions, preferences, emotional reactions
@@ -81,7 +75,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
3. **Temporal Game Design**
[[LLM: Explore how time affects gameplay and player experience.]]
- Real-time vs. turn-based mechanics
- Time travel and manipulation
- Persistent vs. session-based progress
@@ -92,7 +85,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **Emotion-First Design**
[[LLM: Start with target emotions and work backward to mechanics that create them.]]
- Target Emotion: Wonder → Mechanics: Discovery, mystery, scale
- Target Emotion: Triumph → Mechanics: Challenge, skill growth, recognition
- Target Emotion: Connection → Mechanics: Cooperation, shared goals, communication
@@ -100,7 +92,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Player Archetype Brainstorming**
[[LLM: Design for different player types and motivations.]]
- Achievers: Progression, completion, mastery
- Explorers: Discovery, secrets, world-building
- Socializers: Interaction, cooperation, community
@@ -109,7 +100,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
3. **Accessibility-First Innovation**
[[LLM: Generate ideas that make games more accessible while creating new gameplay.]]
- Visual impairment considerations leading to audio-focused mechanics
- Motor accessibility inspiring one-handed or simplified controls
- Cognitive accessibility driving clear feedback and pacing
@@ -119,7 +109,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **Environmental Storytelling**
[[LLM: Brainstorm ways the game world itself tells stories without explicit narrative.]]
- How does the environment show history?
- What do interactive objects reveal about characters?
- How can level design communicate mood?
@@ -127,7 +116,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Player-Generated Narrative**
[[LLM: Explore ways players create their own stories through gameplay.]]
- Emergent storytelling through player choices
- Procedural narrative generation
- Player-to-player story sharing
@@ -135,7 +123,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
3. **Genre Expectation Subversion**
[[LLM: Identify and deliberately subvert player expectations within genres.]]
- Fantasy RPG where magic is mundane
- Horror game where monsters are friendly
- Racing game where going slow is optimal
@@ -145,7 +132,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
1. **Platform-Specific Design**
[[LLM: Generate ideas that leverage unique platform capabilities.]]
- Mobile: GPS, accelerometer, camera, always-connected
- Web: URLs, tabs, social sharing, real-time collaboration
- Console: Controllers, TV viewing, couch co-op
@@ -153,7 +139,6 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
2. **Constraint-Based Creativity**
[[LLM: Use technical or design constraints as creative catalysts.]]
- One-button games
- Games without graphics
- Games that play in notification bars
@@ -199,19 +184,16 @@ This task provides a comprehensive toolkit of creative brainstorming techniques
[[LLM: Guide the brainstorming session with appropriate pacing for game design exploration.]]
1. **Inspiration Phase** (10-15 min)
- Reference existing games and mechanics
- Explore player experiences and emotions
- Gather visual and thematic inspiration
2. **Divergent Exploration** (25-35 min)
- Generate many game concepts or mechanics
- Use expansion and fusion techniques
- Encourage wild and impossible ideas
3. **Player-Centered Filtering** (15-20 min)
- Consider target audience reactions
- Evaluate emotional impact and engagement
- Group ideas by player experience goals


@@ -17,21 +17,21 @@ workflow:
- brainstorming_session
- game_research_prompt
- player_research
notes: 'Start with brainstorming game concepts, then create comprehensive game brief. SAVE OUTPUT: Copy final game-brief.md to your project''s docs/design/ folder.'
notes: "Start with brainstorming game concepts, then create comprehensive game brief. SAVE OUTPUT: Copy final game-brief.md to your project's docs/design/ folder."
- agent: game-designer
creates: game-design-doc.md
requires: game-brief.md
optional_steps:
- competitive_analysis
- technical_research
notes: 'Create detailed Game Design Document using game-design-doc-tmpl. Defines all gameplay mechanics, progression, and technical requirements. SAVE OUTPUT: Copy final game-design-doc.md to your project''s docs/design/ folder.'
notes: "Create detailed Game Design Document using game-design-doc-tmpl. Defines all gameplay mechanics, progression, and technical requirements. SAVE OUTPUT: Copy final game-design-doc.md to your project's docs/design/ folder."
- agent: game-designer
creates: level-design-doc.md
requires: game-design-doc.md
optional_steps:
- level_prototyping
- difficulty_analysis
notes: 'Create level design framework using level-design-doc-tmpl. Establishes content creation guidelines and performance requirements. SAVE OUTPUT: Copy final level-design-doc.md to your project''s docs/design/ folder.'
notes: "Create level design framework using level-design-doc-tmpl. Establishes content creation guidelines and performance requirements. SAVE OUTPUT: Copy final level-design-doc.md to your project's docs/design/ folder."
- agent: solution-architect
creates: game-architecture.md
requires:
@@ -41,7 +41,7 @@ workflow:
- technical_research_prompt
- performance_analysis
- platform_research
notes: 'Create comprehensive technical architecture using game-architecture-tmpl. Defines Unity systems, performance optimization, and code structure. SAVE OUTPUT: Copy final game-architecture.md to your project''s docs/architecture/ folder.'
notes: "Create comprehensive technical architecture using game-architecture-tmpl. Defines Unity systems, performance optimization, and code structure. SAVE OUTPUT: Copy final game-architecture.md to your project's docs/architecture/ folder."
- agent: game-designer
validates: design_consistency
requires: all_design_documents
@@ -66,7 +66,7 @@ workflow:
optional_steps:
- quick_brainstorming
- concept_validation
notes: 'Create focused game brief for prototype. Emphasize core mechanics and immediate playability. SAVE OUTPUT: Copy final game-brief.md to your project''s docs/ folder.'
notes: "Create focused game brief for prototype. Emphasize core mechanics and immediate playability. SAVE OUTPUT: Copy final game-brief.md to your project's docs/ folder."
- agent: game-designer
creates: prototype-design.md
uses: create-doc prototype-design OR create-game-story
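
The only substantive change in these workflow `notes:` fields is the quoting style: the old single-quoted YAML scalars escape the apostrophe by doubling it (`project''s`), while the new double-quoted form writes it literally. Both spellings parse to the same string, as the minimal sketch below shows (the keys are illustrative, not taken from the workflow files); the switch matches the `singleQuote: false` override for `*.yaml` files in the prettier.config.mjs added later in this diff.

```yaml
# Illustrative keys only; both values parse to the identical string.
old_style: 'SAVE OUTPUT: Copy final game-brief.md to your project''s docs/design/ folder.'
new_style: "SAVE OUTPUT: Copy final game-brief.md to your project's docs/design/ folder."
```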


@@ -44,7 +44,7 @@ workflow:
notes: Implement stories in priority order. Test frequently in the Unity Editor and adjust design based on what feels fun. Document discoveries.
workflow_end:
action: prototype_evaluation
notes: 'Prototype complete. Evaluate core mechanics, gather feedback, and decide next steps: iterate, expand, or archive.'
notes: "Prototype complete. Evaluate core mechanics, gather feedback, and decide next steps: iterate, expand, or archive."
game_jam_sequence:
- step: jam_concept
agent: game-designer


@@ -247,17 +247,14 @@ A comprehensive 16-section checklist covering:
### Common Issues
1. **Infrastructure Drift**
- Solution: Implement drift detection in IaC pipelines
- Prevention: Restrict manual changes, enforce GitOps
2. **Cost Overruns**
- Solution: Implement cost monitoring and alerts
- Prevention: Resource tagging, budget limits
3. **Performance Problems**
- Solution: Review monitoring data, scale resources
- Prevention: Load testing, capacity planning
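
For the drift-detection item above, here is a minimal, hypothetical sketch of what "drift detection in IaC pipelines" could look like; it assumes Terraform with GitHub Actions, and the workflow name, schedule, and `infra/` directory are illustrative rather than part of this checklist.

```yaml
# Hypothetical scheduled drift check. `terraform plan -detailed-exitcode`
# exits with code 2 when the plan is non-empty, i.e. when live infrastructure
# has drifted from the committed configuration, which fails this job.
name: drift-detection
on:
  schedule:
    - cron: '0 6 * * *'
jobs:
  plan:
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: infra
    steps:
      - uses: actions/checkout@v4
      - uses: hashicorp/setup-terraform@v3
      - run: terraform init -input=false
      - run: terraform plan -detailed-exitcode -input=false
```

A failing run flags manual changes to reconcile or revert, which pairs with the "restrict manual changes, enforce GitOps" prevention note.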


@@ -32,7 +32,6 @@ To conduct a thorough review of existing infrastructure to identify improvement
### 3. Conduct Systematic Review
- **If "Incremental Mode" was selected:**
- For each section of the infrastructure checklist:
- **a. Present Section Focus:** Explain what aspects of infrastructure this section reviews
- **b. Work Through Items:** Examine each checklist item against current infrastructure


@@ -55,7 +55,6 @@ To comprehensively validate platform infrastructure changes against security, re
### 4. Execute Comprehensive Platform Validation Process
- **If "Incremental Mode" was selected:**
- For each section of the infrastructure checklist (Sections 1-16):
- **a. Present Section Purpose:** Explain what this section validates and why it's important for platform operations
- **b. Work Through Items:** Present each checklist item, guide the user through validation, and document compliance or gaps

package-lock.json (generated, 1607 changed lines): diff suppressed because it is too large.

package.json

@@ -1,7 +1,23 @@
{
"$schema": "https://json.schemastore.org/package.json",
"name": "bmad-method",
"version": "4.37.0-beta.6",
"version": "5.0.0",
"description": "Breakthrough Method of Agile AI-driven Development",
"keywords": [
"agile",
"ai",
"orchestrator",
"development",
"methodology",
"agents",
"bmad"
],
"repository": {
"type": "git",
"url": "git+https://github.com/bmadcode/BMAD-METHOD.git"
},
"license": "MIT",
"author": "Brian (BMad) Madison",
"main": "tools/cli.js",
"bin": {
"bmad": "tools/bmad-npx-wrapper.js",
@@ -11,27 +27,43 @@
"build": "node tools/cli.js build",
"build:agents": "node tools/cli.js build --agents-only",
"build:teams": "node tools/cli.js build --teams-only",
"list:agents": "node tools/cli.js list:agents",
"validate": "node tools/cli.js validate",
"flatten": "node tools/flattener/main.js",
"format": "prettier --write \"**/*.{js,cjs,mjs,json,md,yaml}\"",
"format:check": "prettier --check \"**/*.{js,cjs,mjs,json,md,yaml}\"",
"install:bmad": "node tools/installer/bin/bmad.js install",
"format": "prettier --write \"**/*.md\"",
"version:patch": "node tools/version-bump.js patch",
"version:minor": "node tools/version-bump.js minor",
"version:major": "node tools/version-bump.js major",
"version:expansion": "node tools/bump-expansion-version.js",
"version:expansion:set": "node tools/update-expansion-version.js",
"version:all": "node tools/bump-all-versions.js",
"version:all:minor": "node tools/bump-all-versions.js minor",
"version:all:major": "node tools/bump-all-versions.js major",
"version:all:patch": "node tools/bump-all-versions.js patch",
"version:expansion:all": "node tools/bump-all-versions.js",
"version:expansion:all:minor": "node tools/bump-all-versions.js minor",
"version:expansion:all:major": "node tools/bump-all-versions.js major",
"version:expansion:all:patch": "node tools/bump-all-versions.js patch",
"lint": "eslint . --ext .js,.cjs,.mjs,.yaml --max-warnings=0",
"lint:fix": "eslint . --ext .js,.cjs,.mjs,.yaml --fix",
"list:agents": "node tools/cli.js list:agents",
"prepare": "husky",
"release": "semantic-release",
"release:test": "semantic-release --dry-run --no-ci || echo 'Config test complete - authentication errors are expected locally'",
"prepare": "husky"
"validate": "node tools/cli.js validate",
"version:all": "node tools/bump-all-versions.js",
"version:all:major": "node tools/bump-all-versions.js major",
"version:all:minor": "node tools/bump-all-versions.js minor",
"version:all:patch": "node tools/bump-all-versions.js patch",
"version:expansion": "node tools/bump-expansion-version.js",
"version:expansion:all": "node tools/bump-all-versions.js",
"version:expansion:all:major": "node tools/bump-all-versions.js major",
"version:expansion:all:minor": "node tools/bump-all-versions.js minor",
"version:expansion:all:patch": "node tools/bump-all-versions.js patch",
"version:expansion:set": "node tools/update-expansion-version.js",
"version:major": "node tools/version-bump.js major",
"version:minor": "node tools/version-bump.js minor",
"version:patch": "node tools/version-bump.js patch"
},
"lint-staged": {
"**/*.{js,cjs,mjs}": [
"eslint --fix --max-warnings=0",
"prettier --write"
],
"**/*.yaml": [
"eslint --fix",
"prettier --write"
],
"**/*.{json,md}": [
"prettier --write"
]
},
"dependencies": {
"@kayvan/markdown-tree-parser": "^1.5.0",
@@ -46,37 +78,25 @@
"ora": "^5.4.1",
"semver": "^7.6.3"
},
"keywords": [
"agile",
"ai",
"orchestrator",
"development",
"methodology",
"agents",
"bmad"
],
"author": "Brian (BMad) Madison",
"license": "MIT",
"repository": {
"type": "git",
"url": "git+https://github.com/bmadcode/BMAD-METHOD.git"
},
"engines": {
"node": ">=20.0.0"
},
"devDependencies": {
"@eslint/js": "^9.33.0",
"@semantic-release/changelog": "^6.0.3",
"@semantic-release/git": "^10.0.1",
"eslint": "^9.33.0",
"eslint-config-prettier": "^10.1.8",
"eslint-plugin-n": "^17.21.3",
"eslint-plugin-unicorn": "^60.0.0",
"eslint-plugin-yml": "^1.18.0",
"husky": "^9.1.7",
"jest": "^30.0.4",
"lint-staged": "^16.1.1",
"prettier": "^3.5.3",
"prettier-plugin-packagejson": "^2.5.19",
"semantic-release": "^22.0.0",
"yaml-eslint-parser": "^1.2.3",
"yaml-lint": "^1.7.0"
},
"lint-staged": {
"**/*.md": [
"prettier --write"
]
"engines": {
"node": ">=20.10.0"
}
}
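
The reworked scripts block adds linting and formatting entry points (`lint`, `format:check`) alongside the existing `validate` script, plus a `lint-staged` configuration and a `prepare: husky` hook installer. As a hedged illustration only (this workflow file is not part of the diff and its name is invented), the same scripts could be wired into CI roughly like this:

```yaml
# Hypothetical CI job; it assumes only the npm scripts shown above and the
# Node >= 20.10 requirement from the "engines" field.
name: checks
on: [push, pull_request]
jobs:
  checks:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 20
      - run: npm ci
      - run: npm run lint
      - run: npm run format:check
      - run: npm run validate
```

Locally, `prepare: husky` installs the git hooks on `npm install`; presumably a pre-commit hook (not shown in this excerpt) invokes lint-staged, so the `lint-staged` block runs ESLint and Prettier against staged files before each commit.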

prettier.config.mjs (new file, 32 lines added)

@@ -0,0 +1,32 @@
export default {
$schema: 'https://json.schemastore.org/prettierrc',
printWidth: 100,
tabWidth: 2,
useTabs: false,
semi: true,
singleQuote: true,
trailingComma: 'all',
bracketSpacing: true,
arrowParens: 'always',
endOfLine: 'lf',
proseWrap: 'preserve',
overrides: [
{
files: ['*.md'],
options: { proseWrap: 'preserve' },
},
{
files: ['*.yaml'],
options: { singleQuote: false },
},
{
files: ['*.json', '*.jsonc'],
options: { singleQuote: false },
},
{
files: ['*.cjs'],
options: { parser: 'babel' },
},
],
plugins: ['prettier-plugin-packagejson'],
};

Some files were not shown because too many files have changed in this diff.