Compare commits

...

1 Commit

Author: manjaroblack | SHA1: 2a9e07d4fd | Message: chore: add code formatting config and pre-commit hooks | Date: 2025-08-16 19:00:08 -05:00
130 changed files with 11886 additions and 10939 deletions
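Most of the 130 files change only string-quote style and line wrapping, consistent with the Prettier setup this commit adds. The Prettier config itself is not part of this excerpt, and the quote direction is not uniform (standalone workflow YAML moves to double quotes while YAML embedded in markdown moves to single quotes), so the following `.prettierrc.yaml` is a guessed sketch, not the committed file:

```yaml
# Hypothetical .prettierrc.yaml; every value here is an assumption
printWidth: 100 # matches the editor.rulers: [100] added to .vscode/settings.json below
singleQuote: true # markdown-embedded YAML in this diff moves toward single quotes
overrides:
  - files: '*.{yml,yaml}'
    options:
      singleQuote: false # standalone workflow YAML moves toward double quotes
```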

View File

@@ -1,9 +1,9 @@
---
name: Bug report
about: Create a report to help us improve
title: ""
labels: ""
assignees: ""
title: ''
labels: ''
assignees: ''
---
**Describe the bug**

View File

@@ -1,9 +1,9 @@
---
name: Feature request
about: Suggest an idea for this project
title: ""
labels: ""
assignees: ""
title: ''
labels: ''
assignees: ''
---
**Did you discuss the idea first in the Discord server (#general-dev)?**

View File

@@ -1,6 +1,15 @@
name: Discord Notification
on: [pull_request, release, create, delete, issue_comment, pull_request_review, pull_request_review_comment]
"on":
[
pull_request,
release,
create,
delete,
issue_comment,
pull_request_review,
pull_request_review_comment,
]
jobs:
notify:
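Quoting `on` here is not cosmetic: YAML 1.1 resolves a handful of bare scalars (`on`, `off`, `yes`, `no`, `y`, `n`) as booleans, so a strict loader can read an unquoted `on` key as the boolean `true`. A minimal illustration, with the trigger list invented:

```yaml
# A strict YAML 1.1 loader resolves this key as boolean true:
# on: [push]
# Quoting keeps the key as the literal string "on":
"on": [push]
```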

.github/workflows/format-check.yaml vendored Normal file (42 changed lines)
View File

@@ -0,0 +1,42 @@
name: format-check
"on":
pull_request:
branches: ["**"]
jobs:
prettier:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: "20"
cache: "npm"
- name: Install dependencies
run: npm ci
- name: Prettier format check
run: npm run format:check
eslint:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: "20"
cache: "npm"
- name: Install dependencies
run: npm ci
- name: ESLint
run: npm run lint
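The new workflow invokes `npm run format:check` and `npm run lint`, which implies matching entries in `package.json`. The script bodies are not part of this diff; a sketch of what they plausibly look like, assuming Prettier and ESLint are called directly:

```json
{
  "scripts": {
    "format": "prettier --write .",
    "format:check": "prettier --check .",
    "lint": "eslint ."
  }
}
```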

View File

@@ -1,12 +1,12 @@
name: Promote to Stable
on:
"on":
workflow_dispatch:
inputs:
version_bump:
description: 'Version bump type'
description: "Version bump type"
required: true
default: 'minor'
default: "minor"
type: choice
options:
- patch
@@ -30,8 +30,8 @@ jobs:
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
registry-url: 'https://registry.npmjs.org'
node-version: "20"
registry-url: "https://registry.npmjs.org"
- name: Configure Git
run: |
@@ -144,3 +144,5 @@ jobs:
echo "🏷️ Git tag: v${{ steps.version.outputs.new_version }}"
echo "✅ Published to NPM with 'latest' tag"
echo "✅ Users running 'npx bmad-method install' will now get version ${{ steps.version.outputs.new_version }}"
echo "🚀 The stable release will be automatically published to NPM via semantic-release"
echo "✅ Users running 'npx bmad-method install' will now get version ${{ steps.version.outputs.new_version }}"

View File

@@ -1,5 +1,5 @@
name: Release
'on':
"on":
push:
branches:
- main
@@ -22,7 +22,7 @@ permissions:
jobs:
release:
runs-on: ubuntu-latest
if: '!contains(github.event.head_commit.message, ''[skip ci]'')'
if: ${{ github.event_name != 'push' || !contains(github.event.head_commit.message, '[skip ci]') }}
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -32,9 +32,9 @@ jobs:
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
cache: npm
registry-url: https://registry.npmjs.org
node-version: "20"
cache: "npm"
registry-url: "https://registry.npmjs.org"
- name: Install dependencies
run: npm ci
- name: Run tests and validation
@@ -57,3 +57,17 @@ jobs:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
run: npm run release
- name: Clean changelog formatting
if: github.event_name == 'push'
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
# Remove any Claude Code attribution from changelog
sed -i '/🤖 Generated with \[Claude Code\]/,+2d' CHANGELOG.md || true
# Format and commit if changes exist
npm run format
if ! git diff --quiet CHANGELOG.md; then
git add CHANGELOG.md
git commit -m "chore: clean changelog formatting [skip ci]"
git push
fi

.gitignore vendored (1 changed line)
View File

@@ -25,7 +25,6 @@ Thumbs.db
# Development tools and configs
.prettierignore
.prettierrc
.husky/
# IDE and editor configs
.windsurf/

.husky/pre-commit Executable file (3 changed lines)
View File

@@ -0,0 +1,3 @@
#!/usr/bin/env sh
npx --no-install lint-staged
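The hook delegates to lint-staged, whose configuration is not shown in this compare view. A minimal sketch of a matching `lint-staged` block in `package.json`, with the globs and commands assumed:

```json
{
  "lint-staged": {
    "*.{js,json,yml,yaml,md}": "prettier --write",
    "*.js": "eslint --fix"
  }
}
```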

.vscode/settings.json vendored (27 changed lines)
View File

@@ -40,5 +40,30 @@
"tileset",
"Trae",
"VNET"
]
],
"json.schemas": [
{
"fileMatch": ["package.json"],
"url": "https://json.schemastore.org/package.json"
},
{
"fileMatch": [".vscode/settings.json"],
"url": "vscode://schemas/settings/folder"
}
],
"editor.formatOnSave": true,
"editor.defaultFormatter": "esbenp.prettier-vscode",
"[javascript]": { "editor.defaultFormatter": "esbenp.prettier-vscode" },
"[json]": { "editor.defaultFormatter": "esbenp.prettier-vscode" },
"[yaml]": { "editor.defaultFormatter": "esbenp.prettier-vscode" },
"[markdown]": { "editor.defaultFormatter": "esbenp.prettier-vscode" },
"prettier.prettierPath": "node_modules/prettier",
"prettier.requireConfig": true,
"yaml.format.enable": false,
"eslint.useFlatConfig": true,
"eslint.validate": ["javascript", "yaml"],
"editor.codeActionsOnSave": {
"source.fixAll.eslint": "explicit"
},
"editor.rulers": [100]
}

View File

@@ -574,10 +574,6 @@
- Manual version bumping via npm scripts is now disabled. Use conventional commits for automated releases.
🤖 Generated with [Claude Code](https://claude.ai/code)
Co-Authored-By: Claude <noreply@anthropic.com>
# [4.2.0](https://github.com/bmadcode/BMAD-METHOD/compare/v4.1.0...v4.2.0) (2025-06-15)
### Bug Fixes
@@ -686,4 +682,5 @@ Co-Authored-By: Claude <noreply@anthropic.com>
### Features
- add versioning and release automation ([0ea5e50](https://github.com/bmadcode/BMAD-METHOD/commit/0ea5e50aa7ace5946d0100c180dd4c0da3e2fd8c))
# Promote to stable release 5.0.0

CLAUDE.md (196 changed lines)
View File

@@ -1,196 +0,0 @@
# CLAUDE.md
Don't be an ass kisser, don't glaze my donut, keep it to the point. Never use em dashes in our communications or in documents you author or update. Don't tell me I'm correct just because I told you something; if I'm wrong or there is a better alternative, tell me bluntly why, otherwise get to the point and execute!
## Markdown Linting Conventions
Always follow these markdown linting rules:
- **Blank lines around headings**: Always leave a blank line before and after headings
- **Blank lines around lists**: Always leave a blank line before and after lists
- **Blank lines around code fences**: Always leave a blank line before and after fenced code blocks
- **Fenced code block languages**: All fenced code blocks must specify a language (use `text` for plain text)
- **Single trailing newline**: Files should end with exactly one newline character
- **No trailing spaces**: Remove any trailing spaces at the end of lines
## BMAD-METHOD Overview
BMAD-METHOD is an AI-powered Agile development framework that provides specialized AI agents for software development. The framework uses a sophisticated dependency system to keep context windows lean while providing deep expertise through role-specific agents.
## Essential Commands
### Build and Validation
```bash
npm run build # Build all web bundles (agents and teams)
npm run build:agents # Build agent bundles only
npm run build:teams # Build team bundles only
npm run validate # Validate all configurations
npm run format # Format all markdown files with prettier
```
### Development and Testing
```bash
npx bmad-build build # Alternative build command via CLI
npx bmad-build list:agents # List all available agents
npx bmad-build validate # Validate agent configurations
```
### Installation Commands
```bash
npx bmad-method install # Install stable release (recommended)
npx bmad-method@beta install # Install bleeding edge version
npx bmad-method@latest install # Explicit stable installation
npx bmad-method@latest update # Update stable installation
npx bmad-method@beta update # Update bleeding edge installation
```
### Dual Publishing Strategy
The project uses a dual publishing strategy with automated promotion (a release-config sketch follows the branch strategy list):
**Branch Strategy:**
- `main` branch: Bleeding edge development, auto-publishes to `@beta` tag
- `stable` branch: Production releases, auto-publishes to `@latest` tag
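semantic-release maps branches to npm dist-tags through its `branches` option. A sketch of a release configuration that would produce this split, with the file shape assumed rather than taken from the repo:

```json
{
  "branches": [
    "stable",
    { "name": "main", "channel": "beta", "prerelease": "beta" }
  ]
}
```

Whatever the actual config, the resulting tags can be inspected with `npm dist-tag ls bmad-method`.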
**Release Promotion:**
1. **Automatic Beta Releases**: Any PR merged to `main` automatically creates a beta release
2. **Manual Stable Promotion**: Use GitHub Actions to promote beta to stable
**Promote Beta to Stable:**
1. Go to GitHub Actions tab in the repository
2. Select "Promote to Stable" workflow
3. Click "Run workflow"
4. Choose version bump type (patch/minor/major)
5. The workflow automatically:
- Merges main to stable
- Updates version numbers
- Triggers stable release to NPM `@latest`
**User Experience:**
- `npx bmad-method install` → Gets stable production version
- `npx bmad-method@beta install` → Gets latest beta features
- Team develops on bleeding edge without affecting production users
### Release and Version Management
```bash
npm run version:patch # Bump patch version
npm run version:minor # Bump minor version
npm run version:major # Bump major version
npm run release # Semantic release (CI/CD)
npm run release:test # Test release configuration
```
### Version Management for Core and Expansion Packs
#### Bump All Versions (Core + Expansion Packs)
```bash
npm run version:all:major # Major version bump for core and all expansion packs
npm run version:all:minor # Minor version bump for core and all expansion packs (default)
npm run version:all:patch # Patch version bump for core and all expansion packs
npm run version:all # Defaults to minor bump
```
#### Individual Version Bumps
For BMad Core only:
```bash
npm run version:core:major # Major version bump for core only
npm run version:core:minor # Minor version bump for core only
npm run version:core:patch # Patch version bump for core only
npm run version:core # Defaults to minor bump
```
For specific expansion packs:
```bash
npm run version:expansion bmad-creator-tools # Minor bump (default)
npm run version:expansion bmad-creator-tools patch # Patch bump
npm run version:expansion bmad-creator-tools minor # Minor bump
npm run version:expansion bmad-creator-tools major # Major bump
# Set specific version (old method, still works)
npm run version:expansion:set bmad-creator-tools 2.0.0
```
## Architecture and Code Structure
### Core System Architecture
The framework uses a **dependency resolution system** where agents only load the resources they need; a hypothetical agent definition is sketched after this list:
1. **Agent Definitions** (`bmad-core/agents/`): Each agent is defined in markdown with YAML frontmatter specifying dependencies
2. **Dynamic Loading**: The build system (`tools/lib/dependency-resolver.js`) resolves and includes only required resources
3. **Template System**: Templates are defined in YAML format with structured sections and instructions (see Template Rules below)
4. **Workflow Engine**: YAML-based workflows in `bmad-core/workflows/` define step-by-step processes
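A hypothetical agent definition illustrating this structure; the `agent` fields mirror the dev agent shown elsewhere in this diff, and the dependency entries reuse file names that appear in this document:

```yaml
# bmad-core/agents/example.md frontmatter (hypothetical)
agent:
  name: Example
  id: example
  title: Example Agent
  whenToUse: 'Illustration of the dependency pattern only'
dependencies:
  tasks:
    - facilitate-brainstorming-session.md # resolved by tools/lib/dependency-resolver.js
  templates:
    - brainstorming-output-tmpl.yaml
```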
### Key Components
- **CLI Tool** (`tools/cli.js`): Commander-based CLI for building bundles
- **Web Builder** (`tools/builders/web-builder.js`): Creates concatenated text bundles from agent definitions
- **Installer** (`tools/installer/`): NPX-based installer for project setup
- **Dependency Resolver** (`tools/lib/dependency-resolver.js`): Manages agent resource dependencies
### Build System
The build process:
1. Reads agent/team definitions from `bmad-core/`
2. Resolves dependencies using the dependency resolver
3. Creates concatenated text bundles in `dist/`
4. Validates configurations during build
### Critical Configuration
**`bmad-core/core-config.yaml`** is the heart of the framework configuration; a hypothetical excerpt follows this list:
- Defines document locations and expected structure
- Specifies which files developers should always load
- Enables compatibility with different project structures (V3/V4)
- Controls debug logging
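Of the keys below, only `devStoryLocation` is corroborated elsewhere in this diff (the QA tasks resolve story paths from core-config.yaml); the rest are illustrative:

```yaml
# Hypothetical core-config.yaml excerpt; key names are assumptions
devStoryLocation: docs/stories # where {epic}.{story}.*.md files live
devLoadAlwaysFiles: # files the dev agent always loads (assumed key)
  - docs/architecture/coding-standards.md
prdVersion: v4 # V3/V4 project-structure compatibility (assumed key)
devDebugLog: .ai/debug-log.md # debug logging target (assumed key)
```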
## Development Practices
### Adding New Features
1. **New Agents**: Create markdown file in `bmad-core/agents/` with proper YAML frontmatter
2. **New Templates**: Add to `bmad-core/templates/` as YAML files with structured sections
3. **New Workflows**: Create YAML in `bmad-core/workflows/`
4. **Update Dependencies**: Ensure `dependencies` field in agent frontmatter is accurate
### Important Patterns
- **Dependency Management**: Always specify minimal dependencies in agent frontmatter to keep context lean
- **Template Instructions**: Use YAML-based template structure (see Template Rules below)
- **File Naming**: Follow existing conventions (kebab-case for files, proper agent names in frontmatter)
- **Documentation**: Update user-facing docs in `docs/` when adding features
### Template Rules
Templates use the **BMad Document Template** format (`common/utils/bmad-doc-template.md`) with YAML structure; a minimal sketch follows these rules:
1. **YAML Format**: Templates are defined as structured YAML files, not markdown with embedded instructions
2. **Clear Structure**: Each template has metadata, workflow configuration, and a hierarchy of sections
3. **Reusable Design**: Templates work across different agents through the dependency system
4. **Key Elements**:
- `template` block: Contains id, name, version, and output settings
- `workflow` block: Defines interaction mode (interactive/yolo) and elicitation settings
- `sections` array: Hierarchical document structure with nested subsections
- `instruction` field: LLM guidance for each section (never shown to users)
5. **Advanced Features**:
- Variable substitution: `{{variable_name}}` syntax for dynamic content
- Conditional sections: `condition` field for optional content
- Repeatable sections: `repeatable: true` for multiple instances
- Agent permissions: `owner` and `editors` fields for access control
6. **Clean Output**: All processing instructions are in YAML fields, ensuring clean document generation
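A minimal template following these rules; the `template`/`workflow`/`sections` shape mirrors the YAML structure documented later in this compare view, with all values invented:

```yaml
template:
  id: example-doc
  name: Example Document
  version: 1.0
  output:
    format: markdown
    filename: docs/example.md
    title: '{{project_name}} Example'
workflow:
  mode: interactive
  elicitation: advanced-elicitation
sections:
  - id: overview
    title: Overview
    instruction: Summarize the project in two sentences. # LLM guidance, never shown to users
    template: '{{overview_content}}'
  - id: risks
    title: Key Risks
    condition: Project has known risks
    type: bullet-list
    repeatable: true
    template: '- **{{risk}}:** {{impact}}'
```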
## Notes for Claude Code
- The project uses semantic versioning with automated releases via GitHub Actions
- All markdown is formatted with Prettier (run `npm run format`)
- Expansion packs in `expansion-packs/` provide domain-specific capabilities
- NEVER automatically commit or push changes unless explicitly asked by the user
- NEVER include Claude Code attribution or co-authorship in commit messages

View File

@@ -4,7 +4,7 @@ bundle:
description: Includes every core system agent.
agents:
- bmad-orchestrator
- '*'
- "*"
workflows:
- brownfield-fullstack.yaml
- brownfield-service.yaml

View File

@@ -131,7 +131,7 @@ workflow-guidance:
- Understand each workflow's purpose, options, and decision points
- Ask clarifying questions based on the workflow's structure
- Guide users through workflow selection when multiple options exist
- When appropriate, suggest: "Would you like me to create a detailed workflow plan before starting?"
- When appropriate, suggest: 'Would you like me to create a detailed workflow plan before starting?'
- For workflows with divergent paths, help users choose the right path
- Adapt questions to the specific domain (e.g., game dev vs infrastructure vs web dev)
- Only recommend workflows that actually exist in the current bundle

View File

@@ -35,7 +35,7 @@ agent:
id: dev
title: Full Stack Developer
icon: 💻
whenToUse: "Use for code implementation, debugging, refactoring, and development best practices"
whenToUse: 'Use for code implementation, debugging, refactoring, and development best practices'
customization:
persona:
@@ -57,13 +57,13 @@ commands:
- explain: teach me what and why you did whatever you just did in detail so I can learn. Explain to me as if you were training a junior engineer.
- exit: Say goodbye as the Developer, and then abandon inhabiting this persona
- develop-story:
- order-of-execution: "Read (first or next) task→Implement Task and its subtasks→Write tests→Execute validations→Only if ALL pass, then update the task checkbox with [x]→Update story section File List to ensure it lists any new, modified, or deleted source files→repeat order-of-execution until complete"
- order-of-execution: 'Read (first or next) task→Implement Task and its subtasks→Write tests→Execute validations→Only if ALL pass, then update the task checkbox with [x]→Update story section File List to ensure it lists any new, modified, or deleted source files→repeat order-of-execution until complete'
- story-file-updates-ONLY:
- CRITICAL: ONLY UPDATE THE STORY FILE WITH UPDATES TO SECTIONS INDICATED BELOW. DO NOT MODIFY ANY OTHER SECTIONS.
- CRITICAL: You are ONLY authorized to edit these specific sections of story files - Tasks / Subtasks Checkboxes, Dev Agent Record section and all its subsections, Agent Model Used, Debug Log References, Completion Notes List, File List, Change Log, Status
- CRITICAL: DO NOT modify Status, Story, Acceptance Criteria, Dev Notes, Testing sections, or any other sections not listed above
- blocking: "HALT for: Unapproved deps needed, confirm with user | Ambiguous after story check | 3 failures attempting to implement or fix something repeatedly | Missing config | Failing regression"
- ready-for-review: "Code matches requirements + All validations pass + Follows standards + File List complete"
- blocking: 'HALT for: Unapproved deps needed, confirm with user | Ambiguous after story check | 3 failures attempting to implement or fix something repeatedly | Missing config | Failing regression'
- ready-for-review: 'Code matches requirements + All validations pass + Follows standards + File List complete'
- completion: "All Tasks and Subtasks marked [x] and have tests→Validations and full regression passes (DON'T BE LAZY, EXECUTE ALL TESTS and CONFIRM)→Ensure File List is Complete→run the task execute-checklist for the checklist story-dod-checklist→set story status: 'Ready for Review'→HALT"
dependencies:

View File

@@ -298,7 +298,7 @@ You are the "Vibe CEO" - thinking like a CEO with unlimited resources and a sing
- **Claude Code**: `/agent-name` (e.g., `/bmad-master`)
- **Cursor**: `@agent-name` (e.g., `@bmad-master`)
- **Windsurf**: `@agent-name` (e.g., `@bmad-master`)
- **Windsurf**: `/agent-name` (e.g., `/bmad-master`)
- **Trae**: `@agent-name` (e.g., `@bmad-master`)
- **Roo Code**: Select mode from mode selector (e.g., `bmad-master`)
- **GitHub Copilot**: Open the Chat view (`⌃⌘I` on Mac, `Ctrl+Alt+I` on Windows/Linux) and select **Agent** from the chat mode selector.

View File

@@ -25,10 +25,10 @@ Comprehensive guide for determining appropriate test levels (unit, integration,
```yaml
unit_test:
component: "PriceCalculator"
scenario: "Calculate discount with multiple rules"
justification: "Complex business logic with multiple branches"
mock_requirements: "None - pure function"
component: 'PriceCalculator'
scenario: 'Calculate discount with multiple rules'
justification: 'Complex business logic with multiple branches'
mock_requirements: 'None - pure function'
```
### Integration Tests
@@ -52,10 +52,10 @@ unit_test:
```yaml
integration_test:
components: ["UserService", "AuthRepository"]
scenario: "Create user with role assignment"
justification: "Critical data flow between service and persistence"
test_environment: "In-memory database"
components: ['UserService', 'AuthRepository']
scenario: 'Create user with role assignment'
justification: 'Critical data flow between service and persistence'
test_environment: 'In-memory database'
```
### End-to-End Tests
@@ -79,10 +79,10 @@ integration_test:
```yaml
e2e_test:
journey: "Complete checkout process"
scenario: "User purchases with saved payment method"
justification: "Revenue-critical path requiring full validation"
environment: "Staging with test payment gateway"
journey: 'Complete checkout process'
scenario: 'User purchases with saved payment method'
justification: 'Revenue-critical path requiring full validation'
environment: 'Staging with test payment gateway'
```
## Test Level Selection Rules

View File

@@ -1,6 +1,6 @@
---
docOutputLocation: docs/brainstorming-session-results.md
template: "{root}/templates/brainstorming-output-tmpl.yaml"
template: '{root}/templates/brainstorming-output-tmpl.yaml'
---
# Facilitate Brainstorming Session Task

View File

@@ -6,18 +6,19 @@ Quick NFR validation focused on the core four: security, performance, reliabilit
```yaml
required:
- story_id: "{epic}.{story}" # e.g., "1.3"
- story_path: "docs/stories/{epic}.{story}.*.md"
- story_id: '{epic}.{story}' # e.g., "1.3"
- story_path: 'docs/stories/{epic}.{story}.*.md'
optional:
- architecture_refs: "docs/architecture/*.md"
- technical_preferences: "docs/technical-preferences.md"
- architecture_refs: 'docs/architecture/*.md'
- technical_preferences: 'docs/technical-preferences.md'
- acceptance_criteria: From story file
```
## Purpose
Assess non-functional requirements for a story and generate:
1. YAML block for the gate file's `nfr_validation` section
2. Brief markdown assessment saved to `docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md`
@@ -26,6 +27,7 @@ Assess non-functional requirements for a story and generate:
### 0. Fail-safe for Missing Inputs
If story_path or story file can't be found:
- Still create assessment file with note: "Source story not found"
- Set all selected NFRs to CONCERNS with notes: "Target unknown / evidence missing"
- Continue with assessment to provide value
@@ -52,6 +54,7 @@ Which NFRs should I assess? (Enter numbers or press Enter for default)
### 2. Check for Thresholds
Look for NFR requirements in:
- Story acceptance criteria
- `docs/architecture/*.md` files
- `docs/technical-preferences.md`
@@ -72,6 +75,7 @@ No security requirements found. Required auth method?
### 3. Quick Assessment
For each selected NFR, check:
- Is there evidence it's implemented?
- Can we validate it?
- Are there obvious gaps?
@@ -88,16 +92,16 @@ nfr_validation:
_assessed: [security, performance, reliability, maintainability]
security:
status: CONCERNS
notes: "No rate limiting on auth endpoints"
notes: 'No rate limiting on auth endpoints'
performance:
status: PASS
notes: "Response times < 200ms verified"
notes: 'Response times < 200ms verified'
reliability:
status: PASS
notes: "Error handling and retries implemented"
notes: 'Error handling and retries implemented'
maintainability:
status: CONCERNS
notes: "Test coverage at 65%, target is 80%"
notes: 'Test coverage at 65%, target is 80%'
```
## Deterministic Status Rules
@@ -123,18 +127,21 @@ If `technical-preferences.md` defines custom weights, use those instead.
```markdown
# NFR Assessment: {epic}.{story}
Date: {date}
Reviewer: Quinn
<!-- Note: Source story not found (if applicable) -->
## Summary
- Security: CONCERNS - Missing rate limiting
- Performance: PASS - Meets <200ms requirement
- Reliability: PASS - Proper error handling
- Maintainability: CONCERNS - Test coverage below target
## Critical Issues
1. **No rate limiting** (Security)
- Risk: Brute force attacks possible
- Fix: Add rate limiting middleware to auth endpoints
@@ -144,6 +151,7 @@ Reviewer: Quinn
- Fix: Add tests for uncovered branches
## Quick Wins
- Add rate limiting: ~2 hours
- Increase test coverage: ~4 hours
- Add performance monitoring: ~1 hour
@@ -152,6 +160,7 @@ Reviewer: Quinn
## Output 3: Story Update Line
**End with this line for the review task to quote:**
```
NFR assessment: docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md
```
@@ -159,6 +168,7 @@ NFR assessment: docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md
## Output 4: Gate Integration Line
**Always print at the end:**
```
Gate NFR block ready → paste into docs/qa/gates/{epic}.{story}-{slug}.yml under nfr_validation
```
@@ -166,66 +176,82 @@ Gate NFR block ready → paste into docs/qa/gates/{epic}.{story}-{slug}.yml unde
## Assessment Criteria
### Security
**PASS if:**
- Authentication implemented
- Authorization enforced
- Input validation present
- No hardcoded secrets
**CONCERNS if:**
- Missing rate limiting
- Weak encryption
- Incomplete authorization
**FAIL if:**
- No authentication
- Hardcoded credentials
- SQL injection vulnerabilities
### Performance
**PASS if:**
- Meets response time targets
- No obvious bottlenecks
- Reasonable resource usage
**CONCERNS if:**
- Close to limits
- Missing indexes
- No caching strategy
**FAIL if:**
- Exceeds response time limits
- Memory leaks
- Unoptimized queries
### Reliability
**PASS if:**
- Error handling present
- Graceful degradation
- Retry logic where needed
**CONCERNS if:**
- Some error cases unhandled
- No circuit breakers
- Missing health checks
**FAIL if:**
- No error handling
- Crashes on errors
- No recovery mechanisms
### Maintainability
**PASS if:**
- Test coverage meets target
- Code well-structured
- Documentation present
**CONCERNS if:**
- Test coverage below target
- Some code duplication
- Missing documentation
**FAIL if:**
- No tests
- Highly coupled code
- No documentation
@@ -291,6 +317,7 @@ maintainability:
8. **Portability**: Adaptability, installability
Use these when assessing beyond the core four.
</details>
<details>
@@ -304,12 +331,13 @@ performance_deep_dive:
p99: 350ms
database:
slow_queries: 2
missing_indexes: ["users.email", "orders.user_id"]
missing_indexes: ['users.email', 'orders.user_id']
caching:
hit_rate: 0%
recommendation: "Add Redis for session data"
recommendation: 'Add Redis for session data'
load_test:
max_rps: 150
breaking_point: 200 rps
```
</details>

View File

@@ -27,11 +27,11 @@ Slug rules:
```yaml
schema: 1
story: "{epic}.{story}"
story: '{epic}.{story}'
gate: PASS|CONCERNS|FAIL|WAIVED
status_reason: "1-2 sentence explanation of gate decision"
reviewer: "Quinn"
updated: "{ISO-8601 timestamp}"
status_reason: '1-2 sentence explanation of gate decision'
reviewer: 'Quinn'
updated: '{ISO-8601 timestamp}'
top_issues: [] # Empty array if no issues
waiver: { active: false } # Only set active: true if WAIVED
```
@@ -40,20 +40,20 @@ waiver: { active: false } # Only set active: true if WAIVED
```yaml
schema: 1
story: "1.3"
story: '1.3'
gate: CONCERNS
status_reason: "Missing rate limiting on auth endpoints poses security risk."
reviewer: "Quinn"
updated: "2025-01-12T10:15:00Z"
status_reason: 'Missing rate limiting on auth endpoints poses security risk.'
reviewer: 'Quinn'
updated: '2025-01-12T10:15:00Z'
top_issues:
- id: "SEC-001"
- id: 'SEC-001'
severity: high # ONLY: low|medium|high
finding: "No rate limiting on login endpoint"
suggested_action: "Add rate limiting middleware before production"
- id: "TEST-001"
finding: 'No rate limiting on login endpoint'
suggested_action: 'Add rate limiting middleware before production'
- id: 'TEST-001'
severity: medium
finding: "No integration tests for auth flow"
suggested_action: "Add integration test coverage"
finding: 'No integration tests for auth flow'
suggested_action: 'Add integration test coverage'
waiver: { active: false }
```
@@ -61,20 +61,20 @@ waiver: { active: false }
```yaml
schema: 1
story: "1.3"
story: '1.3'
gate: WAIVED
status_reason: "Known issues accepted for MVP release."
reviewer: "Quinn"
updated: "2025-01-12T10:15:00Z"
status_reason: 'Known issues accepted for MVP release.'
reviewer: 'Quinn'
updated: '2025-01-12T10:15:00Z'
top_issues:
- id: "PERF-001"
- id: 'PERF-001'
severity: low
finding: "Dashboard loads slowly with 1000+ items"
suggested_action: "Implement pagination in next sprint"
finding: 'Dashboard loads slowly with 1000+ items'
suggested_action: 'Implement pagination in next sprint'
waiver:
active: true
reason: "MVP release - performance optimization deferred"
approved_by: "Product Owner"
reason: 'MVP release - performance optimization deferred'
approved_by: 'Product Owner'
```
## Gate Decision Criteria

View File

@@ -6,10 +6,10 @@ Perform a comprehensive test architecture review with quality gate decision. Thi
```yaml
required:
- story_id: "{epic}.{story}" # e.g., "1.3"
- story_path: "{devStoryLocation}/{epic}.{story}.*.md" # Path from core-config.yaml
- story_title: "{title}" # If missing, derive from story file H1
- story_slug: "{slug}" # If missing, derive from title (lowercase, hyphenated)
- story_id: '{epic}.{story}' # e.g., "1.3"
- story_path: '{devStoryLocation}/{epic}.{story}.*.md' # Path from core-config.yaml
- story_title: '{title}' # If missing, derive from story file H1
- story_slug: '{slug}' # If missing, derive from title (lowercase, hyphenated)
```
## Prerequisites
@@ -191,19 +191,19 @@ Gate file structure:
```yaml
schema: 1
story: "{epic}.{story}"
story_title: "{story title}"
story: '{epic}.{story}'
story_title: '{story title}'
gate: PASS|CONCERNS|FAIL|WAIVED
status_reason: "1-2 sentence explanation of gate decision"
reviewer: "Quinn (Test Architect)"
updated: "{ISO-8601 timestamp}"
status_reason: '1-2 sentence explanation of gate decision'
reviewer: 'Quinn (Test Architect)'
updated: '{ISO-8601 timestamp}'
top_issues: [] # Empty if no issues
waiver: { active: false } # Set active: true only if WAIVED
# Extended fields (optional but recommended):
quality_score: 0-100 # 100 - (20*FAILs) - (10*CONCERNS) or use technical-preferences.md weights
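# Worked example, assumed default weights: one FAIL and two CONCERNS → 100 - 20 - 2*10 = 60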
expires: "{ISO-8601 timestamp}" # Typically 2 weeks from review
expires: '{ISO-8601 timestamp}' # Typically 2 weeks from review
evidence:
tests_reviewed: { count }
@@ -215,24 +215,24 @@ evidence:
nfr_validation:
security:
status: PASS|CONCERNS|FAIL
notes: "Specific findings"
notes: 'Specific findings'
performance:
status: PASS|CONCERNS|FAIL
notes: "Specific findings"
notes: 'Specific findings'
reliability:
status: PASS|CONCERNS|FAIL
notes: "Specific findings"
notes: 'Specific findings'
maintainability:
status: PASS|CONCERNS|FAIL
notes: "Specific findings"
notes: 'Specific findings'
recommendations:
immediate: # Must fix before production
- action: "Add rate limiting"
refs: ["api/auth/login.ts"]
- action: 'Add rate limiting'
refs: ['api/auth/login.ts']
future: # Can be addressed later
- action: "Consider caching"
refs: ["services/data.ts"]
- action: 'Consider caching'
refs: ['services/data.ts']
```
### Gate Decision Criteria

View File

@@ -6,10 +6,10 @@ Generate a comprehensive risk assessment matrix for a story implementation using
```yaml
required:
- story_id: "{epic}.{story}" # e.g., "1.3"
- story_path: "docs/stories/{epic}.{story}.*.md"
- story_title: "{title}" # If missing, derive from story file H1
- story_slug: "{slug}" # If missing, derive from title (lowercase, hyphenated)
- story_id: '{epic}.{story}' # e.g., "1.3"
- story_path: 'docs/stories/{epic}.{story}.*.md'
- story_title: '{title}' # If missing, derive from story file H1
- story_slug: '{slug}' # If missing, derive from title (lowercase, hyphenated)
```
## Purpose
@@ -79,14 +79,14 @@ For each category, identify specific risks:
```yaml
risk:
id: "SEC-001" # Use prefixes: SEC, PERF, DATA, BUS, OPS, TECH
id: 'SEC-001' # Use prefixes: SEC, PERF, DATA, BUS, OPS, TECH
category: security
title: "Insufficient input validation on user forms"
description: "Form inputs not properly sanitized could lead to XSS attacks"
title: 'Insufficient input validation on user forms'
description: 'Form inputs not properly sanitized could lead to XSS attacks'
affected_components:
- "UserRegistrationForm"
- "ProfileUpdateForm"
detection_method: "Code review revealed missing validation"
- 'UserRegistrationForm'
- 'ProfileUpdateForm'
detection_method: 'Code review revealed missing validation'
```
### 2. Risk Assessment
@@ -133,20 +133,20 @@ For each identified risk, provide mitigation:
```yaml
mitigation:
risk_id: "SEC-001"
strategy: "preventive" # preventive|detective|corrective
risk_id: 'SEC-001'
strategy: 'preventive' # preventive|detective|corrective
actions:
- "Implement input validation library (e.g., validator.js)"
- "Add CSP headers to prevent XSS execution"
- "Sanitize all user inputs before storage"
- "Escape all outputs in templates"
- 'Implement input validation library (e.g., validator.js)'
- 'Add CSP headers to prevent XSS execution'
- 'Sanitize all user inputs before storage'
- 'Escape all outputs in templates'
testing_requirements:
- "Security testing with OWASP ZAP"
- "Manual penetration testing of forms"
- "Unit tests for validation functions"
residual_risk: "Low - Some zero-day vulnerabilities may remain"
owner: "dev"
timeline: "Before deployment"
- 'Security testing with OWASP ZAP'
- 'Manual penetration testing of forms'
- 'Unit tests for validation functions'
residual_risk: 'Low - Some zero-day vulnerabilities may remain'
owner: 'dev'
timeline: 'Before deployment'
```
## Outputs
@@ -172,12 +172,12 @@ risk_summary:
highest:
id: SEC-001
score: 9
title: "XSS on profile form"
title: 'XSS on profile form'
recommendations:
must_fix:
- "Add input sanitization & CSP"
- 'Add input sanitization & CSP'
monitor:
- "Add security alerts for auth endpoints"
- 'Add security alerts for auth endpoints'
```
### Output 2: Markdown Report

View File

@@ -6,10 +6,10 @@ Create comprehensive test scenarios with appropriate test level recommendations
```yaml
required:
- story_id: "{epic}.{story}" # e.g., "1.3"
- story_path: "{devStoryLocation}/{epic}.{story}.*.md" # Path from core-config.yaml
- story_title: "{title}" # If missing, derive from story file H1
- story_slug: "{slug}" # If missing, derive from title (lowercase, hyphenated)
- story_id: '{epic}.{story}' # e.g., "1.3"
- story_path: '{devStoryLocation}/{epic}.{story}.*.md' # Path from core-config.yaml
- story_title: '{title}' # If missing, derive from story file H1
- story_slug: '{slug}' # If missing, derive from title (lowercase, hyphenated)
```
## Purpose
@@ -62,13 +62,13 @@ For each identified test need, create:
```yaml
test_scenario:
id: "{epic}.{story}-{LEVEL}-{SEQ}"
requirement: "AC reference"
id: '{epic}.{story}-{LEVEL}-{SEQ}'
requirement: 'AC reference'
priority: P0|P1|P2|P3
level: unit|integration|e2e
description: "What is being tested"
justification: "Why this level was chosen"
mitigates_risks: ["RISK-001"] # If risk profile exists
description: 'What is being tested'
justification: 'Why this level was chosen'
mitigates_risks: ['RISK-001'] # If risk profile exists
```
### 5. Validate Coverage

View File

@@ -31,21 +31,21 @@ Identify all testable requirements from:
For each requirement, document which tests validate it. Use Given-When-Then to describe what the test validates (not how it's written):
```yaml
requirement: "AC1: User can login with valid credentials"
requirement: 'AC1: User can login with valid credentials'
test_mappings:
- test_file: "auth/login.test.ts"
test_case: "should successfully login with valid email and password"
- test_file: 'auth/login.test.ts'
test_case: 'should successfully login with valid email and password'
# Given-When-Then describes WHAT the test validates, not HOW it's coded
given: "A registered user with valid credentials"
when: "They submit the login form"
then: "They are redirected to dashboard and session is created"
given: 'A registered user with valid credentials'
when: 'They submit the login form'
then: 'They are redirected to dashboard and session is created'
coverage: full
- test_file: "e2e/auth-flow.test.ts"
test_case: "complete login flow"
given: "User on login page"
when: "Entering valid credentials and submitting"
then: "Dashboard loads with user data"
- test_file: 'e2e/auth-flow.test.ts'
test_case: 'complete login flow'
given: 'User on login page'
when: 'Entering valid credentials and submitting'
then: 'Dashboard loads with user data'
coverage: integration
```
@@ -67,19 +67,19 @@ Document any gaps found:
```yaml
coverage_gaps:
- requirement: "AC3: Password reset email sent within 60 seconds"
gap: "No test for email delivery timing"
- requirement: 'AC3: Password reset email sent within 60 seconds'
gap: 'No test for email delivery timing'
severity: medium
suggested_test:
type: integration
description: "Test email service SLA compliance"
description: 'Test email service SLA compliance'
- requirement: "AC5: Support 1000 concurrent users"
gap: "No load testing implemented"
- requirement: 'AC5: Support 1000 concurrent users'
gap: 'No load testing implemented'
severity: high
suggested_test:
type: performance
description: "Load test with 1000 concurrent connections"
description: 'Load test with 1000 concurrent connections'
```
## Outputs
@@ -95,11 +95,11 @@ trace:
full: Y
partial: Z
none: W
planning_ref: "docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md"
planning_ref: 'docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md'
uncovered:
- ac: "AC3"
reason: "No test found for password reset timing"
notes: "See docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md"
- ac: 'AC3'
reason: 'No test found for password reset timing'
notes: 'See docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md'
```
### Output 2: Traceability Report

View File

@@ -141,7 +141,14 @@ sections:
title: Feature Comparison Matrix
instruction: Create a detailed comparison table of key features across competitors
type: table
columns: ["Feature Category", "{{your_company}}", "{{competitor_1}}", "{{competitor_2}}", "{{competitor_3}}"]
columns:
[
"Feature Category",
"{{your_company}}",
"{{competitor_1}}",
"{{competitor_2}}",
"{{competitor_3}}",
]
rows:
- category: "Core Functionality"
items:
@@ -153,7 +160,13 @@ sections:
- ["Onboarding Time", "{{time}}", "{{time}}", "{{time}}", "{{time}}"]
- category: "Integration & Ecosystem"
items:
- ["API Availability", "{{availability}}", "{{availability}}", "{{availability}}", "{{availability}}"]
- [
"API Availability",
"{{availability}}",
"{{availability}}",
"{{availability}}",
"{{availability}}",
]
- ["Third-party Integrations", "{{number}}", "{{number}}", "{{number}}", "{{number}}"]
- category: "Pricing & Plans"
items:

View File

@@ -75,12 +75,24 @@ sections:
rows:
- ["Framework", "{{framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["UI Library", "{{ui_library}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["State Management", "{{state_management}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- [
"State Management",
"{{state_management}}",
"{{version}}",
"{{purpose}}",
"{{why_chosen}}",
]
- ["Routing", "{{routing_library}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Build Tool", "{{build_tool}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Styling", "{{styling_solution}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Testing", "{{test_framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Component Library", "{{component_lib}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- [
"Component Library",
"{{component_lib}}",
"{{version}}",
"{{purpose}}",
"{{why_chosen}}",
]
- ["Form Handling", "{{form_library}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Animation", "{{animation_lib}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Dev Tools", "{{dev_tools}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]

View File

@@ -156,11 +156,29 @@ sections:
columns: [Category, Technology, Version, Purpose, Rationale]
rows:
- ["Frontend Language", "{{fe_language}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Frontend Framework", "{{fe_framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["UI Component Library", "{{ui_library}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- [
"Frontend Framework",
"{{fe_framework}}",
"{{version}}",
"{{purpose}}",
"{{why_chosen}}",
]
- [
"UI Component Library",
"{{ui_library}}",
"{{version}}",
"{{purpose}}",
"{{why_chosen}}",
]
- ["State Management", "{{state_mgmt}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Backend Language", "{{be_language}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Backend Framework", "{{be_framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- [
"Backend Framework",
"{{be_framework}}",
"{{version}}",
"{{purpose}}",
"{{why_chosen}}",
]
- ["API Style", "{{api_style}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Database", "{{database}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Cache", "{{cache}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]

View File

@@ -14,7 +14,7 @@ template:
output:
format: markdown
filename: default-path/to/{{filename}}.md
title: "{{variable}} Document Title"
title: '{{variable}} Document Title'
workflow:
mode: interactive
@@ -108,8 +108,8 @@ sections:
Use `{{variable_name}}` in titles, templates, and content:
```yaml
title: "Epic {{epic_number}} {{epic_title}}"
template: "As a {{user_type}}, I want {{action}}, so that {{benefit}}."
title: 'Epic {{epic_number}} {{epic_title}}'
template: 'As a {{user_type}}, I want {{action}}, so that {{benefit}}.'
```
### Conditional Sections
@@ -212,7 +212,7 @@ choices:
- id: criteria
title: Acceptance Criteria
type: numbered-list
item_template: "{{criterion_number}}: {{criteria}}"
item_template: '{{criterion_number}}: {{criteria}}'
repeatable: true
```
@@ -220,7 +220,7 @@ choices:
````yaml
examples:
- "FR6: The system must authenticate users within 2 seconds"
- 'FR6: The system must authenticate users within 2 seconds'
- |
```mermaid
sequenceDiagram

View File

@@ -106,7 +106,7 @@ dependencies:
==================== START: .bmad-core/tasks/facilitate-brainstorming-session.md ====================
---
docOutputLocation: docs/brainstorming-session-results.md
template: ".bmad-core/templates/brainstorming-output-tmpl.yaml"
template: '.bmad-core/templates/brainstorming-output-tmpl.yaml'
---
# Facilitate Brainstorming Session Task
@@ -1101,24 +1101,24 @@ template:
output:
format: markdown
filename: docs/brief.md
title: "Project Brief: {{project_name}}"
title: 'Project Brief: {{project_name}}'
workflow:
mode: interactive
elicitation: advanced-elicitation
custom_elicitation:
title: "Project Brief Elicitation Actions"
title: 'Project Brief Elicitation Actions'
options:
- "Expand section with more specific details"
- "Validate against similar successful products"
- "Stress test assumptions with edge cases"
- "Explore alternative solution approaches"
- "Analyze resource/constraint trade-offs"
- "Generate risk mitigation strategies"
- "Challenge scope from MVP minimalist view"
- "Brainstorm creative feature possibilities"
- "If only we had [resource/capability/time]..."
- "Proceed to next section"
- 'Expand section with more specific details'
- 'Validate against similar successful products'
- 'Stress test assumptions with edge cases'
- 'Explore alternative solution approaches'
- 'Analyze resource/constraint trade-offs'
- 'Generate risk mitigation strategies'
- 'Challenge scope from MVP minimalist view'
- 'Brainstorm creative feature possibilities'
- 'If only we had [resource/capability/time]...'
- 'Proceed to next section'
sections:
- id: introduction
@@ -1140,7 +1140,7 @@ sections:
- Primary problem being solved
- Target market identification
- Key value proposition
template: "{{executive_summary_content}}"
template: '{{executive_summary_content}}'
- id: problem-statement
title: Problem Statement
@@ -1150,7 +1150,7 @@ sections:
- Impact of the problem (quantify if possible)
- Why existing solutions fall short
- Urgency and importance of solving this now
template: "{{detailed_problem_description}}"
template: '{{detailed_problem_description}}'
- id: proposed-solution
title: Proposed Solution
@@ -1160,7 +1160,7 @@ sections:
- Key differentiators from existing solutions
- Why this solution will succeed where others haven't
- High-level vision for the product
template: "{{solution_description}}"
template: '{{solution_description}}'
- id: target-users
title: Target Users
@@ -1172,12 +1172,12 @@ sections:
- Goals they're trying to achieve
sections:
- id: primary-segment
title: "Primary User Segment: {{segment_name}}"
template: "{{primary_user_description}}"
title: 'Primary User Segment: {{segment_name}}'
template: '{{primary_user_description}}'
- id: secondary-segment
title: "Secondary User Segment: {{segment_name}}"
title: 'Secondary User Segment: {{segment_name}}'
condition: Has secondary user segment
template: "{{secondary_user_description}}"
template: '{{secondary_user_description}}'
- id: goals-metrics
title: Goals & Success Metrics
@@ -1186,15 +1186,15 @@ sections:
- id: business-objectives
title: Business Objectives
type: bullet-list
template: "- {{objective_with_metric}}"
template: '- {{objective_with_metric}}'
- id: user-success-metrics
title: User Success Metrics
type: bullet-list
template: "- {{user_metric}}"
template: '- {{user_metric}}'
- id: kpis
title: Key Performance Indicators (KPIs)
type: bullet-list
template: "- {{kpi}}: {{definition_and_target}}"
template: '- {{kpi}}: {{definition_and_target}}'
- id: mvp-scope
title: MVP Scope
@@ -1203,14 +1203,14 @@ sections:
- id: core-features
title: Core Features (Must Have)
type: bullet-list
template: "- **{{feature}}:** {{description_and_rationale}}"
template: '- **{{feature}}:** {{description_and_rationale}}'
- id: out-of-scope
title: Out of Scope for MVP
type: bullet-list
template: "- {{feature_or_capability}}"
template: '- {{feature_or_capability}}'
- id: mvp-success-criteria
title: MVP Success Criteria
template: "{{mvp_success_definition}}"
template: '{{mvp_success_definition}}'
- id: post-mvp-vision
title: Post-MVP Vision
@@ -1218,13 +1218,13 @@ sections:
sections:
- id: phase-2-features
title: Phase 2 Features
template: "{{next_priority_features}}"
template: '{{next_priority_features}}'
- id: long-term-vision
title: Long-term Vision
template: "{{one_two_year_vision}}"
template: '{{one_two_year_vision}}'
- id: expansion-opportunities
title: Expansion Opportunities
template: "{{potential_expansions}}"
template: '{{potential_expansions}}'
- id: technical-considerations
title: Technical Considerations
@@ -1265,7 +1265,7 @@ sections:
- id: key-assumptions
title: Key Assumptions
type: bullet-list
template: "- {{assumption}}"
template: '- {{assumption}}'
- id: risks-questions
title: Risks & Open Questions
@@ -1274,15 +1274,15 @@ sections:
- id: key-risks
title: Key Risks
type: bullet-list
template: "- **{{risk}}:** {{description_and_impact}}"
template: '- **{{risk}}:** {{description_and_impact}}'
- id: open-questions
title: Open Questions
type: bullet-list
template: "- {{question}}"
template: '- {{question}}'
- id: research-areas
title: Areas Needing Further Research
type: bullet-list
template: "- {{research_topic}}"
template: '- {{research_topic}}'
- id: appendices
title: Appendices
@@ -1299,10 +1299,10 @@ sections:
- id: stakeholder-input
title: B. Stakeholder Input
condition: Has stakeholder feedback
template: "{{stakeholder_feedback}}"
template: '{{stakeholder_feedback}}'
- id: references
title: C. References
template: "{{relevant_links_and_docs}}"
template: '{{relevant_links_and_docs}}'
- id: next-steps
title: Next Steps
@@ -1310,7 +1310,7 @@ sections:
- id: immediate-actions
title: Immediate Actions
type: numbered-list
template: "{{action_item}}"
template: '{{action_item}}'
- id: pm-handoff
title: PM Handoff
content: |
@@ -1325,24 +1325,24 @@ template:
output:
format: markdown
filename: docs/market-research.md
title: "Market Research Report: {{project_product_name}}"
title: 'Market Research Report: {{project_product_name}}'
workflow:
mode: interactive
elicitation: advanced-elicitation
custom_elicitation:
title: "Market Research Elicitation Actions"
title: 'Market Research Elicitation Actions'
options:
- "Expand market sizing calculations with sensitivity analysis"
- "Deep dive into a specific customer segment"
- "Analyze an emerging market trend in detail"
- "Compare this market to an analogous market"
- "Stress test market assumptions"
- "Explore adjacent market opportunities"
- "Challenge market definition and boundaries"
- "Generate strategic scenarios (best/base/worst case)"
- "If only we had considered [X market factor]..."
- "Proceed to next section"
- 'Expand market sizing calculations with sensitivity analysis'
- 'Deep dive into a specific customer segment'
- 'Analyze an emerging market trend in detail'
- 'Compare this market to an analogous market'
- 'Stress test market assumptions'
- 'Explore adjacent market opportunities'
- 'Challenge market definition and boundaries'
- 'Generate strategic scenarios (best/base/worst case)'
- 'If only we had considered [X market factor]...'
- 'Proceed to next section'
sections:
- id: executive-summary
@@ -1424,7 +1424,7 @@ sections:
repeatable: true
sections:
- id: segment
title: "Segment {{segment_number}}: {{segment_name}}"
title: 'Segment {{segment_number}}: {{segment_name}}'
template: |
- **Description:** {{brief_overview}}
- **Size:** {{number_of_customers_market_value}}
@@ -1493,20 +1493,20 @@ sections:
instruction: Analyze each force with specific evidence and implications
sections:
- id: supplier-power
title: "Supplier Power: {{power_level}}"
template: "{{analysis_and_implications}}"
title: 'Supplier Power: {{power_level}}'
template: '{{analysis_and_implications}}'
- id: buyer-power
title: "Buyer Power: {{power_level}}"
template: "{{analysis_and_implications}}"
title: 'Buyer Power: {{power_level}}'
template: '{{analysis_and_implications}}'
- id: competitive-rivalry
title: "Competitive Rivalry: {{intensity_level}}"
template: "{{analysis_and_implications}}"
title: 'Competitive Rivalry: {{intensity_level}}'
template: '{{analysis_and_implications}}'
- id: threat-new-entry
title: "Threat of New Entry: {{threat_level}}"
template: "{{analysis_and_implications}}"
title: 'Threat of New Entry: {{threat_level}}'
template: '{{analysis_and_implications}}'
- id: threat-substitutes
title: "Threat of Substitutes: {{threat_level}}"
template: "{{analysis_and_implications}}"
title: 'Threat of Substitutes: {{threat_level}}'
template: '{{analysis_and_implications}}'
- id: adoption-lifecycle
title: Technology Adoption Lifecycle Stage
instruction: |
@@ -1524,7 +1524,7 @@ sections:
repeatable: true
sections:
- id: opportunity
title: "Opportunity {{opportunity_number}}: {{name}}"
title: 'Opportunity {{opportunity_number}}: {{name}}'
template: |
- **Description:** {{what_is_the_opportunity}}
- **Size/Potential:** {{quantified_potential}}
@@ -1580,24 +1580,24 @@ template:
output:
format: markdown
filename: docs/competitor-analysis.md
title: "Competitive Analysis Report: {{project_product_name}}"
title: 'Competitive Analysis Report: {{project_product_name}}'
workflow:
mode: interactive
elicitation: advanced-elicitation
custom_elicitation:
title: "Competitive Analysis Elicitation Actions"
title: 'Competitive Analysis Elicitation Actions'
options:
- "Deep dive on a specific competitor's strategy"
- "Analyze competitive dynamics in a specific segment"
- "War game competitive responses to your moves"
- "Explore partnership vs. competition scenarios"
- "Stress test differentiation claims"
- "Analyze disruption potential (yours or theirs)"
- "Compare to competition in adjacent markets"
- "Generate win/loss analysis insights"
- 'Analyze competitive dynamics in a specific segment'
- 'War game competitive responses to your moves'
- 'Explore partnership vs. competition scenarios'
- 'Stress test differentiation claims'
- 'Analyze disruption potential (yours or theirs)'
- 'Compare to competition in adjacent markets'
- 'Generate win/loss analysis insights'
- "If only we had known about [competitor X's plan]..."
- "Proceed to next section"
- 'Proceed to next section'
sections:
- id: executive-summary
@@ -1664,7 +1664,7 @@ sections:
repeatable: true
sections:
- id: competitor
title: "{{competitor_name}} - Priority {{priority_level}}"
title: '{{competitor_name}} - Priority {{priority_level}}'
sections:
- id: company-overview
title: Company Overview
@@ -1696,11 +1696,11 @@ sections:
- id: strengths
title: Strengths
type: bullet-list
template: "- {{strength}}"
template: '- {{strength}}'
- id: weaknesses
title: Weaknesses
type: bullet-list
template: "- {{weakness}}"
template: '- {{weakness}}'
- id: market-position
title: Market Position & Performance
template: |
@@ -1716,24 +1716,37 @@ sections:
title: Feature Comparison Matrix
instruction: Create a detailed comparison table of key features across competitors
type: table
columns: ["Feature Category", "{{your_company}}", "{{competitor_1}}", "{{competitor_2}}", "{{competitor_3}}"]
columns:
[
'Feature Category',
'{{your_company}}',
'{{competitor_1}}',
'{{competitor_2}}',
'{{competitor_3}}',
]
rows:
- category: "Core Functionality"
- category: 'Core Functionality'
items:
- ["Feature A", "{{status}}", "{{status}}", "{{status}}", "{{status}}"]
- ["Feature B", "{{status}}", "{{status}}", "{{status}}", "{{status}}"]
- category: "User Experience"
- ['Feature A', '{{status}}', '{{status}}', '{{status}}', '{{status}}']
- ['Feature B', '{{status}}', '{{status}}', '{{status}}', '{{status}}']
- category: 'User Experience'
items:
- ["Mobile App", "{{rating}}", "{{rating}}", "{{rating}}", "{{rating}}"]
- ["Onboarding Time", "{{time}}", "{{time}}", "{{time}}", "{{time}}"]
- category: "Integration & Ecosystem"
- ['Mobile App', '{{rating}}', '{{rating}}', '{{rating}}', '{{rating}}']
- ['Onboarding Time', '{{time}}', '{{time}}', '{{time}}', '{{time}}']
- category: 'Integration & Ecosystem'
items:
- ["API Availability", "{{availability}}", "{{availability}}", "{{availability}}", "{{availability}}"]
- ["Third-party Integrations", "{{number}}", "{{number}}", "{{number}}", "{{number}}"]
- category: "Pricing & Plans"
- [
'API Availability',
'{{availability}}',
'{{availability}}',
'{{availability}}',
'{{availability}}',
]
- ['Third-party Integrations', '{{number}}', '{{number}}', '{{number}}', '{{number}}']
- category: 'Pricing & Plans'
items:
- ["Starting Price", "{{price}}", "{{price}}", "{{price}}", "{{price}}"]
- ["Free Tier", "{{yes_no}}", "{{yes_no}}", "{{yes_no}}", "{{yes_no}}"]
- ['Starting Price', '{{price}}', '{{price}}', '{{price}}', '{{price}}']
- ['Free Tier', '{{yes_no}}', '{{yes_no}}', '{{yes_no}}', '{{yes_no}}']
- id: swot-comparison
title: SWOT Comparison
instruction: Create SWOT analysis for your solution vs. top competitors
@@ -1746,7 +1759,7 @@ sections:
- **Opportunities:** {{opportunities}}
- **Threats:** {{threats}}
- id: vs-competitor
title: "vs. {{main_competitor}}"
title: 'vs. {{main_competitor}}'
template: |
- **Competitive Advantages:** {{your_advantages}}
- **Competitive Disadvantages:** {{their_advantages}}
@@ -1876,7 +1889,7 @@ template:
output:
format: markdown
filename: docs/brainstorming-session-results.md
title: "Brainstorming Session Results"
title: 'Brainstorming Session Results'
workflow:
mode: non-interactive
@@ -1901,38 +1914,38 @@ sections:
**Total Ideas Generated:** {{total_ideas}}
- id: key-themes
title: "Key Themes Identified:"
title: 'Key Themes Identified:'
type: bullet-list
template: "- {{theme}}"
template: '- {{theme}}'
- id: technique-sessions
title: Technique Sessions
repeatable: true
sections:
- id: technique
title: "{{technique_name}} - {{duration}}"
title: '{{technique_name}} - {{duration}}'
sections:
- id: description
template: "**Description:** {{technique_description}}"
template: '**Description:** {{technique_description}}'
- id: ideas-generated
title: "Ideas Generated:"
title: 'Ideas Generated:'
type: numbered-list
template: "{{idea}}"
template: '{{idea}}'
- id: insights
title: "Insights Discovered:"
title: 'Insights Discovered:'
type: bullet-list
template: "- {{insight}}"
template: '- {{insight}}'
- id: connections
title: "Notable Connections:"
title: 'Notable Connections:'
type: bullet-list
template: "- {{connection}}"
template: '- {{connection}}'
- id: idea-categorization
title: Idea Categorization
sections:
- id: immediate-opportunities
title: Immediate Opportunities
content: "*Ideas ready to implement now*"
content: '*Ideas ready to implement now*'
repeatable: true
type: numbered-list
template: |
@@ -1942,7 +1955,7 @@ sections:
- Resources needed: {{requirements}}
- id: future-innovations
title: Future Innovations
content: "*Ideas requiring development/research*"
content: '*Ideas requiring development/research*'
repeatable: true
type: numbered-list
template: |
@@ -1952,7 +1965,7 @@ sections:
- Timeline estimate: {{timeline}}
- id: moonshots
title: Moonshots
content: "*Ambitious, transformative concepts*"
content: '*Ambitious, transformative concepts*'
repeatable: true
type: numbered-list
template: |
@@ -1962,9 +1975,9 @@ sections:
- Challenges to overcome: {{challenges}}
- id: insights-learnings
title: Insights & Learnings
content: "*Key realizations from the session*"
content: '*Key realizations from the session*'
type: bullet-list
template: "- {{insight}}: {{description_and_implications}}"
template: '- {{insight}}: {{description_and_implications}}'
- id: action-planning
title: Action Planning
@@ -1973,21 +1986,21 @@ sections:
title: Top 3 Priority Ideas
sections:
- id: priority-1
title: "#1 Priority: {{idea_name}}"
title: '#1 Priority: {{idea_name}}'
template: |
- Rationale: {{rationale}}
- Next steps: {{next_steps}}
- Resources needed: {{resources}}
- Timeline: {{timeline}}
- id: priority-2
title: "#2 Priority: {{idea_name}}"
title: '#2 Priority: {{idea_name}}'
template: |
- Rationale: {{rationale}}
- Next steps: {{next_steps}}
- Resources needed: {{resources}}
- Timeline: {{timeline}}
- id: priority-3
title: "#3 Priority: {{idea_name}}"
title: '#3 Priority: {{idea_name}}'
template: |
- Rationale: {{rationale}}
- Next steps: {{next_steps}}
@@ -2000,19 +2013,19 @@ sections:
- id: what-worked
title: What Worked Well
type: bullet-list
template: "- {{aspect}}"
template: '- {{aspect}}'
- id: areas-exploration
title: Areas for Further Exploration
type: bullet-list
template: "- {{area}}: {{reason}}"
template: '- {{area}}: {{reason}}'
- id: recommended-techniques
title: Recommended Follow-up Techniques
type: bullet-list
template: "- {{technique}}: {{reason}}"
template: '- {{technique}}: {{reason}}'
- id: questions-emerged
title: Questions That Emerged
type: bullet-list
template: "- {{question}}"
template: '- {{question}}'
- id: next-session
title: Next Session Planning
template: |
@@ -2328,7 +2341,7 @@ You are the "Vibe CEO" - thinking like a CEO with unlimited resources and a sing
- **Claude Code**: `/agent-name` (e.g., `/bmad-master`)
- **Cursor**: `@agent-name` (e.g., `@bmad-master`)
- **Windsurf**: `@agent-name` (e.g., `@bmad-master`)
- **Windsurf**: `/agent-name` (e.g., `/bmad-master`)
- **Trae**: `@agent-name` (e.g., `@bmad-master`)
- **Roo Code**: Select mode from mode selector (e.g., `bmad-master`)
- **GitHub Copilot**: Open the Chat view (`⌃⌘I` on Mac, `Ctrl+Alt+I` on Windows/Linux) and select **Agent** from the chat mode selector.
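
The pervasive quote flips in these hunks (`"..."` becoming `'...'` across both the YAML templates and the TypeScript samples) are what a Prettier pass configured to prefer single quotes produces. The formatter config itself is not visible in the hunks shown here, so the following `.prettierrc.yaml` is only a sketch of settings that would yield this output:

```yaml
# Sketch only: inferred from the diff; the actual config file is not shown in these hunks.
singleQuote: true # "title" becomes 'title' in YAML, "player" becomes 'player' in TypeScript
printWidth: 100 # assumed; would explain why long inline YAML flow sequences wrap onto multiple lines below
```

Note that strings containing apostrophes, such as `"camelCase with 'use'"` and the "Progressive disclosure" bullet, keep their double quotes; that matches Prettier's escape-minimizing quote behavior rather than an inconsistency in the commit.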

View File

@@ -933,7 +933,7 @@ template:
output:
format: markdown
filename: docs/architecture.md
title: "{{project_name}} Architecture Document"
title: '{{project_name}} Architecture Document'
workflow:
mode: interactive
@@ -1044,11 +1044,11 @@ sections:
- Code organization patterns (Dependency Injection, Repository, Module, Factory)
- Data patterns (Event Sourcing, Saga, Database per Service)
- Communication patterns (REST, GraphQL, Message Queue, Pub/Sub)
template: "- **{{pattern_name}}:** {{pattern_description}} - _Rationale:_ {{rationale}}"
template: '- **{{pattern_name}}:** {{pattern_description}} - _Rationale:_ {{rationale}}'
examples:
- "**Serverless Architecture:** Using AWS Lambda for compute - _Rationale:_ Aligns with PRD requirement for cost optimization and automatic scaling"
- "**Repository Pattern:** Abstract data access logic - _Rationale:_ Enables testing and future database migration flexibility"
- "**Event-Driven Communication:** Using SNS/SQS for service decoupling - _Rationale:_ Supports async processing and system resilience"
- '**Serverless Architecture:** Using AWS Lambda for compute - _Rationale:_ Aligns with PRD requirement for cost optimization and automatic scaling'
- '**Repository Pattern:** Abstract data access logic - _Rationale:_ Enables testing and future database migration flexibility'
- '**Event-Driven Communication:** Using SNS/SQS for service decoupling - _Rationale:_ Supports async processing and system resilience'
- id: tech-stack
title: Tech Stack
@@ -1086,9 +1086,9 @@ sections:
columns: [Category, Technology, Version, Purpose, Rationale]
instruction: Populate the technology stack table with all relevant technologies
examples:
- "| **Language** | TypeScript | 5.3.3 | Primary development language | Strong typing, excellent tooling, team expertise |"
- "| **Runtime** | Node.js | 20.11.0 | JavaScript runtime | LTS version, stable performance, wide ecosystem |"
- "| **Framework** | NestJS | 10.3.2 | Backend framework | Enterprise-ready, good DI, matches team patterns |"
- '| **Language** | TypeScript | 5.3.3 | Primary development language | Strong typing, excellent tooling, team expertise |'
- '| **Runtime** | Node.js | 20.11.0 | JavaScript runtime | LTS version, stable performance, wide ecosystem |'
- '| **Framework** | NestJS | 10.3.2 | Backend framework | Enterprise-ready, good DI, matches team patterns |'
- id: data-models
title: Data Models
@@ -1106,7 +1106,7 @@ sections:
repeatable: true
sections:
- id: model
title: "{{model_name}}"
title: '{{model_name}}'
template: |
**Purpose:** {{model_purpose}}
@@ -1137,7 +1137,7 @@ sections:
sections:
- id: component-list
repeatable: true
title: "{{component_name}}"
title: '{{component_name}}'
template: |
**Responsibility:** {{component_description}}
@@ -1175,7 +1175,7 @@ sections:
repeatable: true
sections:
- id: api
title: "{{api_name}} API"
title: '{{api_name}} API'
template: |
- **Purpose:** {{api_purpose}}
- **Documentation:** {{api_docs_url}}
@@ -1300,12 +1300,12 @@ sections:
- id: environments
title: Environments
repeatable: true
template: "- **{{env_name}}:** {{env_purpose}} - {{env_details}}"
template: '- **{{env_name}}:** {{env_purpose}} - {{env_details}}'
- id: promotion-flow
title: Environment Promotion Flow
type: code
language: text
template: "{{promotion_flow_diagram}}"
template: '{{promotion_flow_diagram}}'
- id: rollback-strategy
title: Rollback Strategy
template: |
@@ -1401,16 +1401,16 @@ sections:
Avoid obvious rules like "use SOLID principles" or "write clean code"
repeatable: true
template: "- **{{rule_name}}:** {{rule_description}}"
template: '- **{{rule_name}}:** {{rule_description}}'
- id: language-specifics
title: Language-Specific Guidelines
condition: Critical language-specific rules needed
instruction: Add ONLY if critical for preventing AI mistakes. Most teams don't need this section.
sections:
- id: language-rules
title: "{{language_name}} Specifics"
title: '{{language_name}} Specifics'
repeatable: true
template: "- **{{rule_topic}}:** {{rule_detail}}"
template: '- **{{rule_topic}}:** {{rule_detail}}'
- id: test-strategy
title: Test Strategy and Standards
@@ -1458,9 +1458,9 @@ sections:
- **Test Infrastructure:**
- **{{dependency_name}}:** {{test_approach}} ({{test_tool}})
examples:
- "**Database:** In-memory H2 for unit tests, Testcontainers PostgreSQL for integration"
- "**Message Queue:** Embedded Kafka for tests"
- "**External APIs:** WireMock for stubbing"
- '**Database:** In-memory H2 for unit tests, Testcontainers PostgreSQL for integration'
- '**Message Queue:** Embedded Kafka for tests'
- '**External APIs:** WireMock for stubbing'
- id: e2e-tests
title: End-to-End Tests
template: |
@@ -1586,7 +1586,7 @@ template:
output:
format: markdown
filename: docs/ui-architecture.md
title: "{{project_name}} Frontend Architecture Document"
title: '{{project_name}} Frontend Architecture Document'
workflow:
mode: interactive
@@ -1654,17 +1654,29 @@ sections:
columns: [Category, Technology, Version, Purpose, Rationale]
instruction: Fill in appropriate technology choices based on the selected framework and project requirements.
rows:
- ["Framework", "{{framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["UI Library", "{{ui_library}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["State Management", "{{state_management}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Routing", "{{routing_library}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Build Tool", "{{build_tool}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Styling", "{{styling_solution}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Testing", "{{test_framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Component Library", "{{component_lib}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Form Handling", "{{form_library}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Animation", "{{animation_lib}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Dev Tools", "{{dev_tools}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ['Framework', '{{framework}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['UI Library', '{{ui_library}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- [
'State Management',
'{{state_management}}',
'{{version}}',
'{{purpose}}',
'{{why_chosen}}',
]
- ['Routing', '{{routing_library}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Build Tool', '{{build_tool}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Styling', '{{styling_solution}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Testing', '{{test_framework}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- [
'Component Library',
'{{component_lib}}',
'{{version}}',
'{{purpose}}',
'{{why_chosen}}',
]
- ['Form Handling', '{{form_library}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Animation', '{{animation_lib}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Dev Tools', '{{dev_tools}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- id: project-structure
title: Project Structure
@@ -1758,12 +1770,12 @@ sections:
title: Testing Best Practices
type: numbered-list
items:
- "**Unit Tests**: Test individual components in isolation"
- "**Integration Tests**: Test component interactions"
- "**E2E Tests**: Test critical user flows (using Cypress/Playwright)"
- "**Coverage Goals**: Aim for 80% code coverage"
- "**Test Structure**: Arrange-Act-Assert pattern"
- "**Mock External Dependencies**: API calls, routing, state management"
- '**Unit Tests**: Test individual components in isolation'
- '**Integration Tests**: Test component interactions'
- '**E2E Tests**: Test critical user flows (using Cypress/Playwright)'
- '**Coverage Goals**: Aim for 80% code coverage'
- '**Test Structure**: Arrange-Act-Assert pattern'
- '**Mock External Dependencies**: API calls, routing, state management'
- id: environment-configuration
title: Environment Configuration
@@ -1795,7 +1807,7 @@ template:
output:
format: markdown
filename: docs/architecture.md
title: "{{project_name}} Fullstack Architecture Document"
title: '{{project_name}} Fullstack Architecture Document'
workflow:
mode: interactive
@@ -1916,12 +1928,12 @@ sections:
For each pattern, provide recommendation and rationale.
repeatable: true
template: "- **{{pattern_name}}:** {{pattern_description}} - _Rationale:_ {{rationale}}"
template: '- **{{pattern_name}}:** {{pattern_description}} - _Rationale:_ {{rationale}}'
examples:
- "**Jamstack Architecture:** Static site generation with serverless APIs - _Rationale:_ Optimal performance and scalability for content-heavy applications"
- "**Component-Based UI:** Reusable React components with TypeScript - _Rationale:_ Maintainability and type safety across large codebases"
- "**Repository Pattern:** Abstract data access logic - _Rationale:_ Enables testing and future database migration flexibility"
- "**API Gateway Pattern:** Single entry point for all API calls - _Rationale:_ Centralized auth, rate limiting, and monitoring"
- '**Jamstack Architecture:** Static site generation with serverless APIs - _Rationale:_ Optimal performance and scalability for content-heavy applications'
- '**Component-Based UI:** Reusable React components with TypeScript - _Rationale:_ Maintainability and type safety across large codebases'
- '**Repository Pattern:** Abstract data access logic - _Rationale:_ Enables testing and future database migration flexibility'
- '**API Gateway Pattern:** Single entry point for all API calls - _Rationale:_ Centralized auth, rate limiting, and monitoring'
- id: tech-stack
title: Tech Stack
@@ -1945,27 +1957,45 @@ sections:
type: table
columns: [Category, Technology, Version, Purpose, Rationale]
rows:
- ["Frontend Language", "{{fe_language}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Frontend Framework", "{{fe_framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["UI Component Library", "{{ui_library}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["State Management", "{{state_mgmt}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Backend Language", "{{be_language}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Backend Framework", "{{be_framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["API Style", "{{api_style}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Database", "{{database}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Cache", "{{cache}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["File Storage", "{{storage}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Authentication", "{{auth}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Frontend Testing", "{{fe_test}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Backend Testing", "{{be_test}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["E2E Testing", "{{e2e_test}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Build Tool", "{{build_tool}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Bundler", "{{bundler}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["IaC Tool", "{{iac_tool}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["CI/CD", "{{cicd}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Monitoring", "{{monitoring}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["Logging", "{{logging}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ["CSS Framework", "{{css_framework}}", "{{version}}", "{{purpose}}", "{{why_chosen}}"]
- ['Frontend Language', '{{fe_language}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- [
'Frontend Framework',
'{{fe_framework}}',
'{{version}}',
'{{purpose}}',
'{{why_chosen}}',
]
- [
'UI Component Library',
'{{ui_library}}',
'{{version}}',
'{{purpose}}',
'{{why_chosen}}',
]
- ['State Management', '{{state_mgmt}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Backend Language', '{{be_language}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- [
'Backend Framework',
'{{be_framework}}',
'{{version}}',
'{{purpose}}',
'{{why_chosen}}',
]
- ['API Style', '{{api_style}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Database', '{{database}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Cache', '{{cache}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['File Storage', '{{storage}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Authentication', '{{auth}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Frontend Testing', '{{fe_test}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Backend Testing', '{{be_test}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['E2E Testing', '{{e2e_test}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Build Tool', '{{build_tool}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Bundler', '{{bundler}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['IaC Tool', '{{iac_tool}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['CI/CD', '{{cicd}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Monitoring', '{{monitoring}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['Logging', '{{logging}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- ['CSS Framework', '{{css_framework}}', '{{version}}', '{{purpose}}', '{{why_chosen}}']
- id: data-models
title: Data Models
@@ -1984,7 +2014,7 @@ sections:
repeatable: true
sections:
- id: model
title: "{{model_name}}"
title: '{{model_name}}'
template: |
**Purpose:** {{model_purpose}}
@@ -1996,11 +2026,11 @@ sections:
title: TypeScript Interface
type: code
language: typescript
template: "{{model_interface}}"
template: '{{model_interface}}'
- id: relationships
title: Relationships
type: bullet-list
template: "- {{relationship}}"
template: '- {{relationship}}'
- id: api-spec
title: API Specification
@@ -2037,13 +2067,13 @@ sections:
condition: API style is GraphQL
type: code
language: graphql
template: "{{graphql_schema}}"
template: '{{graphql_schema}}'
- id: trpc-api
title: tRPC Router Definitions
condition: API style is tRPC
type: code
language: typescript
template: "{{trpc_routers}}"
template: '{{trpc_routers}}'
- id: components
title: Components
@@ -2064,7 +2094,7 @@ sections:
sections:
- id: component-list
repeatable: true
title: "{{component_name}}"
title: '{{component_name}}'
template: |
**Responsibility:** {{component_description}}
@@ -2102,7 +2132,7 @@ sections:
repeatable: true
sections:
- id: api
title: "{{api_name}} API"
title: '{{api_name}} API'
template: |
- **Purpose:** {{api_purpose}}
- **Documentation:** {{api_docs_url}}
@@ -2159,12 +2189,12 @@ sections:
title: Component Organization
type: code
language: text
template: "{{component_structure}}"
template: '{{component_structure}}'
- id: component-template
title: Component Template
type: code
language: typescript
template: "{{component_template}}"
template: '{{component_template}}'
- id: state-management
title: State Management Architecture
instruction: Detail state management approach based on chosen solution.
@@ -2173,11 +2203,11 @@ sections:
title: State Structure
type: code
language: typescript
template: "{{state_structure}}"
template: '{{state_structure}}'
- id: state-patterns
title: State Management Patterns
type: bullet-list
template: "- {{pattern}}"
template: '- {{pattern}}'
- id: routing-architecture
title: Routing Architecture
instruction: Define routing structure based on framework choice.
@@ -2186,12 +2216,12 @@ sections:
title: Route Organization
type: code
language: text
template: "{{route_structure}}"
template: '{{route_structure}}'
- id: protected-routes
title: Protected Route Pattern
type: code
language: typescript
template: "{{protected_route_example}}"
template: '{{protected_route_example}}'
- id: frontend-services
title: Frontend Services Layer
instruction: Define how frontend communicates with backend.
@@ -2200,12 +2230,12 @@ sections:
title: API Client Setup
type: code
language: typescript
template: "{{api_client_setup}}"
template: '{{api_client_setup}}'
- id: service-example
title: Service Example
type: code
language: typescript
template: "{{service_example}}"
template: '{{service_example}}'
- id: backend-architecture
title: Backend Architecture
@@ -2223,12 +2253,12 @@ sections:
title: Function Organization
type: code
language: text
template: "{{function_structure}}"
template: '{{function_structure}}'
- id: function-template
title: Function Template
type: code
language: typescript
template: "{{function_template}}"
template: '{{function_template}}'
- id: traditional-server
condition: Traditional server architecture chosen
sections:
@@ -2236,12 +2266,12 @@ sections:
title: Controller/Route Organization
type: code
language: text
template: "{{controller_structure}}"
template: '{{controller_structure}}'
- id: controller-template
title: Controller Template
type: code
language: typescript
template: "{{controller_template}}"
template: '{{controller_template}}'
- id: database-architecture
title: Database Architecture
instruction: Define database schema and access patterns.
@@ -2250,12 +2280,12 @@ sections:
title: Schema Design
type: code
language: sql
template: "{{database_schema}}"
template: '{{database_schema}}'
- id: data-access-layer
title: Data Access Layer
type: code
language: typescript
template: "{{repository_pattern}}"
template: '{{repository_pattern}}'
- id: auth-architecture
title: Authentication and Authorization
instruction: Define auth implementation details.
@@ -2264,12 +2294,12 @@ sections:
title: Auth Flow
type: mermaid
mermaid_type: sequence
template: "{{auth_flow_diagram}}"
template: '{{auth_flow_diagram}}'
- id: auth-middleware
title: Middleware/Guards
type: code
language: typescript
template: "{{auth_middleware}}"
template: '{{auth_middleware}}'
- id: unified-project-structure
title: Unified Project Structure
@@ -2345,12 +2375,12 @@ sections:
title: Prerequisites
type: code
language: bash
template: "{{prerequisites_commands}}"
template: '{{prerequisites_commands}}'
- id: initial-setup
title: Initial Setup
type: code
language: bash
template: "{{setup_commands}}"
template: '{{setup_commands}}'
- id: dev-commands
title: Development Commands
type: code
@@ -2406,15 +2436,15 @@ sections:
title: CI/CD Pipeline
type: code
language: yaml
template: "{{cicd_pipeline_config}}"
template: '{{cicd_pipeline_config}}'
- id: environments
title: Environments
type: table
columns: [Environment, Frontend URL, Backend URL, Purpose]
rows:
- ["Development", "{{dev_fe_url}}", "{{dev_be_url}}", "Local development"]
- ["Staging", "{{staging_fe_url}}", "{{staging_be_url}}", "Pre-production testing"]
- ["Production", "{{prod_fe_url}}", "{{prod_be_url}}", "Live environment"]
- ['Development', '{{dev_fe_url}}', '{{dev_be_url}}', 'Local development']
- ['Staging', '{{staging_fe_url}}', '{{staging_be_url}}', 'Pre-production testing']
- ['Production', '{{prod_fe_url}}', '{{prod_be_url}}', 'Live environment']
- id: security-performance
title: Security and Performance
@@ -2473,17 +2503,17 @@ sections:
title: Frontend Tests
type: code
language: text
template: "{{frontend_test_structure}}"
template: '{{frontend_test_structure}}'
- id: backend-tests
title: Backend Tests
type: code
language: text
template: "{{backend_test_structure}}"
template: '{{backend_test_structure}}'
- id: e2e-tests
title: E2E Tests
type: code
language: text
template: "{{e2e_test_structure}}"
template: '{{e2e_test_structure}}'
- id: test-examples
title: Test Examples
sections:
@@ -2491,17 +2521,17 @@ sections:
title: Frontend Component Test
type: code
language: typescript
template: "{{frontend_test_example}}"
template: '{{frontend_test_example}}'
- id: backend-test
title: Backend API Test
type: code
language: typescript
template: "{{backend_test_example}}"
template: '{{backend_test_example}}'
- id: e2e-test
title: E2E Test
type: code
language: typescript
template: "{{e2e_test_example}}"
template: '{{e2e_test_example}}'
- id: coding-standards
title: Coding Standards
@@ -2511,22 +2541,22 @@ sections:
- id: critical-rules
title: Critical Fullstack Rules
repeatable: true
template: "- **{{rule_name}}:** {{rule_description}}"
template: '- **{{rule_name}}:** {{rule_description}}'
examples:
- "**Type Sharing:** Always define types in packages/shared and import from there"
- "**API Calls:** Never make direct HTTP calls - use the service layer"
- "**Environment Variables:** Access only through config objects, never process.env directly"
- "**Error Handling:** All API routes must use the standard error handler"
- "**State Updates:** Never mutate state directly - use proper state management patterns"
- '**Type Sharing:** Always define types in packages/shared and import from there'
- '**API Calls:** Never make direct HTTP calls - use the service layer'
- '**Environment Variables:** Access only through config objects, never process.env directly'
- '**Error Handling:** All API routes must use the standard error handler'
- '**State Updates:** Never mutate state directly - use proper state management patterns'
- id: naming-conventions
title: Naming Conventions
type: table
columns: [Element, Frontend, Backend, Example]
rows:
- ["Components", "PascalCase", "-", "`UserProfile.tsx`"]
- ["Hooks", "camelCase with 'use'", "-", "`useAuth.ts`"]
- ["API Routes", "-", "kebab-case", "`/api/user-profile`"]
- ["Database Tables", "-", "snake_case", "`user_profiles`"]
- ['Components', 'PascalCase', '-', '`UserProfile.tsx`']
- ['Hooks', "camelCase with 'use'", '-', '`useAuth.ts`']
- ['API Routes', '-', 'kebab-case', '`/api/user-profile`']
- ['Database Tables', '-', 'snake_case', '`user_profiles`']
- id: error-handling
title: Error Handling Strategy
@@ -2537,7 +2567,7 @@ sections:
title: Error Flow
type: mermaid
mermaid_type: sequence
template: "{{error_flow_diagram}}"
template: '{{error_flow_diagram}}'
- id: error-format
title: Error Response Format
type: code
@@ -2556,12 +2586,12 @@ sections:
title: Frontend Error Handling
type: code
language: typescript
template: "{{frontend_error_handler}}"
template: '{{frontend_error_handler}}'
- id: backend-error-handling
title: Backend Error Handling
type: code
language: typescript
template: "{{backend_error_handler}}"
template: '{{backend_error_handler}}'
- id: monitoring
title: Monitoring and Observability
@@ -2603,7 +2633,7 @@ template:
output:
format: markdown
filename: docs/architecture.md
title: "{{project_name}} Brownfield Enhancement Architecture"
title: '{{project_name}} Brownfield Enhancement Architecture'
workflow:
mode: interactive
@@ -2661,11 +2691,11 @@ sections:
- id: available-docs
title: Available Documentation
type: bullet-list
template: "- {{existing_docs_summary}}"
template: '- {{existing_docs_summary}}'
- id: constraints
title: Identified Constraints
type: bullet-list
template: "- {{constraint}}"
template: '- {{constraint}}'
- id: changelog
title: Change Log
type: table
@@ -2745,7 +2775,7 @@ sections:
repeatable: true
sections:
- id: model
title: "{{model_name}}"
title: '{{model_name}}'
template: |
**Purpose:** {{model_purpose}}
**Integration:** {{integration_with_existing}}
@@ -2788,7 +2818,7 @@ sections:
repeatable: true
sections:
- id: component
title: "{{component_name}}"
title: '{{component_name}}'
template: |
**Responsibility:** {{component_description}}
**Integration Points:** {{integration_points}}
@@ -2831,7 +2861,7 @@ sections:
repeatable: true
sections:
- id: endpoint
title: "{{endpoint_name}}"
title: '{{endpoint_name}}'
template: |
- **Method:** {{http_method}}
- **Endpoint:** {{endpoint_path}}
@@ -2842,12 +2872,12 @@ sections:
title: Request
type: code
language: json
template: "{{request_schema}}"
template: '{{request_schema}}'
- id: response
title: Response
type: code
language: json
template: "{{response_schema}}"
template: '{{response_schema}}'
- id: external-api-integration
title: External API Integration
@@ -2856,7 +2886,7 @@ sections:
repeatable: true
sections:
- id: external-api
title: "{{api_name}} API"
title: '{{api_name}} API'
template: |
- **Purpose:** {{api_purpose}}
- **Documentation:** {{api_docs_url}}
@@ -2885,7 +2915,7 @@ sections:
type: code
language: plaintext
instruction: Document relevant parts of current structure
template: "{{existing_structure_relevant_parts}}"
template: '{{existing_structure_relevant_parts}}'
- id: new-file-organization
title: New File Organization
type: code
@@ -2960,7 +2990,7 @@ sections:
title: Enhancement-Specific Standards
condition: New patterns needed for enhancement
repeatable: true
template: "- **{{standard_name}}:** {{standard_description}}"
template: '- **{{standard_name}}:** {{standard_description}}'
- id: integration-rules
title: Critical Integration Rules
template: |

File diff suppressed because it is too large.

View File

@@ -775,7 +775,7 @@ You are the "Vibe CEO" - thinking like a CEO with unlimited resources and a sing
- **Claude Code**: `/agent-name` (e.g., `/bmad-master`)
- **Cursor**: `@agent-name` (e.g., `@bmad-master`)
- **Windsurf**: `@agent-name` (e.g., `@bmad-master`)
- **Windsurf**: `/agent-name` (e.g., `/bmad-master`)
- **Trae**: `@agent-name` (e.g., `@bmad-master`)
- **Roo Code**: Select mode from mode selector (e.g., `bmad-master`)
- **GitHub Copilot**: Open the Chat view (`⌃⌘I` on Mac, `Ctrl+Alt+I` on Windows/Linux) and select **Agent** from the chat mode selector.

80
dist/agents/pm.txt vendored
View File

@@ -1159,7 +1159,7 @@ template:
output:
format: markdown
filename: docs/prd.md
title: "{{project_name}} Product Requirements Document (PRD)"
title: '{{project_name}} Product Requirements Document (PRD)'
workflow:
mode: interactive
@@ -1196,14 +1196,14 @@ sections:
prefix: FR
      instruction: Each Requirement will be a markdown bullet with an identifier sequence starting with FR
examples:
- "FR6: The Todo List uses AI to detect and warn against potentially duplicate todo items that are worded differently."
- 'FR6: The Todo List uses AI to detect and warn against potentially duplicate todo items that are worded differently.'
- id: non-functional
title: Non Functional
type: numbered-list
prefix: NFR
      instruction: Each Requirement will be a markdown bullet with an identifier sequence starting with NFR
examples:
- "NFR1: AWS service usage must aim to stay within free-tier limits where feasible."
- 'NFR1: AWS service usage must aim to stay within free-tier limits where feasible.'
- id: ui-goals
title: User Interface Design Goals
@@ -1229,24 +1229,24 @@ sections:
title: Core Screens and Views
        instruction: From a product perspective, what are the most critical screens or views necessary to deliver the PRD values and goals? This is meant to be Conceptual High Level to Drive Rough Epics or User Stories
examples:
- "Login Screen"
- "Main Dashboard"
- "Item Detail Page"
- "Settings Page"
- 'Login Screen'
- 'Main Dashboard'
- 'Item Detail Page'
- 'Settings Page'
- id: accessibility
title: "Accessibility: {None|WCAG AA|WCAG AAA|Custom Requirements}"
title: 'Accessibility: {None|WCAG AA|WCAG AAA|Custom Requirements}'
- id: branding
title: Branding
instruction: Any known branding elements or style guides that must be incorporated?
examples:
- "Replicate the look and feel of early 1900s black and white cinema, including animated effects replicating film damage or projector glitches during page or state transitions."
- "Attached is the full color pallet and tokens for our corporate branding."
- 'Replicate the look and feel of early 1900s black and white cinema, including animated effects replicating film damage or projector glitches during page or state transitions.'
            - 'Attached is the full color palette and tokens for our corporate branding.'
- id: target-platforms
title: "Target Device and Platforms: {Web Responsive|Mobile Only|Desktop Only|Cross-Platform}"
title: 'Target Device and Platforms: {Web Responsive|Mobile Only|Desktop Only|Cross-Platform}'
examples:
- "Web Responsive, and all mobile platforms"
- "iPhone Only"
- "ASCII Windows Desktop"
- 'Web Responsive, and all mobile platforms'
- 'iPhone Only'
- 'ASCII Windows Desktop'
- id: technical-assumptions
title: Technical Assumptions
@@ -1265,13 +1265,13 @@ sections:
testing: [Unit Only, Unit + Integration, Full Testing Pyramid]
sections:
- id: repository-structure
title: "Repository Structure: {Monorepo|Polyrepo|Multi-repo}"
title: 'Repository Structure: {Monorepo|Polyrepo|Multi-repo}'
- id: service-architecture
title: Service Architecture
instruction: "CRITICAL DECISION - Document the high-level service architecture (e.g., Monolith, Microservices, Serverless functions within a Monorepo)."
instruction: 'CRITICAL DECISION - Document the high-level service architecture (e.g., Monolith, Microservices, Serverless functions within a Monorepo).'
- id: testing-requirements
title: Testing Requirements
instruction: "CRITICAL DECISION - Document the testing requirements, unit only, integration, e2e, manual, need for manual testing convenience methods)."
instruction: 'CRITICAL DECISION - Document the testing requirements, unit only, integration, e2e, manual, need for manual testing convenience methods).'
- id: additional-assumptions
title: Additional Technical Assumptions and Requests
instruction: Throughout the entire process of drafting this document, if any other technical assumptions are raised or discovered appropriate for the architect, add them here as additional bulleted items
@@ -1291,10 +1291,10 @@ sections:
- Cross Cutting Concerns should flow through epics and stories and not be final stories. For example, adding a logging framework as a last story of an epic, or at the end of a project as a final epic or story would be terrible as we would not have logging from the beginning.
elicit: true
examples:
- "Epic 1: Foundation & Core Infrastructure: Establish project setup, authentication, and basic user management"
- "Epic 2: Core Business Entities: Create and manage primary domain objects with CRUD operations"
- "Epic 3: User Workflows & Interactions: Enable key user journeys and business processes"
- "Epic 4: Reporting & Analytics: Provide insights and data visualization for users"
- 'Epic 1: Foundation & Core Infrastructure: Establish project setup, authentication, and basic user management'
- 'Epic 2: Core Business Entities: Create and manage primary domain objects with CRUD operations'
- 'Epic 3: User Workflows & Interactions: Enable key user journeys and business processes'
- 'Epic 4: Reporting & Analytics: Provide insights and data visualization for users'
- id: epic-details
title: Epic {{epic_number}} {{epic_title}}
@@ -1316,7 +1316,7 @@ sections:
- Think "junior developer working for 2-4 hours" - stories must be small, focused, and self-contained
- If a story seems complex, break it down further as long as it can deliver a vertical slice
elicit: true
template: "{{epic_goal}}"
template: '{{epic_goal}}'
sections:
- id: story
title: Story {{epic_number}}.{{story_number}} {{story_title}}
@@ -1329,7 +1329,7 @@ sections:
- id: acceptance-criteria
title: Acceptance Criteria
type: numbered-list
item_template: "{{criterion_number}}: {{criteria}}"
item_template: '{{criterion_number}}: {{criteria}}'
repeatable: true
instruction: |
Define clear, comprehensive, and testable acceptance criteria that:
@@ -1364,7 +1364,7 @@ template:
output:
format: markdown
filename: docs/prd.md
title: "{{project_name}} Brownfield Enhancement PRD"
title: '{{project_name}} Brownfield Enhancement PRD'
workflow:
mode: interactive
@@ -1427,7 +1427,7 @@ sections:
- External API Documentation [[LLM: If from document-project, check ✓]]
- UX/UI Guidelines [[LLM: May not be in document-project]]
- Technical Debt Documentation [[LLM: If from document-project, check ✓]]
- "Other: {{other_docs}}"
- 'Other: {{other_docs}}'
instruction: |
- If document-project was already run: "Using existing project analysis from document-project output."
- If critical documentation is missing and no document-project: "I recommend running the document-project task first..."
@@ -1447,7 +1447,7 @@ sections:
- UI/UX Overhaul
- Technology Stack Upgrade
- Bug Fix and Stability Improvements
- "Other: {{other_type}}"
- 'Other: {{other_type}}'
- id: enhancement-description
title: Enhancement Description
instruction: 2-3 sentences describing what the user wants to add or change
@@ -1488,29 +1488,29 @@ sections:
prefix: FR
      instruction: Each Requirement will be a markdown bullet with an identifier starting with FR
examples:
- "FR1: The existing Todo List will integrate with the new AI duplicate detection service without breaking current functionality."
- 'FR1: The existing Todo List will integrate with the new AI duplicate detection service without breaking current functionality.'
- id: non-functional
title: Non Functional
type: numbered-list
prefix: NFR
      instruction: Each Requirement will be a markdown bullet with an identifier starting with NFR. Include constraints from the existing system
examples:
- "NFR1: Enhancement must maintain existing performance characteristics and not exceed current memory usage by more than 20%."
- 'NFR1: Enhancement must maintain existing performance characteristics and not exceed current memory usage by more than 20%.'
- id: compatibility
title: Compatibility Requirements
instruction: Critical for brownfield - what must remain compatible
type: numbered-list
prefix: CR
template: "{{requirement}}: {{description}}"
template: '{{requirement}}: {{description}}'
items:
- id: cr1
template: "CR1: {{existing_api_compatibility}}"
template: 'CR1: {{existing_api_compatibility}}'
- id: cr2
template: "CR2: {{database_schema_compatibility}}"
template: 'CR2: {{database_schema_compatibility}}'
- id: cr3
template: "CR3: {{ui_ux_consistency}}"
template: 'CR3: {{ui_ux_consistency}}'
- id: cr4
template: "CR4: {{integration_compatibility}}"
template: 'CR4: {{integration_compatibility}}'
- id: ui-enhancement-goals
title: User Interface Enhancement Goals
@@ -1593,10 +1593,10 @@ sections:
- id: epic-approach
title: Epic Approach
instruction: Explain the rationale for epic structure - typically single epic for brownfield unless multiple unrelated features
template: "**Epic Structure Decision**: {{epic_decision}} with rationale"
template: '**Epic Structure Decision**: {{epic_decision}} with rationale'
- id: epic-details
title: "Epic 1: {{enhancement_title}}"
title: 'Epic 1: {{enhancement_title}}'
instruction: |
Comprehensive epic that delivers the brownfield enhancement while maintaining existing functionality
@@ -1616,7 +1616,7 @@ sections:
**Integration Requirements**: {{integration_requirements}}
sections:
- id: story
title: "Story 1.{{story_number}} {{story_title}}"
title: 'Story 1.{{story_number}} {{story_title}}'
repeatable: true
template: |
As a {{user_type}},
@@ -1627,16 +1627,16 @@ sections:
title: Acceptance Criteria
type: numbered-list
instruction: Define criteria that include both new functionality and existing system integrity
item_template: "{{criterion_number}}: {{criteria}}"
item_template: '{{criterion_number}}: {{criteria}}'
- id: integration-verification
title: Integration Verification
instruction: Specific verification steps to ensure existing functionality remains intact
type: numbered-list
prefix: IV
items:
- template: "IV1: {{existing_functionality_verification}}"
- template: "IV2: {{integration_point_verification}}"
- template: "IV3: {{performance_impact_verification}}"
- template: 'IV1: {{existing_functionality_verification}}'
- template: 'IV2: {{integration_point_verification}}'
- template: 'IV3: {{performance_impact_verification}}'
==================== END: .bmad-core/templates/brownfield-prd-tmpl.yaml ====================
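
The compatibility (CR) and integration-verification (IV) fragments above all use the same prefixed numbered-list construct (`type: numbered-list`, a `prefix`, and per-item `template` strings). Assembled into one place purely for orientation, with no new fields introduced, the shape is:

```yaml
# Illustrative assembly of the CR pattern from the hunks above.
- id: compatibility
  title: Compatibility Requirements
  type: numbered-list
  prefix: CR
  template: '{{requirement}}: {{description}}'
  items:
    - id: cr1
      template: 'CR1: {{existing_api_compatibility}}'
    - id: cr2
      template: 'CR2: {{database_schema_compatibility}}'
```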
==================== START: .bmad-core/checklists/pm-checklist.md ====================

4
dist/agents/po.txt vendored
View File

@@ -593,7 +593,7 @@ template:
output:
format: markdown
filename: docs/stories/{{epic_num}}.{{story_num}}.{{story_title_short}}.md
title: "Story {{epic_num}}.{{story_num}}: {{story_title_short}}"
title: 'Story {{epic_num}}.{{story_num}}: {{story_title_short}}'
workflow:
mode: interactive
@@ -695,7 +695,7 @@ sections:
sections:
- id: agent-model
title: Agent Model Used
template: "{{agent_model_name_version}}"
template: '{{agent_model_name_version}}'
instruction: Record the specific AI agent model and version used for development
owner: dev-agent
editors: [dev-agent]

873
dist/agents/qa.txt vendored

File diff suppressed because it is too large.

4
dist/agents/sm.txt vendored
View File

@@ -369,7 +369,7 @@ template:
output:
format: markdown
filename: docs/stories/{{epic_num}}.{{story_num}}.{{story_title_short}}.md
title: "Story {{epic_num}}.{{story_num}}: {{story_title_short}}"
title: 'Story {{epic_num}}.{{story_num}}: {{story_title_short}}'
workflow:
mode: interactive
@@ -471,7 +471,7 @@ sections:
sections:
- id: agent-model
title: Agent Model Used
template: "{{agent_model_name_version}}"
template: '{{agent_model_name_version}}'
instruction: Record the specific AI agent model and version used for development
owner: dev-agent
editors: [dev-agent]

View File

@@ -343,7 +343,7 @@ template:
output:
format: markdown
filename: docs/front-end-spec.md
title: "{{project_name}} UI/UX Specification"
title: '{{project_name}} UI/UX Specification'
workflow:
mode: interactive
@@ -371,29 +371,29 @@ sections:
sections:
- id: user-personas
title: Target User Personas
template: "{{persona_descriptions}}"
template: '{{persona_descriptions}}'
examples:
- "**Power User:** Technical professionals who need advanced features and efficiency"
- "**Casual User:** Occasional users who prioritize ease of use and clear guidance"
- "**Administrator:** System managers who need control and oversight capabilities"
- '**Power User:** Technical professionals who need advanced features and efficiency'
- '**Casual User:** Occasional users who prioritize ease of use and clear guidance'
- '**Administrator:** System managers who need control and oversight capabilities'
- id: usability-goals
title: Usability Goals
template: "{{usability_goals}}"
template: '{{usability_goals}}'
examples:
- "Ease of learning: New users can complete core tasks within 5 minutes"
- "Efficiency of use: Power users can complete frequent tasks with minimal clicks"
- "Error prevention: Clear validation and confirmation for destructive actions"
- "Memorability: Infrequent users can return without relearning"
- 'Ease of learning: New users can complete core tasks within 5 minutes'
- 'Efficiency of use: Power users can complete frequent tasks with minimal clicks'
- 'Error prevention: Clear validation and confirmation for destructive actions'
- 'Memorability: Infrequent users can return without relearning'
- id: design-principles
title: Design Principles
template: "{{design_principles}}"
template: '{{design_principles}}'
type: numbered-list
examples:
- "**Clarity over cleverness** - Prioritize clear communication over aesthetic innovation"
- '**Clarity over cleverness** - Prioritize clear communication over aesthetic innovation'
- "**Progressive disclosure** - Show only what's needed, when it's needed"
- "**Consistent patterns** - Use familiar UI patterns throughout the application"
- "**Immediate feedback** - Every action should have a clear, immediate response"
- "**Accessible by default** - Design for all users from the start"
- '**Consistent patterns** - Use familiar UI patterns throughout the application'
- '**Immediate feedback** - Every action should have a clear, immediate response'
- '**Accessible by default** - Design for all users from the start'
- id: changelog
title: Change Log
type: table
@@ -415,7 +415,7 @@ sections:
title: Site Map / Screen Inventory
type: mermaid
mermaid_type: graph
template: "{{sitemap_diagram}}"
template: '{{sitemap_diagram}}'
examples:
- |
graph TD
@@ -455,7 +455,7 @@ sections:
repeatable: true
sections:
- id: flow
title: "{{flow_name}}"
title: '{{flow_name}}'
template: |
**User Goal:** {{flow_goal}}
@@ -467,13 +467,13 @@ sections:
title: Flow Diagram
type: mermaid
mermaid_type: graph
template: "{{flow_diagram}}"
template: '{{flow_diagram}}'
- id: edge-cases
title: "Edge Cases & Error Handling:"
title: 'Edge Cases & Error Handling:'
type: bullet-list
template: "- {{edge_case}}"
template: '- {{edge_case}}'
- id: notes
template: "**Notes:** {{flow_notes}}"
template: '**Notes:** {{flow_notes}}'
- id: wireframes-mockups
title: Wireframes & Mockups
@@ -482,13 +482,13 @@ sections:
elicit: true
sections:
- id: design-files
template: "**Primary Design Files:** {{design_tool_link}}"
template: '**Primary Design Files:** {{design_tool_link}}'
- id: key-screen-layouts
title: Key Screen Layouts
repeatable: true
sections:
- id: screen
title: "{{screen_name}}"
title: '{{screen_name}}'
template: |
**Purpose:** {{screen_purpose}}
@@ -508,13 +508,13 @@ sections:
elicit: true
sections:
- id: design-system-approach
template: "**Design System Approach:** {{design_system_approach}}"
template: '**Design System Approach:** {{design_system_approach}}'
- id: core-components
title: Core Components
repeatable: true
sections:
- id: component
title: "{{component_name}}"
title: '{{component_name}}'
template: |
**Purpose:** {{component_purpose}}
@@ -531,19 +531,19 @@ sections:
sections:
- id: visual-identity
title: Visual Identity
template: "**Brand Guidelines:** {{brand_guidelines_link}}"
template: '**Brand Guidelines:** {{brand_guidelines_link}}'
- id: color-palette
title: Color Palette
type: table
columns: ["Color Type", "Hex Code", "Usage"]
columns: ['Color Type', 'Hex Code', 'Usage']
rows:
- ["Primary", "{{primary_color}}", "{{primary_usage}}"]
- ["Secondary", "{{secondary_color}}", "{{secondary_usage}}"]
- ["Accent", "{{accent_color}}", "{{accent_usage}}"]
- ["Success", "{{success_color}}", "Positive feedback, confirmations"]
- ["Warning", "{{warning_color}}", "Cautions, important notices"]
- ["Error", "{{error_color}}", "Errors, destructive actions"]
- ["Neutral", "{{neutral_colors}}", "Text, borders, backgrounds"]
- ['Primary', '{{primary_color}}', '{{primary_usage}}']
- ['Secondary', '{{secondary_color}}', '{{secondary_usage}}']
- ['Accent', '{{accent_color}}', '{{accent_usage}}']
- ['Success', '{{success_color}}', 'Positive feedback, confirmations']
- ['Warning', '{{warning_color}}', 'Cautions, important notices']
- ['Error', '{{error_color}}', 'Errors, destructive actions']
- ['Neutral', '{{neutral_colors}}', 'Text, borders, backgrounds']
- id: typography
title: Typography
sections:
@@ -556,13 +556,13 @@ sections:
- id: type-scale
title: Type Scale
type: table
columns: ["Element", "Size", "Weight", "Line Height"]
columns: ['Element', 'Size', 'Weight', 'Line Height']
rows:
- ["H1", "{{h1_size}}", "{{h1_weight}}", "{{h1_line}}"]
- ["H2", "{{h2_size}}", "{{h2_weight}}", "{{h2_line}}"]
- ["H3", "{{h3_size}}", "{{h3_weight}}", "{{h3_line}}"]
- ["Body", "{{body_size}}", "{{body_weight}}", "{{body_line}}"]
- ["Small", "{{small_size}}", "{{small_weight}}", "{{small_line}}"]
- ['H1', '{{h1_size}}', '{{h1_weight}}', '{{h1_line}}']
- ['H2', '{{h2_size}}', '{{h2_weight}}', '{{h2_line}}']
- ['H3', '{{h3_size}}', '{{h3_weight}}', '{{h3_line}}']
- ['Body', '{{body_size}}', '{{body_weight}}', '{{body_line}}']
- ['Small', '{{small_size}}', '{{small_weight}}', '{{small_line}}']
- id: iconography
title: Iconography
template: |
@@ -583,7 +583,7 @@ sections:
sections:
- id: compliance-target
title: Compliance Target
template: "**Standard:** {{compliance_standard}}"
template: '**Standard:** {{compliance_standard}}'
- id: key-requirements
title: Key Requirements
template: |
@@ -603,7 +603,7 @@ sections:
- Form labels: {{form_requirements}}
- id: testing-strategy
title: Testing Strategy
template: "{{accessibility_testing}}"
template: '{{accessibility_testing}}'
- id: responsiveness
title: Responsiveness Strategy
@@ -613,12 +613,12 @@ sections:
- id: breakpoints
title: Breakpoints
type: table
columns: ["Breakpoint", "Min Width", "Max Width", "Target Devices"]
columns: ['Breakpoint', 'Min Width', 'Max Width', 'Target Devices']
rows:
- ["Mobile", "{{mobile_min}}", "{{mobile_max}}", "{{mobile_devices}}"]
- ["Tablet", "{{tablet_min}}", "{{tablet_max}}", "{{tablet_devices}}"]
- ["Desktop", "{{desktop_min}}", "{{desktop_max}}", "{{desktop_devices}}"]
- ["Wide", "{{wide_min}}", "-", "{{wide_devices}}"]
- ['Mobile', '{{mobile_min}}', '{{mobile_max}}', '{{mobile_devices}}']
- ['Tablet', '{{tablet_min}}', '{{tablet_max}}', '{{tablet_devices}}']
- ['Desktop', '{{desktop_min}}', '{{desktop_max}}', '{{desktop_devices}}']
- ['Wide', '{{wide_min}}', '-', '{{wide_devices}}']
- id: adaptation-patterns
title: Adaptation Patterns
template: |
@@ -637,11 +637,11 @@ sections:
sections:
- id: motion-principles
title: Motion Principles
template: "{{motion_principles}}"
template: '{{motion_principles}}'
- id: key-animations
title: Key Animations
repeatable: true
template: "- **{{animation_name}}:** {{animation_description}} (Duration: {{duration}}, Easing: {{easing}})"
template: '- **{{animation_name}}:** {{animation_description}} (Duration: {{duration}}, Easing: {{easing}})'
- id: performance
title: Performance Considerations
@@ -655,7 +655,7 @@ sections:
- **Animation FPS:** {{animation_goal}}
- id: design-strategies
title: Design Strategies
template: "{{performance_strategies}}"
template: '{{performance_strategies}}'
- id: next-steps
title: Next Steps
@@ -670,17 +670,17 @@ sections:
- id: immediate-actions
title: Immediate Actions
type: numbered-list
template: "{{action}}"
template: '{{action}}'
- id: design-handoff-checklist
title: Design Handoff Checklist
type: checklist
items:
- "All user flows documented"
- "Component inventory complete"
- "Accessibility requirements defined"
- "Responsive strategy clear"
- "Brand guidelines incorporated"
- "Performance goals established"
- 'All user flows documented'
- 'Component inventory complete'
- 'Accessibility requirements defined'
- 'Responsive strategy clear'
- 'Brand guidelines incorporated'
- 'Performance goals established'
- id: checklist-results
title: Checklist Results
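
Sections typed `mermaid` in this spec template carry the diagram source in a template variable, with worked diagrams under `examples`. Filled in, a sitemap section might look like the following sketch (the field structure comes from the hunks above; the graph nodes are hypothetical):

```yaml
# Field names from the template above; the example graph content is invented for illustration.
- id: sitemap
  title: Site Map / Screen Inventory
  type: mermaid
  mermaid_type: graph
  template: '{{sitemap_diagram}}'
  examples:
    - |
      graph TD
        A[Home] --> B[Dashboard]
        A --> C[Settings]
        B --> D[Item Detail]
```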

View File

@@ -981,8 +981,8 @@ template:
version: 2.0
output:
format: markdown
filename: "docs/{{game_name}}-game-design-document.md"
title: "{{game_title}} Game Design Document (GDD)"
filename: 'docs/{{game_name}}-game-design-document.md'
title: '{{game_title}} Game Design Document (GDD)'
workflow:
mode: interactive
@@ -1019,7 +1019,7 @@ sections:
title: Unique Selling Points
instruction: List 3-5 key features that differentiate this game from competitors
type: numbered-list
template: "{{usp}}"
template: '{{usp}}'
- id: core-gameplay
title: Core Gameplay
@@ -1064,7 +1064,7 @@ sections:
repeatable: true
sections:
- id: mechanic
title: "{{mechanic_name}}"
title: '{{mechanic_name}}'
template: |
**Description:** {{detailed_description}}
@@ -1129,7 +1129,7 @@ sections:
repeatable: true
sections:
- id: level-type
title: "{{level_type_name}}"
title: '{{level_type_name}}'
template: |
**Purpose:** {{gameplay_purpose}}
**Duration:** {{target_time}}
@@ -1230,10 +1230,10 @@ sections:
instruction: Break down the development into phases that can be converted to epics
sections:
- id: phase-1-core-systems
title: "Phase 1: Core Systems ({{duration}})"
title: 'Phase 1: Core Systems ({{duration}})'
sections:
- id: foundation-epic
title: "Epic: Foundation"
title: 'Epic: Foundation'
type: bullet-list
template: |
- Engine setup and configuration
@@ -1241,41 +1241,41 @@ sections:
- Core input handling
- Asset loading pipeline
- id: core-mechanics-epic
title: "Epic: Core Mechanics"
title: 'Epic: Core Mechanics'
type: bullet-list
template: |
- {{primary_mechanic}} implementation
- Basic physics and collision
- Player controller
- id: phase-2-gameplay-features
title: "Phase 2: Gameplay Features ({{duration}})"
title: 'Phase 2: Gameplay Features ({{duration}})'
sections:
- id: game-systems-epic
title: "Epic: Game Systems"
title: 'Epic: Game Systems'
type: bullet-list
template: |
- {{mechanic_2}} implementation
- {{mechanic_3}} implementation
- Game state management
- id: content-creation-epic
title: "Epic: Content Creation"
title: 'Epic: Content Creation'
type: bullet-list
template: |
- Level loading system
- First playable levels
- Basic UI implementation
- id: phase-3-polish-optimization
title: "Phase 3: Polish & Optimization ({{duration}})"
title: 'Phase 3: Polish & Optimization ({{duration}})'
sections:
- id: performance-epic
title: "Epic: Performance"
title: 'Epic: Performance'
type: bullet-list
template: |
- Optimization and profiling
- Mobile platform testing
- Memory management
- id: user-experience-epic
title: "Epic: User Experience"
title: 'Epic: User Experience'
type: bullet-list
template: |
- Audio implementation
@@ -1317,7 +1317,7 @@ sections:
title: References
instruction: List any competitive analysis, inspiration, or research sources
type: bullet-list
template: "{{reference}}"
template: '{{reference}}'
==================== END: .bmad-2d-phaser-game-dev/templates/game-design-doc-tmpl.yaml ====================
==================== START: .bmad-2d-phaser-game-dev/templates/level-design-doc-tmpl.yaml ====================
@@ -1327,8 +1327,8 @@ template:
version: 2.0
output:
format: markdown
filename: "docs/{{game_name}}-level-design-document.md"
title: "{{game_title}} Level Design Document"
filename: 'docs/{{game_name}}-level-design-document.md'
title: '{{game_title}} Level Design Document'
workflow:
mode: interactive
@@ -1389,7 +1389,7 @@ sections:
repeatable: true
sections:
- id: level-category
title: "{{category_name}} Levels"
title: '{{category_name}} Levels'
template: |
**Purpose:** {{gameplay_purpose}}
@@ -1694,19 +1694,19 @@ sections:
title: Playtesting Checklist
type: checklist
items:
- "Level completes within target time range"
- "All mechanics function correctly"
- "Difficulty feels appropriate for level category"
- "Player guidance is clear and effective"
- "No exploits or sequence breaks (unless intended)"
- 'Level completes within target time range'
- 'All mechanics function correctly'
- 'Difficulty feels appropriate for level category'
- 'Player guidance is clear and effective'
- 'No exploits or sequence breaks (unless intended)'
- id: player-experience-testing
title: Player Experience Testing
type: checklist
items:
- "Tutorial levels teach effectively"
- "Challenge feels fair and rewarding"
- "Flow and pacing maintain engagement"
- "Audio and visual feedback support gameplay"
- 'Tutorial levels teach effectively'
- 'Challenge feels fair and rewarding'
- 'Flow and pacing maintain engagement'
- 'Audio and visual feedback support gameplay'
- id: balance-validation
title: Balance Validation
template: |
@@ -1814,8 +1814,8 @@ template:
version: 2.0
output:
format: markdown
filename: "docs/{{game_name}}-game-brief.md"
title: "{{game_title}} Game Brief"
filename: 'docs/{{game_name}}-game-brief.md'
title: '{{game_title}} Game Brief'
workflow:
mode: interactive
@@ -2101,21 +2101,21 @@ sections:
title: Development Roadmap
sections:
- id: phase-1-preproduction
title: "Phase 1: Pre-Production ({{duration}})"
title: 'Phase 1: Pre-Production ({{duration}})'
type: bullet-list
template: |
- Detailed Game Design Document creation
- Technical architecture planning
- Art style exploration and pipeline setup
- id: phase-2-prototype
title: "Phase 2: Prototype ({{duration}})"
title: 'Phase 2: Prototype ({{duration}})'
type: bullet-list
template: |
- Core mechanic implementation
- Technical proof of concept
- Initial playtesting and iteration
- id: phase-3-production
title: "Phase 3: Production ({{duration}})"
title: 'Phase 3: Production ({{duration}})'
type: bullet-list
template: |
- Full feature development

View File

@@ -197,8 +197,8 @@ template:
version: 2.0
output:
format: markdown
filename: "docs/{{game_name}}-game-architecture.md"
title: "{{game_title}} Game Architecture Document"
filename: 'docs/{{game_name}}-game-architecture.md'
title: '{{game_title}} Game Architecture Document'
workflow:
mode: interactive
@@ -422,7 +422,7 @@ sections:
repeatable: true
sections:
- id: mechanic-system
title: "{{mechanic_name}} System"
title: '{{mechanic_name}} System'
template: |
**Purpose:** {{system_purpose}}
@@ -719,7 +719,7 @@ sections:
instruction: Break down the architecture implementation into phases that align with the GDD development phases
sections:
- id: phase-1-foundation
title: "Phase 1: Foundation ({{duration}})"
title: 'Phase 1: Foundation ({{duration}})'
sections:
- id: phase-1-core
title: Core Systems
@@ -737,7 +737,7 @@ sections:
- "Basic Scene Management System"
- "Asset Loading Foundation"
- id: phase-2-game-systems
title: "Phase 2: Game Systems ({{duration}})"
title: 'Phase 2: Game Systems ({{duration}})'
sections:
- id: phase-2-gameplay
title: Gameplay Systems
@@ -755,7 +755,7 @@ sections:
- "Physics and Collision Framework"
- "Game State Management System"
- id: phase-3-content-polish
title: "Phase 3: Content & Polish ({{duration}})"
title: 'Phase 3: Content & Polish ({{duration}})'
sections:
- id: phase-3-content
title: Content Systems
@@ -1045,7 +1045,7 @@ interface GameState {
interface GameSettings {
musicVolume: number;
sfxVolume: number;
difficulty: "easy" | "normal" | "hard";
difficulty: 'easy' | 'normal' | 'hard';
controls: ControlScheme;
}
```
@@ -1086,12 +1086,12 @@ class GameScene extends Phaser.Scene {
private inputManager!: InputManager;
constructor() {
super({ key: "GameScene" });
super({ key: 'GameScene' });
}
preload(): void {
// Load only scene-specific assets
this.load.image("player", "assets/player.png");
this.load.image('player', 'assets/player.png');
}
create(data: SceneData): void {
@@ -1116,7 +1116,7 @@ class GameScene extends Phaser.Scene {
this.inputManager.destroy();
// Remove event listeners
this.events.off("*");
this.events.off('*');
}
}
```
@@ -1125,13 +1125,13 @@ class GameScene extends Phaser.Scene {
```typescript
// Proper scene transitions with data
this.scene.start("NextScene", {
this.scene.start('NextScene', {
playerScore: this.playerScore,
currentLevel: this.currentLevel + 1,
});
// Scene overlays for UI
this.scene.launch("PauseMenuScene");
this.scene.launch('PauseMenuScene');
this.scene.pause();
```
@@ -1175,7 +1175,7 @@ class Player extends GameEntity {
private health!: HealthComponent;
constructor(scene: Phaser.Scene, x: number, y: number) {
super(scene, x, y, "player");
super(scene, x, y, 'player');
this.movement = this.addComponent(new MovementComponent(this));
this.health = this.addComponent(new HealthComponent(this, 100));
@@ -1195,7 +1195,7 @@ class GameManager {
constructor(scene: Phaser.Scene) {
if (GameManager.instance) {
throw new Error("GameManager already exists!");
throw new Error('GameManager already exists!');
}
this.scene = scene;
@@ -1205,7 +1205,7 @@ class GameManager {
static getInstance(): GameManager {
if (!GameManager.instance) {
throw new Error("GameManager not initialized!");
throw new Error('GameManager not initialized!');
}
return GameManager.instance;
}
@@ -1252,7 +1252,7 @@ class BulletPool {
}
// Pool exhausted - create new bullet
console.warn("Bullet pool exhausted, creating new bullet");
console.warn('Bullet pool exhausted, creating new bullet');
return new Bullet(this.scene, 0, 0);
}
@@ -1352,14 +1352,12 @@ class InputManager {
}
private setupKeyboard(): void {
this.keys = this.scene.input.keyboard.addKeys(
"W,A,S,D,SPACE,ESC,UP,DOWN,LEFT,RIGHT",
);
this.keys = this.scene.input.keyboard.addKeys('W,A,S,D,SPACE,ESC,UP,DOWN,LEFT,RIGHT');
}
private setupTouch(): void {
this.scene.input.on("pointerdown", this.handlePointerDown, this);
this.scene.input.on("pointerup", this.handlePointerUp, this);
this.scene.input.on('pointerdown', this.handlePointerDown, this);
this.scene.input.on('pointerup', this.handlePointerUp, this);
}
update(): void {
@@ -1386,9 +1384,9 @@ class InputManager {
class AssetManager {
loadAssets(): Promise<void> {
return new Promise((resolve, reject) => {
this.scene.load.on("filecomplete", this.handleFileComplete, this);
this.scene.load.on("loaderror", this.handleLoadError, this);
this.scene.load.on("complete", () => resolve());
this.scene.load.on('filecomplete', this.handleFileComplete, this);
this.scene.load.on('loaderror', this.handleLoadError, this);
this.scene.load.on('complete', () => resolve());
this.scene.load.start();
});
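A detail the excerpt leaves implicit: listeners attached with `load.on(...)` persist on the loader, so a cleanup step should detach them when the manager goes away. A sketch reusing the same handler references (the `destroy()` method itself is assumed):

```typescript
destroy(): void {
  this.scene.load.off('filecomplete', this.handleFileComplete, this);
  this.scene.load.off('loaderror', this.handleLoadError, this);
}
```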
@@ -1404,8 +1402,8 @@ class AssetManager {
private loadFallbackAsset(key: string): void {
// Load placeholder or default assets
switch (key) {
case "player":
this.scene.load.image("player", "assets/defaults/default-player.png");
case 'player':
this.scene.load.image('player', 'assets/defaults/default-player.png');
break;
default:
console.warn(`No fallback for asset: ${key}`);
@@ -1432,11 +1430,11 @@ class GameSystem {
private attemptRecovery(context: string): void {
switch (context) {
case "update":
case 'update':
// Reset system state
this.reset();
break;
case "render":
case 'render':
// Disable visual effects
this.disableEffects();
break;
@@ -1456,7 +1454,7 @@ class GameSystem {
```typescript
// Example test for game mechanics
describe("HealthComponent", () => {
describe('HealthComponent', () => {
let healthComponent: HealthComponent;
beforeEach(() => {
@@ -1464,18 +1462,18 @@ describe("HealthComponent", () => {
healthComponent = new HealthComponent(mockEntity, 100);
});
test("should initialize with correct health", () => {
test('should initialize with correct health', () => {
expect(healthComponent.currentHealth).toBe(100);
expect(healthComponent.maxHealth).toBe(100);
});
test("should handle damage correctly", () => {
test('should handle damage correctly', () => {
healthComponent.takeDamage(25);
expect(healthComponent.currentHealth).toBe(75);
expect(healthComponent.isAlive()).toBe(true);
});
test("should handle death correctly", () => {
test('should handle death correctly', () => {
healthComponent.takeDamage(150);
expect(healthComponent.currentHealth).toBe(0);
expect(healthComponent.isAlive()).toBe(false);
@@ -1488,7 +1486,7 @@ describe("HealthComponent", () => {
**Scene Testing:**
```typescript
describe("GameScene Integration", () => {
describe('GameScene Integration', () => {
let scene: GameScene;
let mockGame: Phaser.Game;
@@ -1498,7 +1496,7 @@ describe("GameScene Integration", () => {
scene = new GameScene();
});
test("should initialize all systems", () => {
test('should initialize all systems', () => {
scene.create({});
expect(scene.gameManager).toBeDefined();


@@ -402,8 +402,8 @@ template:
version: 2.0
output:
format: markdown
filename: "stories/{{epic_name}}/{{story_id}}-{{story_name}}.md"
title: "Story: {{story_title}}"
filename: 'stories/{{epic_name}}/{{story_id}}-{{story_name}}.md'
title: 'Story: {{story_title}}'
workflow:
mode: interactive
@@ -432,7 +432,7 @@ sections:
- id: description
title: Description
instruction: Provide a clear, concise description of what this story implements. Focus on the specific game feature or system being built. Reference the GDD section that defines this feature.
template: "{{clear_description_of_what_needs_to_be_implemented}}"
template: '{{clear_description_of_what_needs_to_be_implemented}}'
- id: acceptance-criteria
title: Acceptance Criteria
@@ -442,22 +442,22 @@ sections:
title: Functional Requirements
type: checklist
items:
- "{{specific_functional_requirement}}"
- '{{specific_functional_requirement}}'
- id: technical-requirements
title: Technical Requirements
type: checklist
items:
- "Code follows TypeScript strict mode standards"
- "Maintains 60 FPS on target devices"
- "No memory leaks or performance degradation"
- "{{specific_technical_requirement}}"
- 'Code follows TypeScript strict mode standards'
- 'Maintains 60 FPS on target devices'
- 'No memory leaks or performance degradation'
- '{{specific_technical_requirement}}'
- id: game-design-requirements
title: Game Design Requirements
type: checklist
items:
- "{{gameplay_requirement_from_gdd}}"
- "{{balance_requirement_if_applicable}}"
- "{{player_experience_requirement}}"
- '{{gameplay_requirement_from_gdd}}'
- '{{balance_requirement_if_applicable}}'
- '{{player_experience_requirement}}'
- id: technical-specifications
title: Technical Specifications
@@ -622,14 +622,14 @@ sections:
instruction: Checklist that must be completed before the story is considered finished
type: checklist
items:
- "All acceptance criteria met"
- "Code reviewed and approved"
- "Unit tests written and passing"
- "Integration tests passing"
- "Performance targets met"
- "No linting errors"
- "Documentation updated"
- "{{game_specific_dod_item}}"
- 'All acceptance criteria met'
- 'Code reviewed and approved'
- 'Unit tests written and passing'
- 'Integration tests passing'
- 'Performance targets met'
- 'No linting errors'
- 'Documentation updated'
- '{{game_specific_dod_item}}'
- id: notes
title: Notes


@@ -1231,7 +1231,7 @@ template:
output:
format: markdown
filename: docs/game-architecture.md
title: "{{project_name}} Game Architecture Document"
title: '{{project_name}} Game Architecture Document'
workflow:
mode: interactive
@@ -1341,11 +1341,11 @@ sections:
- Game management patterns (Singleton managers, Event systems, State machines)
- Data patterns (ScriptableObject configuration, Save/Load systems)
- Unity-specific patterns (Object pooling, Coroutines, Unity Events)
template: "- **{{pattern_name}}:** {{pattern_description}} - _Rationale:_ {{rationale}}"
template: '- **{{pattern_name}}:** {{pattern_description}} - _Rationale:_ {{rationale}}'
examples:
- "**Component-Based Architecture:** Using MonoBehaviour components for game logic - _Rationale:_ Aligns with Unity's design philosophy and enables reusable, testable game systems"
- "**ScriptableObject Data:** Using ScriptableObjects for game configuration - _Rationale:_ Enables data-driven design and easy balancing without code changes"
- "**Event-Driven Communication:** Using Unity Events and C# events for system decoupling - _Rationale:_ Supports modular architecture and easier testing"
- '**ScriptableObject Data:** Using ScriptableObjects for game configuration - _Rationale:_ Enables data-driven design and easy balancing without code changes'
- '**Event-Driven Communication:** Using Unity Events and C# events for system decoupling - _Rationale:_ Supports modular architecture and easier testing'
- id: tech-stack
title: Tech Stack
@@ -1384,13 +1384,13 @@ sections:
columns: [Category, Technology, Version, Purpose, Rationale]
instruction: Populate the technology stack table with all relevant Unity technologies
examples:
- "| **Game Engine** | Unity | 2022.3.21f1 | Core game development platform | Latest LTS version, stable 2D tooling, comprehensive package ecosystem |"
- '| **Game Engine** | Unity | 2022.3.21f1 | Core game development platform | Latest LTS version, stable 2D tooling, comprehensive package ecosystem |'
- "| **Language** | C# | 10.0 | Primary scripting language | Unity's native language, strong typing, excellent tooling |"
- "| **Render Pipeline** | Universal Render Pipeline (URP) | 14.0.10 | 2D/3D rendering | Optimized for mobile, excellent 2D features, future-proof |"
- "| **Input System** | Unity Input System | 1.7.0 | Cross-platform input handling | Modern input system, supports multiple devices, rebindable controls |"
- "| **Physics** | Unity 2D Physics | Built-in | 2D collision and physics | Integrated Box2D, optimized for 2D games |"
- "| **Audio** | Unity Audio | Built-in | Audio playback and mixing | Built-in audio system with mixer support |"
- "| **Testing** | Unity Test Framework | 1.1.33 | Unit and integration testing | Built-in testing framework based on NUnit |"
- '| **Render Pipeline** | Universal Render Pipeline (URP) | 14.0.10 | 2D/3D rendering | Optimized for mobile, excellent 2D features, future-proof |'
- '| **Input System** | Unity Input System | 1.7.0 | Cross-platform input handling | Modern input system, supports multiple devices, rebindable controls |'
- '| **Physics** | Unity 2D Physics | Built-in | 2D collision and physics | Integrated Box2D, optimized for 2D games |'
- '| **Audio** | Unity Audio | Built-in | Audio playback and mixing | Built-in audio system with mixer support |'
- '| **Testing** | Unity Test Framework | 1.1.33 | Unit and integration testing | Built-in testing framework based on NUnit |'
- id: data-models
title: Game Data Models
@@ -1408,7 +1408,7 @@ sections:
repeatable: true
sections:
- id: model
title: "{{model_name}}"
title: '{{model_name}}'
template: |
**Purpose:** {{model_purpose}}
@@ -1443,7 +1443,7 @@ sections:
sections:
- id: system-list
repeatable: true
title: "{{system_name}} System"
title: '{{system_name}} System'
template: |
**Responsibility:** {{system_description}}
@@ -1967,7 +1967,7 @@ sections:
repeatable: true
sections:
- id: integration
title: "{{service_name}} Integration"
title: '{{service_name}} Integration'
template: |
- **Purpose:** {{service_purpose}}
- **Documentation:** {{service_docs_url}}
@@ -2079,12 +2079,12 @@ sections:
- id: environments
title: Build Environments
repeatable: true
template: "- **{{env_name}}:** {{env_purpose}} - {{platform_settings}}"
template: '- **{{env_name}}:** {{env_purpose}} - {{platform_settings}}'
- id: platform-specific-builds
title: Platform-Specific Build Settings
type: code
language: text
template: "{{platform_build_configurations}}"
template: '{{platform_build_configurations}}'
- id: coding-standards
title: Coding Standards
@@ -2113,9 +2113,9 @@ sections:
columns: [Element, Convention, Example]
instruction: Only include if deviating from Unity defaults
examples:
- "| MonoBehaviour | PascalCase + Component suffix | PlayerController, HealthSystem |"
- "| ScriptableObject | PascalCase + Data/Config suffix | PlayerData, GameConfig |"
- "| Prefab | PascalCase descriptive | PlayerCharacter, EnvironmentTile |"
- '| MonoBehaviour | PascalCase + Component suffix | PlayerController, HealthSystem |'
- '| ScriptableObject | PascalCase + Data/Config suffix | PlayerData, GameConfig |'
- '| Prefab | PascalCase descriptive | PlayerCharacter, EnvironmentTile |'
- id: critical-rules
title: Critical Unity Rules
instruction: |
@@ -2127,7 +2127,7 @@ sections:
Avoid obvious rules like "follow SOLID principles" or "optimize performance"
repeatable: true
template: "- **{{rule_name}}:** {{rule_description}}"
template: '- **{{rule_name}}:** {{rule_description}}'
- id: unity-specifics
title: Unity-Specific Guidelines
condition: Critical Unity-specific rules needed
@@ -2136,7 +2136,7 @@ sections:
- id: unity-lifecycle
title: Unity Lifecycle Rules
repeatable: true
template: "- **{{lifecycle_method}}:** {{usage_rule}}"
template: '- **{{lifecycle_method}}:** {{usage_rule}}'
- id: test-strategy
title: Test Strategy and Standards
@@ -3698,7 +3698,7 @@ Use the `shard-doc` task or `@kayvan/markdown-tree-parser` tool for automatic ga
- **Claude Code**: `/bmad2du/game-designer`, `/bmad2du/game-developer`, `/bmad2du/game-sm`, `/bmad2du/game-architect`
- **Cursor**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Windsurf**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Windsurf**: `/bmad2du/game-designer`, `/bmad2du/game-developer`, `/bmad2du/game-sm`, `/bmad2du/game-architect`
- **Trae**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Roo Code**: Select mode from mode selector with bmad2du prefix
- **GitHub Copilot**: Open the Chat view (`⌃⌘I` on Mac, `Ctrl+Alt+I` on Windows/Linux) and select the appropriate game agent.


@@ -1175,7 +1175,7 @@ template:
output:
format: markdown
filename: docs/game-design-document.md
title: "{{game_title}} Game Design Document (GDD)"
title: '{{game_title}} Game Design Document (GDD)'
workflow:
mode: interactive
@@ -1223,8 +1223,8 @@ sections:
**Primary:** {{age_range}}, {{player_type}}, {{platform_preference}}
**Secondary:** {{secondary_audience}}
examples:
- "Primary: Ages 8-16, casual mobile gamers, prefer short play sessions"
- "Secondary: Adult puzzle enthusiasts, educators looking for teaching tools"
- 'Primary: Ages 8-16, casual mobile gamers, prefer short play sessions'
- 'Secondary: Adult puzzle enthusiasts, educators looking for teaching tools'
- id: platform-technical
title: Platform & Technical Requirements
instruction: Based on the technical preferences or user input, define the target platforms and Unity-specific requirements
@@ -1235,7 +1235,7 @@ sections:
**Screen Support:** {{resolution_range}}
**Build Targets:** {{build_targets}}
examples:
- "Primary Platform: Mobile (iOS/Android), Engine: Unity 2022.3 LTS & C#, Performance: 60 FPS on iPhone 8/Galaxy S8"
- 'Primary Platform: Mobile (iOS/Android), Engine: Unity 2022.3 LTS & C#, Performance: 60 FPS on iPhone 8/Galaxy S8'
- id: unique-selling-points
title: Unique Selling Points
instruction: List 3-5 key features that differentiate this game from competitors
@@ -1286,8 +1286,8 @@ sections:
- {{loss_condition_1}} - Trigger: {{unity_trigger}}
- {{loss_condition_2}} - Trigger: {{unity_trigger}}
examples:
- "Victory: Player reaches exit portal - Unity Event: OnTriggerEnter2D with Portal tag"
- "Failure: Health reaches zero - Trigger: Health component value <= 0"
- 'Victory: Player reaches exit portal - Unity Event: OnTriggerEnter2D with Portal tag'
- 'Failure: Health reaches zero - Trigger: Health component value <= 0'
- id: game-mechanics
title: Game Mechanics
@@ -1299,7 +1299,7 @@ sections:
repeatable: true
sections:
- id: mechanic
title: "{{mechanic_name}}"
title: '{{mechanic_name}}'
template: |
**Description:** {{detailed_description}}
@@ -1321,8 +1321,8 @@ sections:
- {{script_name}}.cs - {{responsibility}}
- {{manager_script}}.cs - {{management_role}}
examples:
- "Components Needed: Rigidbody2D, BoxCollider2D, PlayerMovement script"
- "Physics Requirements: 2D Physics material for ground friction, Gravity scale 3"
- 'Components Needed: Rigidbody2D, BoxCollider2D, PlayerMovement script'
- 'Physics Requirements: 2D Physics material for ground friction, Gravity scale 3'
- id: controls
title: Controls
instruction: Define all input methods for different platforms using Unity's Input System
@@ -1377,7 +1377,7 @@ sections:
**Late Game:** {{duration}} - {{difficulty_description}}
- Unity Config: {{scriptable_object_values}}
examples:
- "enemy speed: 2.0f, jump height: 4.5f, obstacle density: 0.3f"
- 'enemy speed: 2.0f, jump height: 4.5f, obstacle density: 0.3f'
- id: economy-resources
title: Economy & Resources
condition: has_economy
@@ -1400,7 +1400,7 @@ sections:
repeatable: true
sections:
- id: level-type
title: "{{level_type_name}}"
title: '{{level_type_name}}'
template: |
**Purpose:** {{gameplay_purpose}}
**Target Duration:** {{target_time}}
@@ -1424,7 +1424,7 @@ sections:
- {{prefab_name}} - {{prefab_purpose}}
examples:
- "Environment: TilemapRenderer with Platform tileset, Lighting: 2D Global Light + Point Lights"
- 'Environment: TilemapRenderer with Platform tileset, Lighting: 2D Global Light + Point Lights'
- id: level-progression
title: Level Progression
template: |
@@ -1439,7 +1439,7 @@ sections:
- Addressable Assets: {{addressable_groups}}
- Loading Screens: {{loading_implementation}}
examples:
- "Scene Naming: World{X}_Level{Y}_Name, Addressable Groups: Levels_World1, World_Environments"
- 'Scene Naming: World{X}_Level{Y}_Name, Addressable Groups: Levels_World1, World_Environments'
- id: technical-specifications
title: Technical Specifications
@@ -1471,7 +1471,7 @@ sections:
- Physics Settings: {{physics_config}}
examples:
- com.unity.addressables 1.20.5 - Asset loading and memory management
- "Color Space: Linear, Quality: Mobile/Desktop presets, Gravity: -20"
- 'Color Space: Linear, Quality: Mobile/Desktop presets, Gravity: -20'
- id: performance-requirements
title: Performance Requirements
template: |
@@ -1487,7 +1487,7 @@ sections:
- GC Allocs: <{{gc_limit}}KB per frame
- Draw Calls: <{{draw_calls}} per frame
examples:
- "60 FPS (minimum 30), CPU: <16.67ms, GPU: <16.67ms, GC: <4KB, Draws: <50"
- '60 FPS (minimum 30), CPU: <16.67ms, GPU: <16.67ms, GC: <4KB, Draws: <50'
- id: platform-specific
title: Platform Specific Requirements
template: |
@@ -1510,7 +1510,7 @@ sections:
- Browser Support: {{browser_list}}
- Compression: {{compression_format}}
examples:
- "Resolution: 1280x720 - 4K, Gamepad: Xbox/PlayStation controllers via Input System"
- 'Resolution: 1280x720 - 4K, Gamepad: Xbox/PlayStation controllers via Input System'
- id: asset-requirements
title: Asset Requirements
instruction: Define asset specifications for Unity pipeline optimization
@@ -1536,7 +1536,7 @@ sections:
- Font: {{font_requirements}}
- Icon Sizes: {{icon_specifications}}
examples:
- "Sprites: 32x32 to 256x256 at 16 PPU, Format: RGBA32 for quality/RGBA16 for performance"
- 'Sprites: 32x32 to 256x256 at 16 PPU, Format: RGBA32 for quality/RGBA16 for performance'
- id: technical-architecture-requirements
title: Technical Architecture Requirements
@@ -1578,8 +1578,8 @@ sections:
- Prefabs: {{prefab_naming}}
- Scenes: {{scene_naming}}
examples:
- "Architecture: Component-Based with ScriptableObject data containers"
- "Scripts: PascalCase (PlayerController), Prefabs: Player_Prefab, Scenes: Level_01_Forest"
- 'Architecture: Component-Based with ScriptableObject data containers'
- 'Scripts: PascalCase (PlayerController), Prefabs: Player_Prefab, Scenes: Level_01_Forest'
- id: unity-systems-integration
title: Unity Systems Integration
template: |
@@ -1601,8 +1601,8 @@ sections:
- **Memory Management:** {{memory_strategy}}
- **Build Pipeline:** {{build_automation}}
examples:
- "Input System: Action Maps for Menu/Gameplay contexts with device switching"
- "DOTween: Smooth UI transitions and gameplay animations"
- 'Input System: Action Maps for Menu/Gameplay contexts with device switching'
- 'DOTween: Smooth UI transitions and gameplay animations'
- id: data-management
title: Data Management
template: |
@@ -1625,8 +1625,8 @@ sections:
- **Memory Pools:** {{pooling_objects}}
- **Asset References:** {{asset_reference_system}}
examples:
- "Save Data: JSON format with AES encryption, stored in persistent data path"
- "ScriptableObjects: Game settings, level configurations, character data"
- 'Save Data: JSON format with AES encryption, stored in persistent data path'
- 'ScriptableObjects: Game settings, level configurations, character data'
- id: development-phases
title: Development Phases & Epic Planning
@@ -1638,15 +1638,15 @@ sections:
instruction: Present a high-level list of all phases for user approval. Each phase's design should deliver significant Unity functionality.
type: numbered-list
examples:
- "Phase 1: Unity Foundation & Core Systems: Project setup, input handling, basic scene management"
- "Phase 2: Core Game Mechanics: Player controller, physics systems, basic gameplay loop"
- "Phase 3: Level Systems & Content Pipeline: Scene loading, prefab systems, level progression"
- "Phase 4: Polish & Platform Optimization: Performance tuning, platform-specific features, deployment"
- 'Phase 1: Unity Foundation & Core Systems: Project setup, input handling, basic scene management'
- 'Phase 2: Core Game Mechanics: Player controller, physics systems, basic gameplay loop'
- 'Phase 3: Level Systems & Content Pipeline: Scene loading, prefab systems, level progression'
- 'Phase 4: Polish & Platform Optimization: Performance tuning, platform-specific features, deployment'
- id: phase-1-foundation
title: "Phase 1: Unity Foundation & Core Systems ({{duration}})"
title: 'Phase 1: Unity Foundation & Core Systems ({{duration}})'
sections:
- id: foundation-design
title: "Design: Unity Project Foundation"
title: 'Design: Unity Project Foundation'
type: bullet-list
template: |
- Unity project setup with proper folder structure and naming conventions
@@ -1656,9 +1656,9 @@ sections:
- Development tools setup (debugging, profiling integration)
- Initial build pipeline and platform configuration
examples:
- "Input System: Configure PlayerInput component with Action Maps for movement and UI"
- 'Input System: Configure PlayerInput component with Action Maps for movement and UI'
- id: core-systems-design
title: "Design: Essential Game Systems"
title: 'Design: Essential Game Systems'
type: bullet-list
template: |
- Save/Load system implementation with {{save_format}} format
@@ -1668,10 +1668,10 @@ sections:
- Basic UI framework and canvas configuration
- Settings and configuration management with ScriptableObjects
- id: phase-2-gameplay
title: "Phase 2: Core Gameplay Implementation ({{duration}})"
title: 'Phase 2: Core Gameplay Implementation ({{duration}})'
sections:
- id: gameplay-mechanics-design
title: "Design: Primary Game Mechanics"
title: 'Design: Primary Game Mechanics'
type: bullet-list
template: |
- Player controller with {{movement_type}} movement system
@@ -1681,7 +1681,7 @@ sections:
- Basic collision detection and response systems
- Animation system integration with Animator controllers
- id: level-systems-design
title: "Design: Level & Content Systems"
title: 'Design: Level & Content Systems'
type: bullet-list
template: |
- Scene loading and transition system
@@ -1691,10 +1691,10 @@ sections:
- Collectibles and pickup systems
- Victory/defeat condition implementation
- id: phase-3-polish
title: "Phase 3: Polish & Optimization ({{duration}})"
title: 'Phase 3: Polish & Optimization ({{duration}})'
sections:
- id: performance-design
title: "Design: Performance & Platform Optimization"
title: 'Design: Performance & Platform Optimization'
type: bullet-list
template: |
- Unity Profiler analysis and optimization passes
@@ -1704,7 +1704,7 @@ sections:
- Build size optimization and asset bundling
- Quality settings configuration for different device tiers
- id: user-experience-design
title: "Design: User Experience & Polish"
title: 'Design: User Experience & Polish'
type: bullet-list
template: |
- Complete UI/UX implementation with responsive design
@@ -1729,10 +1729,10 @@ sections:
- Cross Cutting Concerns should flow through epics and stories and not be final stories. For example, adding a logging framework as a last story of an epic, or at the end of a project as a final epic or story would be terrible as we would not have logging from the beginning.
elicit: true
examples:
- "Epic 1: Unity Foundation & Core Systems: Project setup, input handling, basic scene management"
- "Epic 2: Core Game Mechanics: Player controller, physics systems, basic gameplay loop"
- "Epic 3: Level Systems & Content Pipeline: Scene loading, prefab systems, level progression"
- "Epic 4: Polish & Platform Optimization: Performance tuning, platform-specific features, deployment"
- 'Epic 1: Unity Foundation & Core Systems: Project setup, input handling, basic scene management'
- 'Epic 2: Core Game Mechanics: Player controller, physics systems, basic gameplay loop'
- 'Epic 3: Level Systems & Content Pipeline: Scene loading, prefab systems, level progression'
- 'Epic 4: Polish & Platform Optimization: Performance tuning, platform-specific features, deployment'
- id: epic-details
title: Epic {{epic_number}} {{epic_title}}
@@ -1754,13 +1754,13 @@ sections:
- Think "junior developer working for 2-4 hours" - stories must be small, focused, and self-contained
- If a story seems complex, break it down further as long as it can deliver a vertical slice
elicit: true
template: "{{epic_goal}}"
template: '{{epic_goal}}'
sections:
- id: story
title: Story {{epic_number}}.{{story_number}} {{story_title}}
repeatable: true
instruction: Provide a clear, concise description of what this story implements. Focus on the specific game feature or system being built. Reference the GDD section that defines this feature and reference the game architecture section for additional implementation and integration specifics.
template: "{{clear_description_of_what_needs_to_be_implemented}}"
template: '{{clear_description_of_what_needs_to_be_implemented}}'
sections:
- id: acceptance-criteria
title: Acceptance Criteria
@@ -1770,7 +1770,7 @@ sections:
title: Functional Requirements
type: checklist
items:
- "{{specific_functional_requirement}}"
- '{{specific_functional_requirement}}'
- id: technical-requirements
title: Technical Requirements
type: checklist
@@ -1778,14 +1778,14 @@ sections:
- Code follows C# best practices
- Maintains stable frame rate on target devices
- No memory leaks or performance degradation
- "{{specific_technical_requirement}}"
- '{{specific_technical_requirement}}'
- id: game-design-requirements
title: Game Design Requirements
type: checklist
items:
- "{{gameplay_requirement_from_gdd}}"
- "{{balance_requirement_if_applicable}}"
- "{{player_experience_requirement}}"
- '{{gameplay_requirement_from_gdd}}'
- '{{balance_requirement_if_applicable}}'
- '{{player_experience_requirement}}'
- id: success-metrics
title: Success Metrics & Quality Assurance
@@ -1803,8 +1803,8 @@ sections:
- **Build Size:** Final build <{{size_limit}}MB for mobile, <{{desktop_limit}}MB for desktop
- **Battery Life:** Mobile gameplay sessions >{{battery_target}} hours on average device
examples:
- "Frame Rate: Consistent 60 FPS with <5% drops below 45 FPS on target hardware"
- "Crash Rate: <0.5% across iOS/Android, <0.1% on desktop platforms"
- 'Frame Rate: Consistent 60 FPS with <5% drops below 45 FPS on target hardware'
- 'Crash Rate: <0.5% across iOS/Android, <0.1% on desktop platforms'
- id: gameplay-metrics
title: Gameplay & User Engagement Metrics
type: bullet-list
@@ -1816,8 +1816,8 @@ sections:
- **Gameplay Completion:** {{completion_rate}}% complete main game content
- **Control Responsiveness:** Input lag <{{input_lag}}ms on all platforms
examples:
- "Tutorial Completion: 85% of players complete movement and basic mechanics tutorial"
- "Session Duration: Average 15-20 minutes per session for mobile, 30-45 minutes for desktop"
- 'Tutorial Completion: 85% of players complete movement and basic mechanics tutorial'
- 'Session Duration: Average 15-20 minutes per session for mobile, 30-45 minutes for desktop'
- id: platform-specific-metrics
title: Platform-Specific Quality Metrics
type: table
@@ -1862,17 +1862,17 @@ sections:
- Consider cross-platform testing requirements
- Account for Unity build and deployment steps
examples:
- "Foundation stories: Individual Unity systems (Input, Audio, Scene Management) - 1-2 days each"
- "Feature stories: Complete gameplay mechanics with UI and feedback - 2-4 days each"
- 'Foundation stories: Individual Unity systems (Input, Audio, Scene Management) - 1-2 days each'
- 'Feature stories: Complete gameplay mechanics with UI and feedback - 2-4 days each'
- id: recommended-agents
title: Recommended BMad Agent Sequence
type: numbered-list
template: |
1. **{{agent_name}}**: {{agent_responsibility}}
examples:
- "Unity Architect: Create detailed technical architecture document with specific Unity implementation patterns"
- "Unity Developer: Implement core systems and gameplay mechanics according to architecture"
- "QA Tester: Validate performance metrics and cross-platform functionality"
- 'Unity Architect: Create detailed technical architecture document with specific Unity implementation patterns'
- 'Unity Developer: Implement core systems and gameplay mechanics according to architecture'
- 'QA Tester: Validate performance metrics and cross-platform functionality'
==================== END: .bmad-2d-unity-game-dev/templates/game-design-doc-tmpl.yaml ====================
==================== START: .bmad-2d-unity-game-dev/templates/level-design-doc-tmpl.yaml ====================
@@ -1883,7 +1883,7 @@ template:
output:
format: markdown
filename: docs/level-design-document.md
title: "{{game_title}} Level Design Document"
title: '{{game_title}} Level Design Document'
workflow:
mode: interactive
@@ -1944,7 +1944,7 @@ sections:
repeatable: true
sections:
- id: level-category
title: "{{category_name}} Levels"
title: '{{category_name}} Levels'
template: |
**Purpose:** {{gameplay_purpose}}
@@ -2370,7 +2370,7 @@ template:
output:
format: markdown
filename: docs/game-brief.md
title: "{{game_title}} Game Brief"
title: '{{game_title}} Game Brief'
workflow:
mode: interactive
@@ -2656,21 +2656,21 @@ sections:
title: Development Roadmap
sections:
- id: phase-1-preproduction
title: "Phase 1: Pre-Production ({{duration}})"
title: 'Phase 1: Pre-Production ({{duration}})'
type: bullet-list
template: |
- Detailed Game Design Document creation
- Technical architecture planning
- Art style exploration and pipeline setup
- id: phase-2-prototype
title: "Phase 2: Prototype ({{duration}})"
title: 'Phase 2: Prototype ({{duration}})'
type: bullet-list
template: |
- Core mechanic implementation
- Technical proof of concept
- Initial playtesting and iteration
- id: phase-3-production
title: "Phase 3: Production ({{duration}})"
title: 'Phase 3: Production ({{duration}})'
type: bullet-list
template: |
- Full feature development
@@ -3384,7 +3384,7 @@ Use the `shard-doc` task or `@kayvan/markdown-tree-parser` tool for automatic ga
- **Claude Code**: `/bmad2du/game-designer`, `/bmad2du/game-developer`, `/bmad2du/game-sm`, `/bmad2du/game-architect`
- **Cursor**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Windsurf**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Windsurf**: `/bmad2du/game-designer`, `/bmad2du/game-developer`, `/bmad2du/game-sm`, `/bmad2du/game-architect`
- **Trae**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Roo Code**: Select mode from mode selector with bmad2du prefix
- **GitHub Copilot**: Open the Chat view (`⌃⌘I` on Mac, `Ctrl+Alt+I` on Windows/Linux) and select the appropriate game agent.


@@ -514,8 +514,8 @@ template:
version: 3.0
output:
format: markdown
filename: "stories/{{epic_name}}/{{story_id}}-{{story_name}}.md"
title: "Story: {{story_title}}"
filename: 'stories/{{epic_name}}/{{story_id}}-{{story_name}}.md'
title: 'Story: {{story_title}}'
workflow:
mode: interactive
@@ -544,7 +544,7 @@ sections:
- id: description
title: Description
instruction: Provide a clear, concise description of what this story implements. Focus on the specific game feature or system being built. Reference the GDD section that defines this feature.
template: "{{clear_description_of_what_needs_to_be_implemented}}"
template: '{{clear_description_of_what_needs_to_be_implemented}}'
- id: acceptance-criteria
title: Acceptance Criteria
@@ -554,7 +554,7 @@ sections:
title: Functional Requirements
type: checklist
items:
- "{{specific_functional_requirement}}"
- '{{specific_functional_requirement}}'
- id: technical-requirements
title: Technical Requirements
type: checklist
@@ -562,14 +562,14 @@ sections:
- Code follows C# best practices
- Maintains stable frame rate on target devices
- No memory leaks or performance degradation
- "{{specific_technical_requirement}}"
- '{{specific_technical_requirement}}'
- id: game-design-requirements
title: Game Design Requirements
type: checklist
items:
- "{{gameplay_requirement_from_gdd}}"
- "{{balance_requirement_if_applicable}}"
- "{{player_experience_requirement}}"
- '{{gameplay_requirement_from_gdd}}'
- '{{balance_requirement_if_applicable}}'
- '{{player_experience_requirement}}'
- id: technical-specifications
title: Technical Specifications
@@ -744,7 +744,7 @@ sections:
- Performance targets met
- No C# compiler errors or warnings
- Documentation updated
- "{{game_specific_dod_item}}"
- '{{game_specific_dod_item}}'
- id: notes
title: Notes

File diff suppressed because it is too large.

@@ -530,23 +530,23 @@ template:
output:
format: markdown
filename: docs/infrastructure-architecture.md
title: "{{project_name}} Infrastructure Architecture"
title: '{{project_name}} Infrastructure Architecture'
workflow:
mode: interactive
elicitation: advanced-elicitation
custom_elicitation:
title: "Infrastructure Architecture Elicitation Actions"
title: 'Infrastructure Architecture Elicitation Actions'
sections:
- id: infrastructure-overview
options:
- "Multi-Cloud Strategy Analysis - Evaluate cloud provider options and vendor lock-in considerations"
- "Regional Distribution Planning - Analyze latency requirements and data residency needs"
- "Environment Isolation Strategy - Design security boundaries and resource segregation"
- "Scalability Patterns Review - Assess auto-scaling needs and traffic patterns"
- "Compliance Requirements Analysis - Review regulatory and security compliance needs"
- "Cost-Benefit Analysis - Compare infrastructure options and TCO"
- "Proceed to next section"
- 'Multi-Cloud Strategy Analysis - Evaluate cloud provider options and vendor lock-in considerations'
- 'Regional Distribution Planning - Analyze latency requirements and data residency needs'
- 'Environment Isolation Strategy - Design security boundaries and resource segregation'
- 'Scalability Patterns Review - Assess auto-scaling needs and traffic patterns'
- 'Compliance Requirements Analysis - Review regulatory and security compliance needs'
- 'Cost-Benefit Analysis - Compare infrastructure options and TCO'
- 'Proceed to next section'
sections:
- id: initial-setup
@@ -606,7 +606,7 @@ sections:
sections:
- id: environments
repeatable: true
title: "{{environment_name}} Environment"
title: '{{environment_name}} Environment'
template: |
- **Purpose:** {{environment_purpose}}
- **Resources:** {{environment_resources}}
@@ -957,24 +957,24 @@ template:
output:
format: markdown
filename: docs/platform-infrastructure/platform-implementation.md
title: "{{project_name}} Platform Infrastructure Implementation"
title: '{{project_name}} Platform Infrastructure Implementation'
workflow:
mode: interactive
elicitation: advanced-elicitation
custom_elicitation:
title: "Platform Implementation Elicitation Actions"
title: 'Platform Implementation Elicitation Actions'
sections:
- id: foundation-infrastructure
options:
- "Platform Layer Security Hardening - Additional security controls and compliance validation"
- "Performance Optimization - Network and resource optimization"
- "Operational Excellence Enhancement - Automation and monitoring improvements"
- "Platform Integration Validation - Verify foundation supports upper layers"
- "Developer Experience Analysis - Foundation impact on developer workflows"
- "Disaster Recovery Testing - Foundation resilience validation"
- "BMAD Workflow Integration - Cross-agent support verification"
- "Finalize and Proceed to Container Platform"
- 'Platform Layer Security Hardening - Additional security controls and compliance validation'
- 'Performance Optimization - Network and resource optimization'
- 'Operational Excellence Enhancement - Automation and monitoring improvements'
- 'Platform Integration Validation - Verify foundation supports upper layers'
- 'Developer Experience Analysis - Foundation impact on developer workflows'
- 'Disaster Recovery Testing - Foundation resilience validation'
- 'BMAD Workflow Integration - Cross-agent support verification'
- 'Finalize and Proceed to Container Platform'
sections:
- id: initial-setup

dist/teams/team-all.txt vendored

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -30,7 +30,7 @@ The Test Architect (Quinn) provides comprehensive quality assurance throughout t
### Quick Command Reference
| **Stage** | **Command** | **Purpose** | **Output** | **Priority** |
|-----------|------------|-------------|------------|--------------|
| ------------------------ | ----------- | --------------------------------------- | --------------------------------------------------------------- | --------------------------- |
| **After Story Approval** | `*risk` | Identify integration & regression risks | `docs/qa/assessments/{epic}.{story}-risk-{YYYYMMDD}.md` | High for complex/brownfield |
| | `*design` | Create test strategy for dev | `docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md` | High for new features |
| **During Development** | `*trace` | Verify test coverage | `docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md` | Medium |
@@ -135,7 +135,7 @@ The Test Architect (Quinn) provides comprehensive quality assurance throughout t
### Understanding Gate Decisions
| **Status** | **Meaning** | **Action Required** | **Can Proceed?** |
|------------|-------------|-------------------|------------------|
| ------------ | -------------------------------------------- | ----------------------- | ---------------- |
| **PASS** | All critical requirements met | None | ✅ Yes |
| **CONCERNS** | Non-critical issues found | Team review recommended | ⚠️ With caution |
| **FAIL** | Critical issues (security, missing P0 tests) | Must fix | ❌ No |
@@ -146,7 +146,7 @@ The Test Architect (Quinn) provides comprehensive quality assurance throughout t
The Test Architect uses risk scoring to prioritize testing:
| **Risk Score** | **Calculation** | **Testing Priority** | **Gate Impact** |
|---------------|----------------|-------------------|----------------|
| -------------- | ------------------------------ | ------------------------- | ------------------------ |
| **9** | High probability × High impact | P0 - Must test thoroughly | FAIL if untested |
| **6** | Medium-high combinations | P1 - Should test well | CONCERNS if gaps |
| **4** | Medium combinations | P1 - Should test | CONCERNS if notable gaps |
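In other words, the score is a plain probability × impact product on a 1-3 scale. As a quick illustrative sketch (the `P2` fallback below is an assumption; the table as shown stops at 4):

```typescript
// Risk score = probability (1-3) x impact (1-3); thresholds mirror the table.
function riskPriority(probability: number, impact: number): string {
  const score = probability * impact;
  if (score >= 9) return 'P0 - must test thoroughly (FAIL if untested)';
  if (score >= 6) return 'P1 - should test well (CONCERNS if gaps)';
  if (score >= 4) return 'P1 - should test (CONCERNS if notable gaps)';
  return 'P2 - test as time permits (assumed)';
}
```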
@@ -228,7 +228,7 @@ All Test Architect activities create permanent records:
**Should I run Test Architect commands?**
| **Scenario** | **Before Dev** | **During Dev** | **After Dev** |
|-------------|---------------|----------------|---------------|
| ------------------------ | ------------------------------- | ---------------------------- | ---------------------------- |
| **Simple bug fix** | Optional | Optional | Required `*review` |
| **New feature** | Recommended `*risk`, `*design` | Optional `*trace` | Required `*review` |
| **Brownfield change** | **Required** `*risk`, `*design` | Recommended `*trace`, `*nfr` | Required `*review` |


@@ -377,7 +377,7 @@ Manages quality gate decisions:
The Test Architect provides value throughout the entire development lifecycle. Here's when and how to leverage each capability:
| **Stage** | **Command** | **When to Use** | **Value** | **Output** |
|-----------|------------|-----------------|-----------|------------|
| ------------------ | ----------- | ----------------------- | -------------------------- | -------------------------------------------------------------- |
| **Story Drafting** | `*risk` | After SM drafts story | Identify pitfalls early | `docs/qa/assessments/{epic}.{story}-risk-{YYYYMMDD}.md` |
| | `*design` | After risk assessment | Guide dev on test strategy | `docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md` |
| **Development** | `*trace` | Mid-implementation | Verify test coverage | `docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md` |

eslint.config.mjs (new file)

@@ -0,0 +1,119 @@
import js from '@eslint/js';
import eslintConfigPrettier from 'eslint-config-prettier/flat';
import nodePlugin from 'eslint-plugin-n';
import unicorn from 'eslint-plugin-unicorn';
import yml from 'eslint-plugin-yml';
export default [
// Global ignores for files/folders that should not be linted
{
ignores: ['dist/**', 'coverage/**', '**/*.min.js'],
},
// Base JavaScript recommended rules
js.configs.recommended,
// Node.js rules
...nodePlugin.configs['flat/mixed-esm-and-cjs'],
// Unicorn rules (modern best practices)
unicorn.configs.recommended,
// YAML linting
...yml.configs['flat/recommended'],
// Place Prettier last to disable conflicting stylistic rules
eslintConfigPrettier,
// Project-specific tweaks
{
rules: {
// Allow console for CLI tools in this repo
'no-console': 'off',
// Enforce .yaml file extension for consistency
'yml/file-extension': [
'error',
{
extension: 'yaml',
caseSensitive: true,
},
],
// Prefer double quotes in YAML wherever quoting is used, but allow the other to avoid escapes
'yml/quotes': [
'error',
{
prefer: 'double',
avoidEscape: true,
},
],
// Relax some Unicorn rules that are too opinionated for this codebase
'unicorn/prevent-abbreviations': 'off',
'unicorn/no-null': 'off',
},
},
// CLI/CommonJS scripts under tools/**
{
files: ['tools/**/*.js'],
rules: {
// Allow CommonJS patterns for Node CLI scripts
'unicorn/prefer-module': 'off',
'unicorn/import-style': 'off',
'unicorn/no-process-exit': 'off',
'n/no-process-exit': 'off',
'unicorn/no-await-expression-member': 'off',
'unicorn/prefer-top-level-await': 'off',
// Avoid failing CI on incidental unused vars in internal scripts
'no-unused-vars': 'off',
// Reduce style-only churn in internal tools
'unicorn/prefer-ternary': 'off',
'unicorn/filename-case': 'off',
'unicorn/no-array-reduce': 'off',
'unicorn/no-array-callback-reference': 'off',
'unicorn/consistent-function-scoping': 'off',
'n/no-extraneous-require': 'off',
'n/no-extraneous-import': 'off',
'n/no-unpublished-require': 'off',
'n/no-unpublished-import': 'off',
// Some scripts intentionally use globals provided at runtime
'no-undef': 'off',
// Additional relaxed rules for legacy/internal scripts
'no-useless-catch': 'off',
'unicorn/prefer-number-properties': 'off',
'no-unreachable': 'off',
},
},
// ESLint config file should not be checked for publish-related Node rules
{
files: ['eslint.config.mjs'],
rules: {
'n/no-unpublished-import': 'off',
},
},
// YAML workflow templates allow empty mapping values intentionally
{
files: ['bmad-core/workflows/**/*.yaml'],
rules: {
'yml/no-empty-mapping-value': 'off',
},
},
// GitHub workflow files in this repo may use empty mapping values
{
files: ['.github/workflows/**/*.yaml'],
rules: {
'yml/no-empty-mapping-value': 'off',
},
},
// Other GitHub YAML files may intentionally use empty values and reserved filenames
{
files: ['.github/**/*.yaml'],
rules: {
'yml/no-empty-mapping-value': 'off',
'unicorn/filename-case': 'off',
},
},
];


@@ -1,26 +1,26 @@
steps:
# Build the container image
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/{{PROJECT_ID}}/{{COMPANY_NAME}}-ai-agents:$COMMIT_SHA', '.']
- name: "gcr.io/cloud-builders/docker"
args: ["build", "-t", "gcr.io/{{PROJECT_ID}}/{{COMPANY_NAME}}-ai-agents:$COMMIT_SHA", "."]
# Push the container image to Container Registry
- name: 'gcr.io/cloud-builders/docker'
args: ['push', 'gcr.io/{{PROJECT_ID}}/{{COMPANY_NAME}}-ai-agents:$COMMIT_SHA']
- name: "gcr.io/cloud-builders/docker"
args: ["push", "gcr.io/{{PROJECT_ID}}/{{COMPANY_NAME}}-ai-agents:$COMMIT_SHA"]
# Deploy container image to Cloud Run
- name: 'gcr.io/google.com/cloudsdktool/cloud-sdk'
- name: "gcr.io/google.com/cloudsdktool/cloud-sdk"
entrypoint: gcloud
args:
- 'run'
- 'deploy'
- '{{COMPANY_NAME}}-ai-agents'
- '--image'
- 'gcr.io/{{PROJECT_ID}}/{{COMPANY_NAME}}-ai-agents:$COMMIT_SHA'
- '--region'
- '{{LOCATION}}'
- '--platform'
- 'managed'
- '--allow-unauthenticated'
- "run"
- "deploy"
- "{{COMPANY_NAME}}-ai-agents"
- "--image"
- "gcr.io/{{PROJECT_ID}}/{{COMPANY_NAME}}-ai-agents:$COMMIT_SHA"
- "--region"
- "{{LOCATION}}"
- "--platform"
- "managed"
- "--allow-unauthenticated"
images:
- 'gcr.io/{{PROJECT_ID}}/{{COMPANY_NAME}}-ai-agents:$COMMIT_SHA'
- "gcr.io/{{PROJECT_ID}}/{{COMPANY_NAME}}-ai-agents:$COMMIT_SHA"


@@ -60,10 +60,10 @@ commands:
task-execution:
flow: Read story → Implement game feature → Write tests → Pass tests → Update [x] → Next task
updates-ONLY:
- "Checkboxes: [ ] not started | [-] in progress | [x] complete"
- "Debug Log: | Task | File | Change | Reverted? |"
- "Completion Notes: Deviations only, <50 words"
- "Change Log: Requirement changes only"
- 'Checkboxes: [ ] not started | [-] in progress | [x] complete'
- 'Debug Log: | Task | File | Change | Reverted? |'
- 'Completion Notes: Deviations only, <50 words'
- 'Change Log: Requirement changes only'
blocking: Unapproved deps | Ambiguous after story check | 3 failures | Missing game config
done: Game feature works + Tests pass + 60 FPS + No lint errors + Follows Phaser 3 best practices
dependencies:


@@ -27,7 +27,7 @@ activation-instructions:
- When listing tasks/templates or presenting options during conversations, always show as numbered options list, allowing the user to type a number to select or execute
- STAY IN CHARACTER!
- CRITICAL: On activation, ONLY greet user and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
- "CRITICAL RULE: You are ONLY allowed to create/modify story files - NEVER implement! If asked to implement, tell user they MUST switch to Game Developer Agent"
- 'CRITICAL RULE: You are ONLY allowed to create/modify story files - NEVER implement! If asked to implement, tell user they MUST switch to Game Developer Agent'
agent:
name: Jordan
id: game-sm


@@ -73,7 +73,7 @@ interface GameState {
interface GameSettings {
musicVolume: number;
sfxVolume: number;
difficulty: "easy" | "normal" | "hard";
difficulty: 'easy' | 'normal' | 'hard';
controls: ControlScheme;
}
```
@@ -114,12 +114,12 @@ class GameScene extends Phaser.Scene {
private inputManager!: InputManager;
constructor() {
super({ key: "GameScene" });
super({ key: 'GameScene' });
}
preload(): void {
// Load only scene-specific assets
this.load.image("player", "assets/player.png");
this.load.image('player', 'assets/player.png');
}
create(data: SceneData): void {
@@ -144,7 +144,7 @@ class GameScene extends Phaser.Scene {
this.inputManager.destroy();
// Remove event listeners
this.events.off("*");
this.events.off('*');
}
}
```
@@ -153,13 +153,13 @@ class GameScene extends Phaser.Scene {
```typescript
// Proper scene transitions with data
this.scene.start("NextScene", {
this.scene.start('NextScene', {
playerScore: this.playerScore,
currentLevel: this.currentLevel + 1,
});
// Scene overlays for UI
this.scene.launch("PauseMenuScene");
this.scene.launch('PauseMenuScene');
this.scene.pause();
```
@@ -203,7 +203,7 @@ class Player extends GameEntity {
private health!: HealthComponent;
constructor(scene: Phaser.Scene, x: number, y: number) {
super(scene, x, y, "player");
super(scene, x, y, 'player');
this.movement = this.addComponent(new MovementComponent(this));
this.health = this.addComponent(new HealthComponent(this, 100));
@@ -223,7 +223,7 @@ class GameManager {
constructor(scene: Phaser.Scene) {
if (GameManager.instance) {
throw new Error("GameManager already exists!");
throw new Error('GameManager already exists!');
}
this.scene = scene;
@@ -233,7 +233,7 @@ class GameManager {
static getInstance(): GameManager {
if (!GameManager.instance) {
throw new Error("GameManager not initialized!");
throw new Error('GameManager not initialized!');
}
return GameManager.instance;
}
@@ -280,7 +280,7 @@ class BulletPool {
}
// Pool exhausted - create new bullet
console.warn("Bullet pool exhausted, creating new bullet");
console.warn('Bullet pool exhausted, creating new bullet');
return new Bullet(this.scene, 0, 0);
}
@@ -380,14 +380,12 @@ class InputManager {
}
private setupKeyboard(): void {
this.keys = this.scene.input.keyboard.addKeys(
"W,A,S,D,SPACE,ESC,UP,DOWN,LEFT,RIGHT",
);
this.keys = this.scene.input.keyboard.addKeys('W,A,S,D,SPACE,ESC,UP,DOWN,LEFT,RIGHT');
}
private setupTouch(): void {
this.scene.input.on("pointerdown", this.handlePointerDown, this);
this.scene.input.on("pointerup", this.handlePointerUp, this);
this.scene.input.on('pointerdown', this.handlePointerDown, this);
this.scene.input.on('pointerup', this.handlePointerUp, this);
}
update(): void {
@@ -414,9 +412,9 @@ class InputManager {
class AssetManager {
loadAssets(): Promise<void> {
return new Promise((resolve, reject) => {
this.scene.load.on("filecomplete", this.handleFileComplete, this);
this.scene.load.on("loaderror", this.handleLoadError, this);
this.scene.load.on("complete", () => resolve());
this.scene.load.on('filecomplete', this.handleFileComplete, this);
this.scene.load.on('loaderror', this.handleLoadError, this);
this.scene.load.on('complete', () => resolve());
this.scene.load.start();
});
@@ -432,8 +430,8 @@ class AssetManager {
private loadFallbackAsset(key: string): void {
// Load placeholder or default assets
switch (key) {
case "player":
this.scene.load.image("player", "assets/defaults/default-player.png");
case 'player':
this.scene.load.image('player', 'assets/defaults/default-player.png');
break;
default:
console.warn(`No fallback for asset: ${key}`);
@@ -460,11 +458,11 @@ class GameSystem {
private attemptRecovery(context: string): void {
switch (context) {
case "update":
case 'update':
// Reset system state
this.reset();
break;
case "render":
case 'render':
// Disable visual effects
this.disableEffects();
break;
@@ -484,7 +482,7 @@ class GameSystem {
```typescript
// Example test for game mechanics
describe("HealthComponent", () => {
describe('HealthComponent', () => {
let healthComponent: HealthComponent;
beforeEach(() => {
@@ -492,18 +490,18 @@ describe("HealthComponent", () => {
healthComponent = new HealthComponent(mockEntity, 100);
});
test("should initialize with correct health", () => {
test('should initialize with correct health', () => {
expect(healthComponent.currentHealth).toBe(100);
expect(healthComponent.maxHealth).toBe(100);
});
test("should handle damage correctly", () => {
test('should handle damage correctly', () => {
healthComponent.takeDamage(25);
expect(healthComponent.currentHealth).toBe(75);
expect(healthComponent.isAlive()).toBe(true);
});
test("should handle death correctly", () => {
test('should handle death correctly', () => {
healthComponent.takeDamage(150);
expect(healthComponent.currentHealth).toBe(0);
expect(healthComponent.isAlive()).toBe(false);
@@ -516,7 +514,7 @@ describe("HealthComponent", () => {
**Scene Testing:**
```typescript
describe("GameScene Integration", () => {
describe('GameScene Integration', () => {
let scene: GameScene;
let mockGame: Phaser.Game;
@@ -526,7 +524,7 @@ describe("GameScene Integration", () => {
scene = new GameScene();
});
test("should initialize all systems", () => {
test('should initialize all systems', () => {
scene.create({});
expect(scene.gameManager).toBeDefined();


@@ -17,21 +17,21 @@ workflow:
- brainstorming_session
- game_research_prompt
- player_research
notes: 'Start with brainstorming game concepts, then create comprehensive game brief. SAVE OUTPUT: Copy final game-brief.md to your project''s docs/design/ folder.'
notes: "Start with brainstorming game concepts, then create comprehensive game brief. SAVE OUTPUT: Copy final game-brief.md to your project's docs/design/ folder."
- agent: game-designer
creates: game-design-doc.md
requires: game-brief.md
optional_steps:
- competitive_analysis
- technical_research
notes: 'Create detailed Game Design Document using game-design-doc-tmpl. Defines all gameplay mechanics, progression, and technical requirements. SAVE OUTPUT: Copy final game-design-doc.md to your project''s docs/design/ folder.'
notes: "Create detailed Game Design Document using game-design-doc-tmpl. Defines all gameplay mechanics, progression, and technical requirements. SAVE OUTPUT: Copy final game-design-doc.md to your project's docs/design/ folder."
- agent: game-designer
creates: level-design-doc.md
requires: game-design-doc.md
optional_steps:
- level_prototyping
- difficulty_analysis
notes: 'Create level design framework using level-design-doc-tmpl. Establishes content creation guidelines and performance requirements. SAVE OUTPUT: Copy final level-design-doc.md to your project''s docs/design/ folder.'
notes: "Create level design framework using level-design-doc-tmpl. Establishes content creation guidelines and performance requirements. SAVE OUTPUT: Copy final level-design-doc.md to your project's docs/design/ folder."
- agent: solution-architect
creates: game-architecture.md
requires:
@@ -41,7 +41,7 @@ workflow:
- technical_research_prompt
- performance_analysis
- platform_research
notes: 'Create comprehensive technical architecture using game-architecture-tmpl. Defines Phaser 3 systems, performance optimization, and code structure. SAVE OUTPUT: Copy final game-architecture.md to your project''s docs/architecture/ folder.'
notes: "Create comprehensive technical architecture using game-architecture-tmpl. Defines Phaser 3 systems, performance optimization, and code structure. SAVE OUTPUT: Copy final game-architecture.md to your project's docs/architecture/ folder."
- agent: game-designer
validates: design_consistency
requires: all_design_documents
@@ -66,7 +66,7 @@ workflow:
optional_steps:
- quick_brainstorming
- concept_validation
notes: 'Create focused game brief for prototype. Emphasize core mechanics and immediate playability. SAVE OUTPUT: Copy final game-brief.md to your project''s docs/ folder.'
notes: "Create focused game brief for prototype. Emphasize core mechanics and immediate playability. SAVE OUTPUT: Copy final game-brief.md to your project's docs/ folder."
- agent: game-designer
creates: prototype-design.md
uses: create-doc prototype-design OR create-game-story


@@ -44,7 +44,7 @@ workflow:
notes: Implement stories in priority order. Test frequently and adjust design based on what feels fun. Document discoveries.
workflow_end:
action: prototype_evaluation
notes: 'Prototype complete. Evaluate core mechanics, gather feedback, and decide next steps: iterate, expand, or archive.'
notes: "Prototype complete. Evaluate core mechanics, gather feedback, and decide next steps: iterate, expand, or archive."
game_jam_sequence:
- step: jam_concept
agent: game-designer


@@ -61,13 +61,13 @@ commands:
- explain: teach me what and why you did whatever you just did in detail so I can learn. Explain to me as if you were training a junior Unity developer.
- exit: Say goodbye as the Game Developer, and then abandon inhabiting this persona
develop-story:
order-of-execution: "Read (first or next) task→Implement Task and its subtasks→Write tests→Execute validations→Only if ALL pass, then update the task checkbox with [x]→Update story section File List to ensure it lists and new or modified or deleted source file→repeat order-of-execution until complete"
order-of-execution: 'Read (first or next) task→Implement Task and its subtasks→Write tests→Execute validations→Only if ALL pass, then update the task checkbox with [x]→Update story section File List to ensure it lists and new or modified or deleted source file→repeat order-of-execution until complete'
story-file-updates-ONLY:
- CRITICAL: ONLY UPDATE THE STORY FILE WITH UPDATES TO SECTIONS INDICATED BELOW. DO NOT MODIFY ANY OTHER SECTIONS.
- CRITICAL: You are ONLY authorized to edit these specific sections of story files - Tasks / Subtasks Checkboxes, Dev Agent Record section and all its subsections, Agent Model Used, Debug Log References, Completion Notes List, File List, Change Log, Status
- CRITICAL: DO NOT modify Status, Story, Acceptance Criteria, Dev Notes, Testing sections, or any other sections not listed above
blocking: "HALT for: Unapproved deps needed, confirm with user | Ambiguous after story check | 3 failures attempting to implement or fix something repeatedly | Missing config | Failing regression"
ready-for-review: "Code matches requirements + All validations pass + Follows Unity & C# standards + File List complete + Stable FPS"
blocking: 'HALT for: Unapproved deps needed, confirm with user | Ambiguous after story check | 3 failures attempting to implement or fix something repeatedly | Missing config | Failing regression'
ready-for-review: 'Code matches requirements + All validations pass + Follows Unity & C# standards + File List complete + Stable FPS'
completion: "All Tasks and Subtasks marked [x] and have tests→Validations and full regression passes (DON'T BE LAZY, EXECUTE ALL TESTS and CONFIRM)→Ensure File List is Complete→run the task execute-checklist for the checklist game-story-dod-checklist→set story status: 'Ready for Review'→HALT"
dependencies:
tasks:


@@ -456,7 +456,7 @@ Use the `shard-doc` task or `@kayvan/markdown-tree-parser` tool for automatic ga
- **Claude Code**: `/bmad2du/game-designer`, `/bmad2du/game-developer`, `/bmad2du/game-sm`, `/bmad2du/game-architect`
- **Cursor**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Windsurf**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Windsurf**: `/bmad2du/game-designer`, `/bmad2du/game-developer`, `/bmad2du/game-sm`, `/bmad2du/game-architect`
- **Trae**: `@bmad2du/game-designer`, `@bmad2du/game-developer`, `@bmad2du/game-sm`, `@bmad2du/game-architect`
- **Roo Code**: Select mode from mode selector with bmad2du prefix
- **GitHub Copilot**: Open the Chat view (`⌃⌘I` on Mac, `Ctrl+Alt+I` on Windows/Linux) and select the appropriate game agent.


@@ -17,21 +17,21 @@ workflow:
- brainstorming_session
- game_research_prompt
- player_research
notes: 'Start with brainstorming game concepts, then create comprehensive game brief. SAVE OUTPUT: Copy final game-brief.md to your project''s docs/design/ folder.'
notes: "Start with brainstorming game concepts, then create comprehensive game brief. SAVE OUTPUT: Copy final game-brief.md to your project's docs/design/ folder."
- agent: game-designer
creates: game-design-doc.md
requires: game-brief.md
optional_steps:
- competitive_analysis
- technical_research
notes: 'Create detailed Game Design Document using game-design-doc-tmpl. Defines all gameplay mechanics, progression, and technical requirements. SAVE OUTPUT: Copy final game-design-doc.md to your project''s docs/design/ folder.'
notes: "Create detailed Game Design Document using game-design-doc-tmpl. Defines all gameplay mechanics, progression, and technical requirements. SAVE OUTPUT: Copy final game-design-doc.md to your project's docs/design/ folder."
- agent: game-designer
creates: level-design-doc.md
requires: game-design-doc.md
optional_steps:
- level_prototyping
- difficulty_analysis
notes: 'Create level design framework using level-design-doc-tmpl. Establishes content creation guidelines and performance requirements. SAVE OUTPUT: Copy final level-design-doc.md to your project''s docs/design/ folder.'
notes: "Create level design framework using level-design-doc-tmpl. Establishes content creation guidelines and performance requirements. SAVE OUTPUT: Copy final level-design-doc.md to your project's docs/design/ folder."
- agent: solution-architect
creates: game-architecture.md
requires:
@@ -41,7 +41,7 @@ workflow:
- technical_research_prompt
- performance_analysis
- platform_research
notes: 'Create comprehensive technical architecture using game-architecture-tmpl. Defines Unity systems, performance optimization, and code structure. SAVE OUTPUT: Copy final game-architecture.md to your project''s docs/architecture/ folder.'
notes: "Create comprehensive technical architecture using game-architecture-tmpl. Defines Unity systems, performance optimization, and code structure. SAVE OUTPUT: Copy final game-architecture.md to your project's docs/architecture/ folder."
- agent: game-designer
validates: design_consistency
requires: all_design_documents
@@ -66,7 +66,7 @@ workflow:
optional_steps:
- quick_brainstorming
- concept_validation
notes: 'Create focused game brief for prototype. Emphasize core mechanics and immediate playability. SAVE OUTPUT: Copy final game-brief.md to your project''s docs/ folder.'
notes: "Create focused game brief for prototype. Emphasize core mechanics and immediate playability. SAVE OUTPUT: Copy final game-brief.md to your project's docs/ folder."
- agent: game-designer
creates: prototype-design.md
uses: create-doc prototype-design OR create-game-story

View File

@@ -44,7 +44,7 @@ workflow:
notes: Implement stories in priority order. Test frequently in the Unity Editor and adjust design based on what feels fun. Document discoveries.
workflow_end:
action: prototype_evaluation
notes: 'Prototype complete. Evaluate core mechanics, gather feedback, and decide next steps: iterate, expand, or archive.'
notes: "Prototype complete. Evaluate core mechanics, gather feedback, and decide next steps: iterate, expand, or archive."
game_jam_sequence:
- step: jam_concept
agent: game-designer

package-lock.json (generated, 1466 lines)

File diff suppressed because it is too large

View File

@@ -1,7 +1,23 @@
{
"$schema": "https://json.schemastore.org/package.json",
"name": "bmad-method",
"version": "5.0.0",
"description": "Breakthrough Method of Agile AI-driven Development",
"keywords": [
"agile",
"ai",
"orchestrator",
"development",
"methodology",
"agents",
"bmad"
],
"repository": {
"type": "git",
"url": "git+https://github.com/bmadcode/BMAD-METHOD.git"
},
"license": "MIT",
"author": "Brian (BMad) Madison",
"main": "tools/cli.js",
"bin": {
"bmad": "tools/bmad-npx-wrapper.js",
@@ -11,27 +27,43 @@
"build": "node tools/cli.js build",
"build:agents": "node tools/cli.js build --agents-only",
"build:teams": "node tools/cli.js build --teams-only",
"list:agents": "node tools/cli.js list:agents",
"validate": "node tools/cli.js validate",
"flatten": "node tools/flattener/main.js",
"format": "prettier --write \"**/*.{js,cjs,mjs,json,md,yaml}\"",
"format:check": "prettier --check \"**/*.{js,cjs,mjs,json,md,yaml}\"",
"install:bmad": "node tools/installer/bin/bmad.js install",
"format": "prettier --write \"**/*.md\"",
"version:patch": "node tools/version-bump.js patch",
"version:minor": "node tools/version-bump.js minor",
"version:major": "node tools/version-bump.js major",
"version:expansion": "node tools/bump-expansion-version.js",
"version:expansion:set": "node tools/update-expansion-version.js",
"version:all": "node tools/bump-all-versions.js",
"version:all:minor": "node tools/bump-all-versions.js minor",
"version:all:major": "node tools/bump-all-versions.js major",
"version:all:patch": "node tools/bump-all-versions.js patch",
"version:expansion:all": "node tools/bump-all-versions.js",
"version:expansion:all:minor": "node tools/bump-all-versions.js minor",
"version:expansion:all:major": "node tools/bump-all-versions.js major",
"version:expansion:all:patch": "node tools/bump-all-versions.js patch",
"lint": "eslint . --ext .js,.cjs,.mjs,.yaml --max-warnings=0",
"lint:fix": "eslint . --ext .js,.cjs,.mjs,.yaml --fix",
"list:agents": "node tools/cli.js list:agents",
"prepare": "husky",
"release": "semantic-release",
"release:test": "semantic-release --dry-run --no-ci || echo 'Config test complete - authentication errors are expected locally'",
"prepare": "husky"
"validate": "node tools/cli.js validate",
"version:all": "node tools/bump-all-versions.js",
"version:all:major": "node tools/bump-all-versions.js major",
"version:all:minor": "node tools/bump-all-versions.js minor",
"version:all:patch": "node tools/bump-all-versions.js patch",
"version:expansion": "node tools/bump-expansion-version.js",
"version:expansion:all": "node tools/bump-all-versions.js",
"version:expansion:all:major": "node tools/bump-all-versions.js major",
"version:expansion:all:minor": "node tools/bump-all-versions.js minor",
"version:expansion:all:patch": "node tools/bump-all-versions.js patch",
"version:expansion:set": "node tools/update-expansion-version.js",
"version:major": "node tools/version-bump.js major",
"version:minor": "node tools/version-bump.js minor",
"version:patch": "node tools/version-bump.js patch"
},
"lint-staged": {
"**/*.{js,cjs,mjs}": [
"eslint --fix --max-warnings=0",
"prettier --write"
],
"**/*.yaml": [
"eslint --fix",
"prettier --write"
],
"**/*.{json,md}": [
"prettier --write"
]
},
"dependencies": {
"@kayvan/markdown-tree-parser": "^1.5.0",
@@ -46,37 +78,25 @@
"ora": "^5.4.1",
"semver": "^7.6.3"
},
"keywords": [
"agile",
"ai",
"orchestrator",
"development",
"methodology",
"agents",
"bmad"
],
"author": "Brian (BMad) Madison",
"license": "MIT",
"repository": {
"type": "git",
"url": "git+https://github.com/bmadcode/BMAD-METHOD.git"
},
"engines": {
"node": ">=20.0.0"
},
"devDependencies": {
"@eslint/js": "^9.33.0",
"@semantic-release/changelog": "^6.0.3",
"@semantic-release/git": "^10.0.1",
"eslint": "^9.33.0",
"eslint-config-prettier": "^10.1.8",
"eslint-plugin-n": "^17.21.3",
"eslint-plugin-unicorn": "^60.0.0",
"eslint-plugin-yml": "^1.18.0",
"husky": "^9.1.7",
"jest": "^30.0.4",
"lint-staged": "^16.1.1",
"prettier": "^3.5.3",
"prettier-plugin-packagejson": "^2.5.19",
"semantic-release": "^22.0.0",
"yaml-eslint-parser": "^1.2.3",
"yaml-lint": "^1.7.0"
},
"lint-staged": {
"**/*.md": [
"prettier --write"
]
"engines": {
"node": ">=20.10.0"
}
}

prettier.config.mjs (new file, 32 lines)
View File

@@ -0,0 +1,32 @@
export default {
$schema: 'https://json.schemastore.org/prettierrc',
printWidth: 100,
tabWidth: 2,
useTabs: false,
semi: true,
singleQuote: true,
trailingComma: 'all',
bracketSpacing: true,
arrowParens: 'always',
endOfLine: 'lf',
proseWrap: 'preserve',
overrides: [
{
files: ['*.md'],
options: { proseWrap: 'preserve' },
},
{
files: ['*.yaml'],
options: { singleQuote: false },
},
{
files: ['*.json', '*.jsonc'],
options: { singleQuote: false },
},
{
files: ['*.cjs'],
options: { parser: 'babel' },
},
],
plugins: ['prettier-plugin-packagejson'],
};
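
A quick way to confirm these overrides resolve as intended is Prettier's public API. A minimal sketch, assuming prettier ^3 from devDependencies and that it runs from the repo root so this config file is found; `example.yaml` and `example.js` are hypothetical paths:

```js
const prettier = require('prettier');

async function main() {
  // resolveConfig walks up from the given path and applies matching overrides
  const yamlOptions = await prettier.resolveConfig('example.yaml');
  const jsOptions = await prettier.resolveConfig('example.js');
  console.log(yamlOptions.singleQuote); // false — the *.yaml override keeps double quotes
  console.log(jsOptions.singleQuote); // true — the base config prefers single quotes

  // format is async in Prettier 3
  const formatted = await prettier.format('const x = "hi"\n', {
    ...jsOptions,
    parser: 'babel',
  });
  console.log(formatted); // const x = 'hi';
}

main();
```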

View File

@@ -5,16 +5,16 @@
* This file ensures proper execution when run via npx from GitHub
*/
const { execSync } = require('child_process');
const path = require('path');
const fs = require('fs');
const { execSync } = require('node:child_process');
const path = require('node:path');
const fs = require('node:fs');
// Check if we're running in an npx temporary directory
const isNpxExecution = __dirname.includes('_npx') || __dirname.includes('.npm');
// If running via npx, we need to handle things differently
if (isNpxExecution) {
const args = process.argv.slice(2);
const arguments_ = process.argv.slice(2);
// Use the installer for all commands
const bmadScriptPath = path.join(__dirname, 'installer', 'bin', 'bmad.js');
@@ -26,9 +26,9 @@ if (isNpxExecution) {
}
try {
execSync(`node "${bmadScriptPath}" ${args.join(' ')}`, {
execSync(`node "${bmadScriptPath}" ${arguments_.join(' ')}`, {
stdio: 'inherit',
cwd: path.dirname(__dirname)
cwd: path.dirname(__dirname),
});
} catch (error) {
process.exit(error.status || 1);
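
The `_npx`/`.npm` path check above is a simple heuristic: npx extracts remote packages into its cache directory before running them, so the script's own directory reveals how it was launched. A standalone sketch of the same check; the sample paths are hypothetical:

```js
// Heuristic from the wrapper above: npx runs packages out of a cache
// directory whose path contains '_npx' (or '.npm').
function isNpxExecution(dirname) {
  return dirname.includes('_npx') || dirname.includes('.npm');
}

console.log(isNpxExecution('/home/user/.npm/_npx/abc123/node_modules/bmad-method/tools'));
// true — invoked via `npx bmad-method`
console.log(isNpxExecution('/home/user/projects/BMAD-METHOD/tools'));
// false — running from a local checkout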

View File

@@ -1,23 +1,23 @@
const fs = require("node:fs").promises;
const path = require("node:path");
const DependencyResolver = require("../lib/dependency-resolver");
const yamlUtils = require("../lib/yaml-utils");
const fs = require('node:fs').promises;
const path = require('node:path');
const DependencyResolver = require('../lib/dependency-resolver');
const yamlUtilities = require('../lib/yaml-utils');
class WebBuilder {
constructor(options = {}) {
this.rootDir = options.rootDir || process.cwd();
this.outputDirs = options.outputDirs || [path.join(this.rootDir, "dist")];
this.outputDirs = options.outputDirs || [path.join(this.rootDir, 'dist')];
this.resolver = new DependencyResolver(this.rootDir);
this.templatePath = path.join(
this.rootDir,
"tools",
"md-assets",
"web-agent-startup-instructions.md"
'tools',
'md-assets',
'web-agent-startup-instructions.md',
);
}
parseYaml(content) {
const yaml = require("js-yaml");
const yaml = require('js-yaml');
return yaml.load(content);
}
@@ -42,11 +42,21 @@ class WebBuilder {
generateWebInstructions(bundleType, packName = null) {
// Generate dynamic web instructions based on bundle type
const rootExample = packName ? `.${packName}` : '.bmad-core';
const examplePath = packName ? `.${packName}/folder/filename.md` : '.bmad-core/folder/filename.md';
const personasExample = packName ? `.${packName}/personas/analyst.md` : '.bmad-core/personas/analyst.md';
const tasksExample = packName ? `.${packName}/tasks/create-story.md` : '.bmad-core/tasks/create-story.md';
const utilsExample = packName ? `.${packName}/utils/template-format.md` : '.bmad-core/utils/template-format.md';
const tasksRef = packName ? `.${packName}/tasks/create-story.md` : '.bmad-core/tasks/create-story.md';
const examplePath = packName
? `.${packName}/folder/filename.md`
: '.bmad-core/folder/filename.md';
const personasExample = packName
? `.${packName}/personas/analyst.md`
: '.bmad-core/personas/analyst.md';
const tasksExample = packName
? `.${packName}/tasks/create-story.md`
: '.bmad-core/tasks/create-story.md';
const utilitiesExample = packName
? `.${packName}/utils/template-format.md`
: '.bmad-core/utils/template-format.md';
const tasksReference = packName
? `.${packName}/tasks/create-story.md`
: '.bmad-core/tasks/create-story.md';
return `# Web Agent Bundle Instructions
@@ -79,8 +89,8 @@ dependencies:
These references map directly to bundle sections:
- \`utils: template-format\` → Look for \`==================== START: ${utilsExample} ====================\`
- \`tasks: create-story\` → Look for \`==================== START: ${tasksRef} ====================\`
- \`utils: template-format\` → Look for \`==================== START: ${utilitiesExample} ====================\`
- \`tasks: create-story\` → Look for \`==================== START: ${tasksReference} ====================\`
3. **Execution Context**: You are operating in a web environment. All your capabilities and knowledge are contained within this bundle. Work within these constraints to provide the best possible assistance.
@@ -112,10 +122,10 @@ These references map directly to bundle sections:
// Write to all output directories
for (const outputDir of this.outputDirs) {
const outputPath = path.join(outputDir, "agents");
const outputPath = path.join(outputDir, 'agents');
await fs.mkdir(outputPath, { recursive: true });
const outputFile = path.join(outputPath, `${agentId}.txt`);
await fs.writeFile(outputFile, bundle, "utf8");
await fs.writeFile(outputFile, bundle, 'utf8');
}
}
@@ -131,10 +141,10 @@ These references map directly to bundle sections:
// Write to all output directories
for (const outputDir of this.outputDirs) {
const outputPath = path.join(outputDir, "teams");
const outputPath = path.join(outputDir, 'teams');
await fs.mkdir(outputPath, { recursive: true });
const outputFile = path.join(outputPath, `${teamId}.txt`);
await fs.writeFile(outputFile, bundle, "utf8");
await fs.writeFile(outputFile, bundle, 'utf8');
}
}
@@ -157,7 +167,7 @@ These references map directly to bundle sections:
sections.push(this.formatSection(resourcePath, resource.content, 'bmad-core'));
}
return sections.join("\n");
return sections.join('\n');
}
async buildTeamBundle(teamId) {
@@ -182,12 +192,12 @@ These references map directly to bundle sections:
sections.push(this.formatSection(resourcePath, resource.content, 'bmad-core'));
}
return sections.join("\n");
return sections.join('\n');
}
processAgentContent(content) {
// First, replace content before YAML with the template
const yamlContent = yamlUtils.extractYamlFromAgent(content);
const yamlContent = yamlUtilities.extractYamlFromAgent(content);
if (!yamlContent) return content;
const yamlMatch = content.match(/```ya?ml\n([\s\S]*?)\n```/);
@@ -198,24 +208,24 @@ These references map directly to bundle sections:
// Parse YAML and remove root and IDE-FILE-RESOLUTION properties
try {
const yaml = require("js-yaml");
const yaml = require('js-yaml');
const parsed = yaml.load(yamlContent);
// Remove the properties if they exist at root level
delete parsed.root;
delete parsed["IDE-FILE-RESOLUTION"];
delete parsed["REQUEST-RESOLUTION"];
delete parsed['IDE-FILE-RESOLUTION'];
delete parsed['REQUEST-RESOLUTION'];
// Also remove from activation-instructions if they exist
if (parsed["activation-instructions"] && Array.isArray(parsed["activation-instructions"])) {
parsed["activation-instructions"] = parsed["activation-instructions"].filter(
if (parsed['activation-instructions'] && Array.isArray(parsed['activation-instructions'])) {
parsed['activation-instructions'] = parsed['activation-instructions'].filter(
(instruction) => {
return (
typeof instruction === 'string' &&
!instruction.startsWith("IDE-FILE-RESOLUTION:") &&
!instruction.startsWith("REQUEST-RESOLUTION:")
!instruction.startsWith('IDE-FILE-RESOLUTION:') &&
!instruction.startsWith('REQUEST-RESOLUTION:')
);
}
},
);
}
@@ -223,25 +233,25 @@ These references map directly to bundle sections:
const cleanedYaml = yaml.dump(parsed, { lineWidth: -1 });
// Get the agent name from the YAML for the header
const agentName = parsed.agent?.id || "agent";
const agentName = parsed.agent?.id || 'agent';
// Build the new content with just the agent header and YAML
const newHeader = `# ${agentName}\n\nCRITICAL: Read the full YAML, start activation to alter your state of being, follow startup section instructions, stay in this being until told to exit this mode:\n\n`;
const afterYaml = content.substring(yamlEndIndex);
const afterYaml = content.slice(Math.max(0, yamlEndIndex));
return newHeader + "```yaml\n" + cleanedYaml.trim() + "\n```" + afterYaml;
return newHeader + '```yaml\n' + cleanedYaml.trim() + '\n```' + afterYaml;
} catch (error) {
console.warn("Failed to process agent YAML:", error.message);
console.warn('Failed to process agent YAML:', error.message);
// If parsing fails, return original content
return content;
}
}
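
The YAML-cleaning step above can be exercised in isolation. A minimal sketch assuming js-yaml (already a dependency); the agent snippet is made up — only the key names mirror the real structure:

```js
const yaml = require('js-yaml');

// Hypothetical agent YAML; property values are illustrative only.
const yamlContent = [
  'root: .bmad-core',
  "IDE-FILE-RESOLUTION: 'resolve {root}/{type}/{name}'",
  'agent:',
  '  id: game-developer',
].join('\n');

const parsed = yaml.load(yamlContent);
// Strip the root-level properties exactly as processAgentContent does
delete parsed.root;
delete parsed['IDE-FILE-RESOLUTION'];
delete parsed['REQUEST-RESOLUTION'];

console.log(yaml.dump(parsed, { lineWidth: -1 }));
// agent:
//   id: game-developer
```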
formatSection(path, content, bundleRoot = 'bmad-core') {
const separator = "====================";
const separator = '====================';
// Process agent content if this is an agent file
if (path.includes("/agents/")) {
if (path.includes('/agents/')) {
content = this.processAgentContent(content);
}
@@ -252,17 +262,17 @@ These references map directly to bundle sections:
`${separator} START: ${path} ${separator}`,
content.trim(),
`${separator} END: ${path} ${separator}`,
"",
].join("\n");
'',
].join('\n');
}
replaceRootReferences(content, bundleRoot) {
// Replace {root} with the appropriate bundle root path
return content.replace(/\{root\}/g, `.${bundleRoot}`);
return content.replaceAll('{root}', `.${bundleRoot}`);
}
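
Together, these two helpers define the bundle wire format: every resource is framed by START/END separators, and `{root}` placeholders are rewritten to the bundle's dot-prefixed root. A self-contained sketch of the same framing and substitution; the sample path and content are illustrative:

```js
const separator = '====================';

// Same framing as formatSection above
function frameSection(resourcePath, content) {
  return [
    `${separator} START: ${resourcePath} ${separator}`,
    content.trim(),
    `${separator} END: ${resourcePath} ${separator}`,
    '',
  ].join('\n');
}

// Same substitution as replaceRootReferences above
function substituteRoot(content, bundleRoot) {
  return content.replaceAll('{root}', `.${bundleRoot}`);
}

const body = substituteRoot('See {root}/tasks/create-story.md', 'bmad-core');
console.log(frameSection('.bmad-core/tasks/create-story.md', body));
// ==================== START: .bmad-core/tasks/create-story.md ====================
// See .bmad-core/tasks/create-story.md
// ==================== END: .bmad-core/tasks/create-story.md ====================
```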
async validate() {
console.log("Validating agent configurations...");
console.log('Validating agent configurations...');
const agents = await this.resolver.listAgents();
for (const agentId of agents) {
try {
@@ -274,7 +284,7 @@ These references map directly to bundle sections:
}
}
console.log("\nValidating team configurations...");
console.log('\nValidating team configurations...');
const teams = await this.resolver.listTeams();
for (const teamId of teams) {
try {
@@ -299,54 +309,54 @@ These references map directly to bundle sections:
}
async buildExpansionPack(packName, options = {}) {
const packDir = path.join(this.rootDir, "expansion-packs", packName);
const outputDirs = [path.join(this.rootDir, "dist", "expansion-packs", packName)];
const packDir = path.join(this.rootDir, 'expansion-packs', packName);
const outputDirectories = [path.join(this.rootDir, 'dist', 'expansion-packs', packName)];
// Clean output directories if requested
if (options.clean !== false) {
for (const outputDir of outputDirs) {
for (const outputDir of outputDirectories) {
try {
await fs.rm(outputDir, { recursive: true, force: true });
} catch (error) {
} catch {
// Directory might not exist, that's fine
}
}
}
// Build individual agents first
const agentsDir = path.join(packDir, "agents");
const agentsDir = path.join(packDir, 'agents');
try {
const agentFiles = await fs.readdir(agentsDir);
const agentMarkdownFiles = agentFiles.filter((f) => f.endsWith(".md"));
const agentMarkdownFiles = agentFiles.filter((f) => f.endsWith('.md'));
if (agentMarkdownFiles.length > 0) {
console.log(` Building individual agents for ${packName}:`);
for (const agentFile of agentMarkdownFiles) {
const agentName = agentFile.replace(".md", "");
const agentName = agentFile.replace('.md', '');
console.log(` - ${agentName}`);
// Build individual agent bundle
const bundle = await this.buildExpansionAgentBundle(packName, packDir, agentName);
// Write to all output directories
for (const outputDir of outputDirs) {
const agentsOutputDir = path.join(outputDir, "agents");
for (const outputDir of outputDirectories) {
const agentsOutputDir = path.join(outputDir, 'agents');
await fs.mkdir(agentsOutputDir, { recursive: true });
const outputFile = path.join(agentsOutputDir, `${agentName}.txt`);
await fs.writeFile(outputFile, bundle, "utf8");
await fs.writeFile(outputFile, bundle, 'utf8');
}
}
}
} catch (error) {
} catch {
console.debug(` No agents directory found for ${packName}`);
}
// Build team bundle
const agentTeamsDir = path.join(packDir, "agent-teams");
const agentTeamsDir = path.join(packDir, 'agent-teams');
try {
const teamFiles = await fs.readdir(agentTeamsDir);
const teamFile = teamFiles.find((f) => f.endsWith(".yaml"));
const teamFile = teamFiles.find((f) => f.endsWith('.yaml'));
if (teamFile) {
console.log(` Building team bundle for ${packName}`);
@@ -356,17 +366,17 @@ These references map directly to bundle sections:
const bundle = await this.buildExpansionTeamBundle(packName, packDir, teamConfigPath);
// Write to all output directories
for (const outputDir of outputDirs) {
const teamsOutputDir = path.join(outputDir, "teams");
for (const outputDir of outputDirectories) {
const teamsOutputDir = path.join(outputDir, 'teams');
await fs.mkdir(teamsOutputDir, { recursive: true });
const outputFile = path.join(teamsOutputDir, teamFile.replace(".yaml", ".txt"));
await fs.writeFile(outputFile, bundle, "utf8");
const outputFile = path.join(teamsOutputDir, teamFile.replace('.yaml', '.txt'));
await fs.writeFile(outputFile, bundle, 'utf8');
console.log(` ✓ Created bundle: ${path.relative(this.rootDir, outputFile)}`);
}
} else {
console.warn(` ⚠ No team configuration found in ${packName}/agent-teams/`);
}
} catch (error) {
} catch {
console.warn(` ⚠ No agent-teams directory found for ${packName}`);
}
}
@@ -376,16 +386,16 @@ These references map directly to bundle sections:
const sections = [template];
// Add agent configuration
const agentPath = path.join(packDir, "agents", `${agentName}.md`);
const agentContent = await fs.readFile(agentPath, "utf8");
const agentPath = path.join(packDir, 'agents', `${agentName}.md`);
const agentContent = await fs.readFile(agentPath, 'utf8');
const agentWebPath = this.convertToWebPath(agentPath, packName);
sections.push(this.formatSection(agentWebPath, agentContent, packName));
// Resolve and add agent dependencies
const yamlContent = yamlUtils.extractYamlFromAgent(agentContent);
const yamlContent = yamlUtilities.extractYamlFromAgent(agentContent);
if (yamlContent) {
try {
const yaml = require("js-yaml");
const yaml = require('js-yaml');
const agentConfig = yaml.load(yamlContent);
if (agentConfig.dependencies) {
@@ -398,59 +408,43 @@ These references map directly to bundle sections:
// Try expansion pack first
const resourcePath = path.join(packDir, resourceType, resourceName);
try {
const resourceContent = await fs.readFile(resourcePath, "utf8");
const resourceContent = await fs.readFile(resourcePath, 'utf8');
const resourceWebPath = this.convertToWebPath(resourcePath, packName);
sections.push(
this.formatSection(resourceWebPath, resourceContent, packName)
);
sections.push(this.formatSection(resourceWebPath, resourceContent, packName));
found = true;
} catch (error) {
} catch {
// Not in expansion pack, continue
}
// If not found in expansion pack, try core
if (!found) {
const corePath = path.join(
this.rootDir,
"bmad-core",
resourceType,
resourceName
);
const corePath = path.join(this.rootDir, 'bmad-core', resourceType, resourceName);
try {
const coreContent = await fs.readFile(corePath, "utf8");
const coreContent = await fs.readFile(corePath, 'utf8');
const coreWebPath = this.convertToWebPath(corePath, packName);
sections.push(
this.formatSection(coreWebPath, coreContent, packName)
);
sections.push(this.formatSection(coreWebPath, coreContent, packName));
found = true;
} catch (error) {
} catch {
// Not in core either, continue
}
}
// If not found in core, try common folder
if (!found) {
const commonPath = path.join(
this.rootDir,
"common",
resourceType,
resourceName
);
const commonPath = path.join(this.rootDir, 'common', resourceType, resourceName);
try {
const commonContent = await fs.readFile(commonPath, "utf8");
const commonContent = await fs.readFile(commonPath, 'utf8');
const commonWebPath = this.convertToWebPath(commonPath, packName);
sections.push(
this.formatSection(commonWebPath, commonContent, packName)
);
sections.push(this.formatSection(commonWebPath, commonContent, packName));
found = true;
} catch (error) {
} catch {
// Not in common either, continue
}
}
if (!found) {
console.warn(
` ⚠ Dependency ${resourceType}#${resourceName} not found in expansion pack or core`
` ⚠ Dependency ${resourceType}#${resourceName} not found in expansion pack or core`,
);
}
}
@@ -462,7 +456,7 @@ These references map directly to bundle sections:
}
}
return sections.join("\n");
return sections.join('\n');
}
async buildExpansionTeamBundle(packName, packDir, teamConfigPath) {
@@ -471,38 +465,38 @@ These references map directly to bundle sections:
const sections = [template];
// Add team configuration and parse to get agent list
const teamContent = await fs.readFile(teamConfigPath, "utf8");
const teamFileName = path.basename(teamConfigPath, ".yaml");
const teamContent = await fs.readFile(teamConfigPath, 'utf8');
const teamFileName = path.basename(teamConfigPath, '.yaml');
const teamConfig = this.parseYaml(teamContent);
const teamWebPath = this.convertToWebPath(teamConfigPath, packName);
sections.push(this.formatSection(teamWebPath, teamContent, packName));
// Get list of expansion pack agents
const expansionAgents = new Set();
const agentsDir = path.join(packDir, "agents");
const agentsDir = path.join(packDir, 'agents');
try {
const agentFiles = await fs.readdir(agentsDir);
for (const agentFile of agentFiles.filter((f) => f.endsWith(".md"))) {
const agentName = agentFile.replace(".md", "");
for (const agentFile of agentFiles.filter((f) => f.endsWith('.md'))) {
const agentName = agentFile.replace('.md', '');
expansionAgents.add(agentName);
}
} catch (error) {
} catch {
console.warn(` ⚠ No agents directory found in ${packName}`);
}
// Build a map of all available expansion pack resources for override checking
const expansionResources = new Map();
const resourceDirs = ["templates", "tasks", "checklists", "workflows", "data"];
for (const resourceDir of resourceDirs) {
const resourceDirectories = ['templates', 'tasks', 'checklists', 'workflows', 'data'];
for (const resourceDir of resourceDirectories) {
const resourcePath = path.join(packDir, resourceDir);
try {
const resourceFiles = await fs.readdir(resourcePath);
for (const resourceFile of resourceFiles.filter(
(f) => f.endsWith(".md") || f.endsWith(".yaml")
(f) => f.endsWith('.md') || f.endsWith('.yaml'),
)) {
expansionResources.set(`${resourceDir}#${resourceFile}`, true);
}
} catch (error) {
} catch {
// Directory might not exist, that's fine
}
}
@@ -511,9 +505,9 @@ These references map directly to bundle sections:
const agentsToProcess = teamConfig.agents || [];
// Ensure bmad-orchestrator is always included for teams
if (!agentsToProcess.includes("bmad-orchestrator")) {
if (!agentsToProcess.includes('bmad-orchestrator')) {
console.warn(` ⚠ Team ${teamFileName} missing bmad-orchestrator, adding automatically`);
agentsToProcess.unshift("bmad-orchestrator");
agentsToProcess.unshift('bmad-orchestrator');
}
// Track all dependencies from all agents (deduplicated)
@@ -523,7 +517,7 @@ These references map directly to bundle sections:
if (expansionAgents.has(agentId)) {
// Use expansion pack version (override)
const agentPath = path.join(agentsDir, `${agentId}.md`);
const agentContent = await fs.readFile(agentPath, "utf8");
const agentContent = await fs.readFile(agentPath, 'utf8');
const expansionAgentWebPath = this.convertToWebPath(agentPath, packName);
sections.push(this.formatSection(expansionAgentWebPath, agentContent, packName));
@@ -551,13 +545,13 @@ These references map directly to bundle sections:
} else {
// Use core BMad version
try {
const coreAgentPath = path.join(this.rootDir, "bmad-core", "agents", `${agentId}.md`);
const coreAgentContent = await fs.readFile(coreAgentPath, "utf8");
const coreAgentPath = path.join(this.rootDir, 'bmad-core', 'agents', `${agentId}.md`);
const coreAgentContent = await fs.readFile(coreAgentPath, 'utf8');
const coreAgentWebPath = this.convertToWebPath(coreAgentPath, packName);
sections.push(this.formatSection(coreAgentWebPath, coreAgentContent, packName));
// Parse and collect dependencies from core agent
const yamlContent = yamlUtils.extractYamlFromAgent(coreAgentContent, true);
const yamlContent = yamlUtilities.extractYamlFromAgent(coreAgentContent, true);
if (yamlContent) {
try {
const agentConfig = this.parseYaml(yamlContent);
@@ -577,7 +571,7 @@ These references map directly to bundle sections:
console.debug(`Failed to parse agent YAML for ${agentId}:`, error.message);
}
}
} catch (error) {
} catch {
console.warn(` ⚠ Agent ${agentId} not found in core or expansion pack`);
}
}
@@ -593,38 +587,38 @@ These references map directly to bundle sections:
// We know it exists in expansion pack, find and load it
const expansionPath = path.join(packDir, dep.type, dep.name);
try {
const content = await fs.readFile(expansionPath, "utf8");
const content = await fs.readFile(expansionPath, 'utf8');
const expansionWebPath = this.convertToWebPath(expansionPath, packName);
sections.push(this.formatSection(expansionWebPath, content, packName));
console.log(` ✓ Using expansion override for ${key}`);
found = true;
} catch (error) {
} catch {
// Try next extension
}
}
// If not found in expansion pack (or doesn't exist there), try core
if (!found) {
const corePath = path.join(this.rootDir, "bmad-core", dep.type, dep.name);
const corePath = path.join(this.rootDir, 'bmad-core', dep.type, dep.name);
try {
const content = await fs.readFile(corePath, "utf8");
const content = await fs.readFile(corePath, 'utf8');
const coreWebPath = this.convertToWebPath(corePath, packName);
sections.push(this.formatSection(coreWebPath, content, packName));
found = true;
} catch (error) {
} catch {
// Not in core either, continue
}
}
// If not found in core, try common folder
if (!found) {
const commonPath = path.join(this.rootDir, "common", dep.type, dep.name);
const commonPath = path.join(this.rootDir, 'common', dep.type, dep.name);
try {
const content = await fs.readFile(commonPath, "utf8");
const content = await fs.readFile(commonPath, 'utf8');
const commonWebPath = this.convertToWebPath(commonPath, packName);
sections.push(this.formatSection(commonWebPath, content, packName));
found = true;
} catch (error) {
} catch {
// Not in common either, continue
}
}
@@ -635,16 +629,16 @@ These references map directly to bundle sections:
}
// Add remaining expansion pack resources not already included as dependencies
for (const resourceDir of resourceDirs) {
for (const resourceDir of resourceDirectories) {
const resourcePath = path.join(packDir, resourceDir);
try {
const resourceFiles = await fs.readdir(resourcePath);
for (const resourceFile of resourceFiles.filter(
(f) => f.endsWith(".md") || f.endsWith(".yaml")
(f) => f.endsWith('.md') || f.endsWith('.yaml'),
)) {
const filePath = path.join(resourcePath, resourceFile);
const fileContent = await fs.readFile(filePath, "utf8");
const fileName = resourceFile.replace(/\.(md|yaml)$/, "");
const fileContent = await fs.readFile(filePath, 'utf8');
const fileName = resourceFile.replace(/\.(md|yaml)$/, '');
// Only add if not already included as a dependency
const resourceKey = `${resourceDir}#${fileName}`;
@@ -654,21 +648,21 @@ These references map directly to bundle sections:
sections.push(this.formatSection(resourceWebPath, fileContent, packName));
}
}
} catch (error) {
} catch {
// Directory might not exist, that's fine
}
}
return sections.join("\n");
return sections.join('\n');
}
async listExpansionPacks() {
const expansionPacksDir = path.join(this.rootDir, "expansion-packs");
const expansionPacksDir = path.join(this.rootDir, 'expansion-packs');
try {
const entries = await fs.readdir(expansionPacksDir, { withFileTypes: true });
return entries.filter((entry) => entry.isDirectory()).map((entry) => entry.name);
} catch (error) {
console.warn("No expansion-packs directory found");
} catch {
console.warn('No expansion-packs directory found');
return [];
}
}

View File

@@ -1,11 +1,9 @@
#!/usr/bin/env node
const fs = require('fs');
const path = require('path');
const fs = require('node:fs');
const path = require('node:path');
const yaml = require('js-yaml');
const args = process.argv.slice(2);
const bumpType = args[0] || 'minor'; // default to minor
const arguments_ = process.argv.slice(2);
const bumpType = arguments_[0] || 'minor'; // default to minor
if (!['major', 'minor', 'patch'].includes(bumpType)) {
console.log('Usage: node bump-all-versions.js [major|minor|patch]');
@@ -17,15 +15,19 @@ function bumpVersion(currentVersion, type) {
const [major, minor, patch] = currentVersion.split('.').map(Number);
switch (type) {
case 'major':
case 'major': {
return `${major + 1}.0.0`;
case 'minor':
}
case 'minor': {
return `${major}.${minor + 1}.0`;
case 'patch':
}
case 'patch': {
return `${major}.${minor}.${patch + 1}`;
default:
}
default: {
return currentVersion;
}
}
}
async function bumpAllVersions() {
@@ -43,7 +45,12 @@ async function bumpAllVersions() {
fs.writeFileSync(packagePath, JSON.stringify(packageJson, null, 2) + '\n');
updatedItems.push({ type: 'core', name: 'BMad Core', oldVersion: oldCoreVersion, newVersion: newCoreVersion });
updatedItems.push({
type: 'core',
name: 'BMad Core',
oldVersion: oldCoreVersion,
newVersion: newCoreVersion,
});
console.log(`✓ BMad Core (package.json): ${oldCoreVersion}${newCoreVersion}`);
} catch (error) {
console.error(`✗ Failed to update BMad Core: ${error.message}`);
@@ -74,7 +81,6 @@ async function bumpAllVersions() {
updatedItems.push({ type: 'expansion', name: packId, oldVersion, newVersion });
console.log(`${packId}: ${oldVersion}${newVersion}`);
} catch (error) {
console.error(`✗ Failed to update ${packId}: ${error.message}`);
}
@@ -83,20 +89,23 @@ async function bumpAllVersions() {
}
if (updatedItems.length > 0) {
const coreCount = updatedItems.filter(i => i.type === 'core').length;
const expansionCount = updatedItems.filter(i => i.type === 'expansion').length;
const coreCount = updatedItems.filter((index) => index.type === 'core').length;
const expansionCount = updatedItems.filter((index) => index.type === 'expansion').length;
console.log(`\n✓ Successfully bumped ${updatedItems.length} item(s) with ${bumpType} version bump`);
console.log(
`\n✓ Successfully bumped ${updatedItems.length} item(s) with ${bumpType} version bump`,
);
if (coreCount > 0) console.log(` - ${coreCount} core`);
if (expansionCount > 0) console.log(` - ${expansionCount} expansion pack(s)`);
console.log('\nNext steps:');
console.log('1. Test the changes');
console.log('2. Commit: git add -A && git commit -m "chore: bump all versions (' + bumpType + ')"');
console.log(
'2. Commit: git add -A && git commit -m "chore: bump all versions (' + bumpType + ')"',
);
} else {
console.log('No items found to update');
}
} catch (error) {
console.error('Error reading expansion packs directory:', error.message);
process.exit(1);
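
The `bumpVersion` helper above is plain semver arithmetic; restated as a runnable sketch with sample outputs (the input version is arbitrary):

```js
function bumpVersion(currentVersion, type) {
  const [major, minor, patch] = currentVersion.split('.').map(Number);
  switch (type) {
    case 'major': {
      return `${major + 1}.0.0`; // breaking change: reset minor and patch
    }
    case 'minor': {
      return `${major}.${minor + 1}.0`; // new feature: reset patch
    }
    case 'patch': {
      return `${major}.${minor}.${patch + 1}`; // bug fix
    }
    default: {
      return currentVersion;
    }
  }
}

console.log(bumpVersion('4.36.2', 'major')); // 5.0.0
console.log(bumpVersion('4.36.2', 'minor')); // 4.37.0
console.log(bumpVersion('4.36.2', 'patch')); // 4.36.3
```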

View File

@@ -1,17 +1,15 @@
#!/usr/bin/env node
// Load required modules
const fs = require('fs');
const path = require('path');
const fs = require('node:fs');
const path = require('node:path');
const yaml = require('js-yaml');
// Parse CLI arguments
const args = process.argv.slice(2);
const packId = args[0];
const bumpType = args[1] || 'minor';
const arguments_ = process.argv.slice(2);
const packId = arguments_[0];
const bumpType = arguments_[1] || 'minor';
// Validate arguments
if (!packId || args.length > 2) {
if (!packId || arguments_.length > 2) {
console.log('Usage: node bump-expansion-version.js <expansion-pack-id> [major|minor|patch]');
console.log('Default: minor');
console.log('Example: node bump-expansion-version.js bmad-creator-tools patch');
@@ -28,10 +26,18 @@ function bumpVersion(currentVersion, type) {
const [major, minor, patch] = currentVersion.split('.').map(Number);
switch (type) {
case 'major': return `${major + 1}.0.0`;
case 'minor': return `${major}.${minor + 1}.0`;
case 'patch': return `${major}.${minor}.${patch + 1}`;
default: return currentVersion;
case 'major': {
return `${major + 1}.0.0`;
}
case 'minor': {
return `${major}.${minor + 1}.0`;
}
case 'patch': {
return `${major}.${minor}.${patch + 1}`;
}
default: {
return currentVersion;
}
}
}
@@ -47,11 +53,11 @@ async function updateVersion() {
const packsDir = path.join(__dirname, '..', 'expansion-packs');
const entries = fs.readdirSync(packsDir, { withFileTypes: true });
entries.forEach(entry => {
for (const entry of entries) {
if (entry.isDirectory() && !entry.name.startsWith('.')) {
console.log(` - ${entry.name}`);
}
});
}
process.exit(1);
}
@@ -72,8 +78,9 @@ async function updateVersion() {
console.log(`\n✓ Successfully bumped ${packId} with ${bumpType} version bump`);
console.log('\nNext steps:');
console.log(`1. Test the changes`);
console.log(`2. Commit: git add -A && git commit -m "chore: bump ${packId} version (${bumpType})"`);
console.log(
`2. Commit: git add -A && git commit -m "chore: bump ${packId} version (${bumpType})"`,
);
} catch (error) {
console.error('Error updating version:', error.message);
process.exit(1);

View File

@@ -1,10 +1,8 @@
#!/usr/bin/env node
const { Command } = require('commander');
const WebBuilder = require('./builders/web-builder');
const V3ToV4Upgrader = require('./upgraders/v3-to-v4-upgrader');
const IdeSetup = require('./installer/lib/ide-setup');
const path = require('path');
const path = require('node:path');
const program = new Command();
@@ -23,7 +21,7 @@ program
.option('--no-clean', 'Skip cleaning output directories')
.action(async (options) => {
const builder = new WebBuilder({
rootDir: process.cwd()
rootDir: process.cwd(),
});
try {
@@ -66,7 +64,7 @@ program
.option('--no-clean', 'Skip cleaning output directories')
.action(async (options) => {
const builder = new WebBuilder({
rootDir: process.cwd()
rootDir: process.cwd(),
});
try {
@@ -92,7 +90,7 @@ program
const builder = new WebBuilder({ rootDir: process.cwd() });
const agents = await builder.resolver.listAgents();
console.log('Available agents:');
agents.forEach(agent => console.log(` - ${agent}`));
for (const agent of agents) console.log(` - ${agent}`);
process.exit(0);
});
@@ -103,7 +101,7 @@ program
const builder = new WebBuilder({ rootDir: process.cwd() });
const expansions = await builder.listExpansionPacks();
console.log('Available expansion packs:');
expansions.forEach(expansion => console.log(` - ${expansion}`));
for (const expansion of expansions) console.log(` - ${expansion}`);
process.exit(0);
});
@@ -147,7 +145,7 @@ program
await upgrader.upgrade({
projectPath: options.project,
dryRun: options.dryRun,
backup: options.backup
backup: options.backup,
});
});

View File

@@ -1,7 +1,7 @@
const fs = require("fs-extra");
const path = require("node:path");
const os = require("node:os");
const { isBinaryFile } = require("./binary.js");
const fs = require('fs-extra');
const path = require('node:path');
const os = require('node:os');
const { isBinaryFile } = require('./binary.js');
/**
* Aggregate file contents with bounded concurrency.
@@ -22,7 +22,7 @@ async function aggregateFileContents(files, rootDir, spinner = null) {
// Automatic concurrency selection based on CPU count and workload size.
// - Base on 2x logical CPUs, clamped to [2, 64]
// - For very small workloads, avoid excessive parallelism
const cpuCount = (os.cpus && Array.isArray(os.cpus()) ? os.cpus().length : (os.cpus?.length || 4));
const cpuCount = os.cpus && Array.isArray(os.cpus()) ? os.cpus().length : os.cpus?.length || 4;
let concurrency = Math.min(64, Math.max(2, (Number(cpuCount) || 4) * 2));
if (files.length > 0 && files.length < concurrency) {
concurrency = Math.max(1, Math.min(concurrency, Math.ceil(files.length / 2)));
@@ -37,16 +37,16 @@ async function aggregateFileContents(files, rootDir, spinner = null) {
const binary = await isBinaryFile(filePath);
if (binary) {
const size = (await fs.stat(filePath)).size;
const { size } = await fs.stat(filePath);
results.binaryFiles.push({ path: relativePath, absolutePath: filePath, size });
} else {
const content = await fs.readFile(filePath, "utf8");
const content = await fs.readFile(filePath, 'utf8');
results.textFiles.push({
path: relativePath,
absolutePath: filePath,
content,
size: content.length,
lines: content.split("\n").length,
lines: content.split('\n').length,
});
}
} catch (error) {
@@ -63,8 +63,8 @@ async function aggregateFileContents(files, rootDir, spinner = null) {
}
}
for (let i = 0; i < files.length; i += concurrency) {
const slice = files.slice(i, i + concurrency);
for (let index = 0; index < files.length; index += concurrency) {
const slice = files.slice(index, index + concurrency);
await Promise.all(slice.map(processOne));
}
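
The batching loop above processes files in fixed-size slices, so the heuristic that picks the slice size can be read on its own. A sketch under the same rules stated in the comment — 2× logical CPUs, clamped to [2, 64], halved for tiny workloads:

```js
const os = require('node:os');

function pickConcurrency(fileCount) {
  const cpuCount = os.cpus().length || 4;
  // Base on 2x logical CPUs, clamped to [2, 64]
  let concurrency = Math.min(64, Math.max(2, cpuCount * 2));
  // For very small workloads, avoid excessive parallelism
  if (fileCount > 0 && fileCount < concurrency) {
    concurrency = Math.max(1, Math.min(concurrency, Math.ceil(fileCount / 2)));
  }
  return concurrency;
}

console.log(pickConcurrency(500)); // 16 on an 8-core machine
console.log(pickConcurrency(6)); // 3 — a 6-file job gets 3 workers, not 16
```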

View File

@@ -1,6 +1,6 @@
const fsp = require("node:fs/promises");
const path = require("node:path");
const { Buffer } = require("node:buffer");
const fsp = require('node:fs/promises');
const path = require('node:path');
const { Buffer } = require('node:buffer');
/**
* Efficiently determine if a file is binary without reading the whole file.
@@ -13,25 +13,54 @@ async function isBinaryFile(filePath) {
try {
const stats = await fsp.stat(filePath);
if (stats.isDirectory()) {
throw new Error("EISDIR: illegal operation on a directory");
throw new Error('EISDIR: illegal operation on a directory');
}
const binaryExtensions = new Set([
".jpg", ".jpeg", ".png", ".gif", ".bmp", ".ico", ".svg",
".pdf", ".doc", ".docx", ".xls", ".xlsx", ".ppt", ".pptx",
".zip", ".tar", ".gz", ".rar", ".7z",
".exe", ".dll", ".so", ".dylib",
".mp3", ".mp4", ".avi", ".mov", ".wav",
".ttf", ".otf", ".woff", ".woff2",
".bin", ".dat", ".db", ".sqlite",
'.jpg',
'.jpeg',
'.png',
'.gif',
'.bmp',
'.ico',
'.svg',
'.pdf',
'.doc',
'.docx',
'.xls',
'.xlsx',
'.ppt',
'.pptx',
'.zip',
'.tar',
'.gz',
'.rar',
'.7z',
'.exe',
'.dll',
'.so',
'.dylib',
'.mp3',
'.mp4',
'.avi',
'.mov',
'.wav',
'.ttf',
'.otf',
'.woff',
'.woff2',
'.bin',
'.dat',
'.db',
'.sqlite',
]);
const ext = path.extname(filePath).toLowerCase();
if (binaryExtensions.has(ext)) return true;
const extension = path.extname(filePath).toLowerCase();
if (binaryExtensions.has(extension)) return true;
if (stats.size === 0) return false;
const sampleSize = Math.min(4096, stats.size);
const fd = await fsp.open(filePath, "r");
const fd = await fsp.open(filePath, 'r');
try {
const buffer = Buffer.allocUnsafe(sampleSize);
const { bytesRead } = await fd.read(buffer, 0, sampleSize, 0);
@@ -41,9 +70,7 @@ async function isBinaryFile(filePath) {
await fd.close();
}
} catch (error) {
console.warn(
`Warning: Could not determine if file is binary: ${filePath} - ${error.message}`,
);
console.warn(`Warning: Could not determine if file is binary: ${filePath} - ${error.message}`);
return false;
}
}
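
The approach above checks a known-binary extension set first, then reads only a small head sample rather than the whole file. A minimal standalone sketch of the sampling step; the NUL-byte test here is a common heuristic and an assumption, since the module's exact byte check falls outside the hunks shown:

```js
const fsp = require('node:fs/promises');
const { Buffer } = require('node:buffer');

// Read at most 4 KB from the head of the file; a NUL byte in the sample
// is strong evidence of binary content.
async function sniffBinary(filePath) {
  const stats = await fsp.stat(filePath);
  if (stats.size === 0) return false;
  const sampleSize = Math.min(4096, stats.size);
  const fd = await fsp.open(filePath, 'r');
  try {
    const buffer = Buffer.allocUnsafe(sampleSize);
    const { bytesRead } = await fd.read(buffer, 0, sampleSize, 0);
    return buffer.subarray(0, bytesRead).includes(0);
  } finally {
    await fd.close();
  }
}

sniffBinary(__filename).then((result) => console.log(result)); // false for this script
```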

View File

@@ -1,18 +1,21 @@
const path = require("node:path");
const { execFile } = require("node:child_process");
const { promisify } = require("node:util");
const { glob } = require("glob");
const { loadIgnore } = require("./ignoreRules.js");
const path = require('node:path');
const { execFile } = require('node:child_process');
const { promisify } = require('node:util');
const { glob } = require('glob');
const { loadIgnore } = require('./ignoreRules.js');
const pExecFile = promisify(execFile);
async function isGitRepo(rootDir) {
try {
const { stdout } = await pExecFile("git", [
"rev-parse",
"--is-inside-work-tree",
], { cwd: rootDir });
return String(stdout || "").toString().trim() === "true";
const { stdout } = await pExecFile('git', ['rev-parse', '--is-inside-work-tree'], {
cwd: rootDir,
});
return (
String(stdout || '')
.toString()
.trim() === 'true'
);
} catch {
return false;
}
@@ -20,12 +23,10 @@ async function isGitRepo(rootDir) {
async function gitListFiles(rootDir) {
try {
const { stdout } = await pExecFile("git", [
"ls-files",
"-co",
"--exclude-standard",
], { cwd: rootDir });
return String(stdout || "")
const { stdout } = await pExecFile('git', ['ls-files', '-co', '--exclude-standard'], {
cwd: rootDir,
});
return String(stdout || '')
.split(/\r?\n/)
.map((s) => s.trim())
.filter(Boolean);
@@ -48,14 +49,14 @@ async function discoverFiles(rootDir, options = {}) {
const { filter } = await loadIgnore(rootDir);
// Try git first
if (preferGit && await isGitRepo(rootDir)) {
if (preferGit && (await isGitRepo(rootDir))) {
const relFiles = await gitListFiles(rootDir);
const filteredRel = relFiles.filter((p) => filter(p));
return filteredRel.map((p) => path.resolve(rootDir, p));
}
// Glob fallback
const globbed = await glob("**/*", {
const globbed = await glob('**/*', {
cwd: rootDir,
nodir: true,
dot: true,
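
The git-first strategy here is worth calling out: `git ls-files -co --exclude-standard` returns tracked plus untracked-but-not-ignored files in a single call, which is both faster and more faithful to `.gitignore` semantics than globbing. A minimal standalone sketch:

```js
const { execFile } = require('node:child_process');
const { promisify } = require('node:util');

const pExecFile = promisify(execFile);

// -c: cached (tracked), -o: others (untracked), --exclude-standard:
// honor .gitignore, .git/info/exclude, and the global excludes file
async function listRepoFiles(rootDir) {
  const { stdout } = await pExecFile('git', ['ls-files', '-co', '--exclude-standard'], {
    cwd: rootDir,
  });
  return stdout.split(/\r?\n/).filter(Boolean);
}

listRepoFiles(process.cwd())
  .then((files) => console.log(`${files.length} files`))
  .catch(() => console.log('not a git repository — fall back to glob'));
```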

View File

@@ -1,8 +1,8 @@
const path = require("node:path");
const discovery = require("./discovery.js");
const ignoreRules = require("./ignoreRules.js");
const { isBinaryFile } = require("./binary.js");
const { aggregateFileContents } = require("./aggregate.js");
const path = require('node:path');
const discovery = require('./discovery.js');
const ignoreRules = require('./ignoreRules.js');
const { isBinaryFile } = require('./binary.js');
const { aggregateFileContents } = require('./aggregate.js');
// Backward-compatible signature; delegate to central loader
async function parseGitignore(gitignorePath) {
@@ -14,7 +14,7 @@ async function discoverFiles(rootDir) {
// Delegate to discovery module which respects .gitignore and defaults
return await discovery.discoverFiles(rootDir, { preferGit: true });
} catch (error) {
console.error("Error discovering files:", error.message);
console.error('Error discovering files:', error.message);
return [];
}
}

View File

@@ -1,147 +1,147 @@
const fs = require("fs-extra");
const path = require("node:path");
const ignore = require("ignore");
const fs = require('fs-extra');
const path = require('node:path');
const ignore = require('ignore');
// Central default ignore patterns for discovery and filtering.
// These complement .gitignore and are applied regardless of VCS presence.
const DEFAULT_PATTERNS = [
// Project/VCS
"**/.bmad-core/**",
"**/.git/**",
"**/.svn/**",
"**/.hg/**",
"**/.bzr/**",
'**/.bmad-core/**',
'**/.git/**',
'**/.svn/**',
'**/.hg/**',
'**/.bzr/**',
// Package/build outputs
"**/node_modules/**",
"**/bower_components/**",
"**/vendor/**",
"**/packages/**",
"**/build/**",
"**/dist/**",
"**/out/**",
"**/target/**",
"**/bin/**",
"**/obj/**",
"**/release/**",
"**/debug/**",
'**/node_modules/**',
'**/bower_components/**',
'**/vendor/**',
'**/packages/**',
'**/build/**',
'**/dist/**',
'**/out/**',
'**/target/**',
'**/bin/**',
'**/obj/**',
'**/release/**',
'**/debug/**',
// Environments
"**/.venv/**",
"**/venv/**",
"**/.virtualenv/**",
"**/virtualenv/**",
"**/env/**",
'**/.venv/**',
'**/venv/**',
'**/.virtualenv/**',
'**/virtualenv/**',
'**/env/**',
// Logs & coverage
"**/*.log",
"**/npm-debug.log*",
"**/yarn-debug.log*",
"**/yarn-error.log*",
"**/lerna-debug.log*",
"**/coverage/**",
"**/.nyc_output/**",
"**/.coverage/**",
"**/test-results/**",
'**/*.log',
'**/npm-debug.log*',
'**/yarn-debug.log*',
'**/yarn-error.log*',
'**/lerna-debug.log*',
'**/coverage/**',
'**/.nyc_output/**',
'**/.coverage/**',
'**/test-results/**',
// Caches & temp
"**/.cache/**",
"**/.tmp/**",
"**/.temp/**",
"**/tmp/**",
"**/temp/**",
"**/.sass-cache/**",
'**/.cache/**',
'**/.tmp/**',
'**/.temp/**',
'**/tmp/**',
'**/temp/**',
'**/.sass-cache/**',
// IDE/editor
"**/.vscode/**",
"**/.idea/**",
"**/*.swp",
"**/*.swo",
"**/*~",
"**/.project",
"**/.classpath",
"**/.settings/**",
"**/*.sublime-project",
"**/*.sublime-workspace",
'**/.vscode/**',
'**/.idea/**',
'**/*.swp',
'**/*.swo',
'**/*~',
'**/.project',
'**/.classpath',
'**/.settings/**',
'**/*.sublime-project',
'**/*.sublime-workspace',
// Lockfiles
"**/package-lock.json",
"**/yarn.lock",
"**/pnpm-lock.yaml",
"**/composer.lock",
"**/Pipfile.lock",
'**/package-lock.json',
'**/yarn.lock',
'**/pnpm-lock.yaml',
'**/composer.lock',
'**/Pipfile.lock',
// Python/Java/compiled artifacts
"**/*.pyc",
"**/*.pyo",
"**/*.pyd",
"**/__pycache__/**",
"**/*.class",
"**/*.jar",
"**/*.war",
"**/*.ear",
"**/*.o",
"**/*.so",
"**/*.dll",
"**/*.exe",
'**/*.pyc',
'**/*.pyo',
'**/*.pyd',
'**/__pycache__/**',
'**/*.class',
'**/*.jar',
'**/*.war',
'**/*.ear',
'**/*.o',
'**/*.so',
'**/*.dll',
'**/*.exe',
// System junk
"**/lib64/**",
"**/.venv/lib64/**",
"**/venv/lib64/**",
"**/_site/**",
"**/.jekyll-cache/**",
"**/.jekyll-metadata",
"**/.DS_Store",
"**/.DS_Store?",
"**/._*",
"**/.Spotlight-V100/**",
"**/.Trashes/**",
"**/ehthumbs.db",
"**/Thumbs.db",
"**/desktop.ini",
'**/lib64/**',
'**/.venv/lib64/**',
'**/venv/lib64/**',
'**/_site/**',
'**/.jekyll-cache/**',
'**/.jekyll-metadata',
'**/.DS_Store',
'**/.DS_Store?',
'**/._*',
'**/.Spotlight-V100/**',
'**/.Trashes/**',
'**/ehthumbs.db',
'**/Thumbs.db',
'**/desktop.ini',
// XML outputs
"**/flattened-codebase.xml",
"**/repomix-output.xml",
'**/flattened-codebase.xml',
'**/repomix-output.xml',
// Images, media, fonts, archives, docs, dylibs
"**/*.jpg",
"**/*.jpeg",
"**/*.png",
"**/*.gif",
"**/*.bmp",
"**/*.ico",
"**/*.svg",
"**/*.pdf",
"**/*.doc",
"**/*.docx",
"**/*.xls",
"**/*.xlsx",
"**/*.ppt",
"**/*.pptx",
"**/*.zip",
"**/*.tar",
"**/*.gz",
"**/*.rar",
"**/*.7z",
"**/*.dylib",
"**/*.mp3",
"**/*.mp4",
"**/*.avi",
"**/*.mov",
"**/*.wav",
"**/*.ttf",
"**/*.otf",
"**/*.woff",
"**/*.woff2",
'**/*.jpg',
'**/*.jpeg',
'**/*.png',
'**/*.gif',
'**/*.bmp',
'**/*.ico',
'**/*.svg',
'**/*.pdf',
'**/*.doc',
'**/*.docx',
'**/*.xls',
'**/*.xlsx',
'**/*.ppt',
'**/*.pptx',
'**/*.zip',
'**/*.tar',
'**/*.gz',
'**/*.rar',
'**/*.7z',
'**/*.dylib',
'**/*.mp3',
'**/*.mp4',
'**/*.avi',
'**/*.mov',
'**/*.wav',
'**/*.ttf',
'**/*.otf',
'**/*.woff',
'**/*.woff2',
// Env files
"**/.env",
"**/.env.*",
"**/*.env",
'**/.env',
'**/.env.*',
'**/*.env',
// Misc
"**/junit.xml",
'**/junit.xml',
];
async function readIgnoreFile(filePath) {
try {
if (!await fs.pathExists(filePath)) return [];
const content = await fs.readFile(filePath, "utf8");
if (!(await fs.pathExists(filePath))) return [];
const content = await fs.readFile(filePath, 'utf8');
return content
.split("\n")
.split('\n')
.map((l) => l.trim())
.filter((l) => l && !l.startsWith("#"));
} catch (err) {
.filter((l) => l && !l.startsWith('#'));
} catch {
return [];
}
}
@@ -153,18 +153,18 @@ async function parseGitignore(gitignorePath) {
async function loadIgnore(rootDir, extraPatterns = []) {
const ig = ignore();
const gitignorePath = path.join(rootDir, ".gitignore");
const gitignorePath = path.join(rootDir, '.gitignore');
const patterns = [
...await readIgnoreFile(gitignorePath),
...(await readIgnoreFile(gitignorePath)),
...DEFAULT_PATTERNS,
...extraPatterns,
];
// De-duplicate
const unique = Array.from(new Set(patterns.map((p) => String(p))));
const unique = [...new Set(patterns.map(String))];
ig.add(unique);
// Include-only filter: return true if path should be included
const filter = (relativePath) => !ig.ignores(relativePath.replace(/\\/g, "/"));
const filter = (relativePath) => !ig.ignores(relativePath.replaceAll('\\', '/'));
return { ig, filter, patterns: unique };
}
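
A minimal usage sketch for the include-only filter returned above, assuming the `ignore` package already in dependencies; the two patterns are a small subset of DEFAULT_PATTERNS:

```js
const ignore = require('ignore');

const ig = ignore().add(['**/node_modules/**', '**/*.log']);
// Same contract as loadIgnore's filter: true means "include this path"
const filter = (relativePath) => !ig.ignores(relativePath.replaceAll('\\', '/'));

console.log(filter('src/index.js')); // true — kept
console.log(filter('node_modules/pkg/index.js')); // false — ignored
console.log(filter('logs\\app.log')); // false — backslashes normalized first
```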

View File

@@ -1,20 +1,14 @@
#!/usr/bin/env node
const { Command } = require("commander");
const fs = require("fs-extra");
const path = require("node:path");
const process = require("node:process");
const { Command } = require('commander');
const fs = require('fs-extra');
const path = require('node:path');
const process = require('node:process');
// Modularized components
const { findProjectRoot } = require("./projectRoot.js");
const { promptYesNo, promptPath } = require("./prompts.js");
const {
discoverFiles,
filterFiles,
aggregateFileContents,
} = require("./files.js");
const { generateXMLOutput } = require("./xml.js");
const { calculateStatistics } = require("./stats.js");
const { findProjectRoot } = require('./projectRoot.js');
const { promptYesNo, promptPath } = require('./prompts.js');
const { discoverFiles, filterFiles, aggregateFileContents } = require('./files.js');
const { generateXMLOutput } = require('./xml.js');
const { calculateStatistics } = require('./stats.js');
/**
* Recursively discover all files in a directory
@@ -73,30 +67,30 @@ const { calculateStatistics } = require("./stats.js");
const program = new Command();
program
.name("bmad-flatten")
.description("BMad-Method codebase flattener tool")
.version("1.0.0")
.option("-i, --input <path>", "Input directory to flatten", process.cwd())
.option("-o, --output <path>", "Output file path", "flattened-codebase.xml")
.name('bmad-flatten')
.description('BMad-Method codebase flattener tool')
.version('1.0.0')
.option('-i, --input <path>', 'Input directory to flatten', process.cwd())
.option('-o, --output <path>', 'Output file path', 'flattened-codebase.xml')
.action(async (options) => {
let inputDir = path.resolve(options.input);
let outputPath = path.resolve(options.output);
// Detect if user explicitly provided -i/--input or -o/--output
const argv = process.argv.slice(2);
const userSpecifiedInput = argv.some((a) =>
a === "-i" || a === "--input" || a.startsWith("--input=")
const userSpecifiedInput = argv.some(
(a) => a === '-i' || a === '--input' || a.startsWith('--input='),
);
const userSpecifiedOutput = argv.some((a) =>
a === "-o" || a === "--output" || a.startsWith("--output=")
const userSpecifiedOutput = argv.some(
(a) => a === '-o' || a === '--output' || a.startsWith('--output='),
);
const noPathArgs = !userSpecifiedInput && !userSpecifiedOutput;
const noPathArguments = !userSpecifiedInput && !userSpecifiedOutput;
if (noPathArgs) {
if (noPathArguments) {
const detectedRoot = await findProjectRoot(process.cwd());
const suggestedOutput = detectedRoot
? path.join(detectedRoot, "flattened-codebase.xml")
: path.resolve("flattened-codebase.xml");
? path.join(detectedRoot, 'flattened-codebase.xml')
: path.resolve('flattened-codebase.xml');
if (detectedRoot) {
const useDefaults = await promptYesNo(
@@ -107,26 +101,25 @@ program
inputDir = detectedRoot;
outputPath = suggestedOutput;
} else {
inputDir = await promptPath(
"Enter input directory path",
process.cwd(),
);
inputDir = await promptPath('Enter input directory path', process.cwd());
outputPath = await promptPath(
"Enter output file path",
path.join(inputDir, "flattened-codebase.xml"),
'Enter output file path',
path.join(inputDir, 'flattened-codebase.xml'),
);
}
} else {
console.log("Could not auto-detect a project root.");
inputDir = await promptPath(
"Enter input directory path",
process.cwd(),
);
console.log('Could not auto-detect a project root.');
inputDir = await promptPath('Enter input directory path', process.cwd());
outputPath = await promptPath(
"Enter output file path",
path.join(inputDir, "flattened-codebase.xml"),
'Enter output file path',
path.join(inputDir, 'flattened-codebase.xml'),
);
}
} else {
console.error(
'Could not auto-detect a project root and no arguments were provided. Please specify -i/--input and -o/--output.',
);
process.exit(1);
}
// Ensure output directory exists
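
The explicit-flag detection earlier in this hunk exists because commander always fills in defaults, so `options.input` alone cannot distinguish "the user passed `-i .`" from "the default was applied". A standalone sketch of the same raw-argv check:

```js
// Run as: node sketch.js --input=src
const argv = process.argv.slice(2);

const userSpecifiedInput = argv.some(
  (a) => a === '-i' || a === '--input' || a.startsWith('--input='),
);
const userSpecifiedOutput = argv.some(
  (a) => a === '-o' || a === '--output' || a.startsWith('--output='),
);

console.log({ userSpecifiedInput, userSpecifiedOutput });
// → { userSpecifiedInput: true, userSpecifiedOutput: false }
```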
@@ -134,24 +127,23 @@ program
try {
// Verify input directory exists
if (!await fs.pathExists(inputDir)) {
if (!(await fs.pathExists(inputDir))) {
console.error(`❌ Error: Input directory does not exist: ${inputDir}`);
process.exit(1);
}
// Import ora dynamically
const { default: ora } = await import("ora");
const { default: ora } = await import('ora');
// Start file discovery with spinner
const discoverySpinner = ora("🔍 Discovering files...").start();
const discoverySpinner = ora('🔍 Discovering files...').start();
const files = await discoverFiles(inputDir);
const filteredFiles = await filterFiles(files, inputDir);
discoverySpinner.succeed(
`📁 Found ${filteredFiles.length} files to include`,
);
discoverySpinner.succeed(`📁 Found ${filteredFiles.length} files to include`);
// Process files with progress tracking
const processingSpinner = ora("📄 Processing files...").start();
console.log('Reading file contents');
const processingSpinner = ora('📄 Processing files...').start();
const aggregatedContent = await aggregateFileContents(
filteredFiles,
inputDir,
@@ -165,31 +157,23 @@ program
}
// Generate XML output using streaming
const xmlSpinner = ora("🔧 Generating XML output...").start();
const xmlSpinner = ora('🔧 Generating XML output...').start();
await generateXMLOutput(aggregatedContent, outputPath);
xmlSpinner.succeed("📝 XML generation completed");
xmlSpinner.succeed('📝 XML generation completed');
// Calculate and display statistics
const outputStats = await fs.stat(outputPath);
const stats = await calculateStatistics(
aggregatedContent,
outputStats.size,
inputDir,
);
const stats = await calculateStatistics(aggregatedContent, outputStats.size, inputDir);
// Display completion summary
console.log("\n📊 Completion Summary:");
console.log('\n📊 Completion Summary:');
console.log(
`✅ Successfully processed ${filteredFiles.length} files into ${
path.basename(outputPath)
}`,
`✅ Successfully processed ${filteredFiles.length} files into ${path.basename(outputPath)}`,
);
console.log(`📁 Output file: ${outputPath}`);
console.log(`📏 Total source size: ${stats.totalSize}`);
console.log(`📄 Generated XML size: ${stats.xmlSize}`);
console.log(
`📝 Total lines of code: ${stats.totalLines.toLocaleString()}`,
);
console.log(`📝 Total lines of code: ${stats.totalLines.toLocaleString()}`);
console.log(`🔢 Estimated tokens: ${stats.estimatedTokens}`);
console.log(
`📊 File breakdown: ${stats.textFiles} text, ${stats.binaryFiles} binary, ${stats.errorFiles} errors\n`,
@@ -197,92 +181,75 @@ program
// Ask user if they want detailed stats + markdown report
const generateDetailed = await promptYesNo(
"Generate detailed stats (console + markdown) now?",
'Generate detailed stats (console + markdown) now?',
true,
);
if (generateDetailed) {
// Additional detailed stats
console.log("\n📈 Size Percentiles:");
console.log('\n📈 Size Percentiles:');
console.log(
` Avg: ${
Math.round(stats.avgFileSize).toLocaleString()
} B, Median: ${
Math.round(stats.medianFileSize).toLocaleString()
} B, p90: ${stats.p90.toLocaleString()} B, p95: ${stats.p95.toLocaleString()} B, p99: ${stats.p99.toLocaleString()} B`,
` Avg: ${Math.round(stats.avgFileSize).toLocaleString()} B, Median: ${Math.round(
stats.medianFileSize,
).toLocaleString()} B, p90: ${stats.p90.toLocaleString()} B, p95: ${stats.p95.toLocaleString()} B, p99: ${stats.p99.toLocaleString()} B`,
);
if (Array.isArray(stats.histogram) && stats.histogram.length) {
console.log("\n🧮 Size Histogram:");
if (Array.isArray(stats.histogram) && stats.histogram.length > 0) {
console.log('\n🧮 Size Histogram:');
for (const b of stats.histogram.slice(0, 2)) {
console.log(
` ${b.label}: ${b.count} files, ${b.bytes.toLocaleString()} bytes`,
);
console.log(` ${b.label}: ${b.count} files, ${b.bytes.toLocaleString()} bytes`);
}
if (stats.histogram.length > 2) {
console.log(` … and ${stats.histogram.length - 2} more buckets`);
}
}
if (Array.isArray(stats.byExtension) && stats.byExtension.length) {
if (Array.isArray(stats.byExtension) && stats.byExtension.length > 0) {
const topExt = stats.byExtension.slice(0, 2);
console.log("\n📦 Top Extensions:");
console.log('\n📦 Top Extensions:');
for (const e of topExt) {
const pct = stats.totalBytes
? ((e.bytes / stats.totalBytes) * 100)
: 0;
const pct = stats.totalBytes ? (e.bytes / stats.totalBytes) * 100 : 0;
console.log(
` ${e.ext}: ${e.count} files, ${e.bytes.toLocaleString()} bytes (${
pct.toFixed(2)
}%)`,
` ${e.ext}: ${e.count} files, ${e.bytes.toLocaleString()} bytes (${pct.toFixed(
2,
)}%)`,
);
}
if (stats.byExtension.length > 2) {
console.log(
` … and ${stats.byExtension.length - 2} more extensions`,
);
console.log(` … and ${stats.byExtension.length - 2} more extensions`);
}
}
if (Array.isArray(stats.byDirectory) && stats.byDirectory.length) {
if (Array.isArray(stats.byDirectory) && stats.byDirectory.length > 0) {
const topDir = stats.byDirectory.slice(0, 2);
console.log("\n📂 Top Directories:");
console.log('\n📂 Top Directories:');
for (const d of topDir) {
const pct = stats.totalBytes
? ((d.bytes / stats.totalBytes) * 100)
: 0;
const pct = stats.totalBytes ? (d.bytes / stats.totalBytes) * 100 : 0;
console.log(
` ${d.dir}: ${d.count} files, ${d.bytes.toLocaleString()} bytes (${
pct.toFixed(2)
}%)`,
` ${d.dir}: ${d.count} files, ${d.bytes.toLocaleString()} bytes (${pct.toFixed(
2,
)}%)`,
);
}
if (stats.byDirectory.length > 2) {
console.log(
` … and ${stats.byDirectory.length - 2} more directories`,
);
console.log(` … and ${stats.byDirectory.length - 2} more directories`);
}
}
if (
Array.isArray(stats.depthDistribution) &&
stats.depthDistribution.length
) {
console.log("\n🌳 Depth Distribution:");
if (Array.isArray(stats.depthDistribution) && stats.depthDistribution.length > 0) {
console.log('\n🌳 Depth Distribution:');
const dd = stats.depthDistribution.slice(0, 2);
let line = " " + dd.map((d) => `${d.depth}:${d.count}`).join(" ");
let line = ' ' + dd.map((d) => `${d.depth}:${d.count}`).join(' ');
if (stats.depthDistribution.length > 2) {
line += ` … +${stats.depthDistribution.length - 2} more`;
}
console.log(line);
}
if (Array.isArray(stats.longestPaths) && stats.longestPaths.length) {
console.log("\n🧵 Longest Paths:");
if (Array.isArray(stats.longestPaths) && stats.longestPaths.length > 0) {
console.log('\n🧵 Longest Paths:');
for (const p of stats.longestPaths.slice(0, 2)) {
console.log(
` ${p.path} (${p.length} chars, ${p.size.toLocaleString()} bytes)`,
);
console.log(` ${p.path} (${p.length} chars, ${p.size.toLocaleString()} bytes)`);
}
if (stats.longestPaths.length > 2) {
console.log(` … and ${stats.longestPaths.length - 2} more paths`);
@@ -290,7 +257,7 @@ program
}
if (stats.temporal) {
console.log("\n⏱ Temporal:");
console.log('\n⏱ Temporal:');
if (stats.temporal.oldest) {
console.log(
` Oldest: ${stats.temporal.oldest.path} (${stats.temporal.oldest.mtime})`,
@@ -302,104 +269,82 @@ program
);
}
if (Array.isArray(stats.temporal.ageBuckets)) {
console.log(" Age buckets:");
console.log(' Age buckets:');
for (const b of stats.temporal.ageBuckets.slice(0, 2)) {
console.log(
` ${b.label}: ${b.count} files, ${b.bytes.toLocaleString()} bytes`,
);
console.log(` ${b.label}: ${b.count} files, ${b.bytes.toLocaleString()} bytes`);
}
if (stats.temporal.ageBuckets.length > 2) {
console.log(
` … and ${
stats.temporal.ageBuckets.length - 2
} more buckets`,
);
console.log(` … and ${stats.temporal.ageBuckets.length - 2} more buckets`);
}
}
}
if (stats.quality) {
console.log("\n✅ Quality Signals:");
console.log('\n✅ Quality Signals:');
console.log(` Zero-byte files: ${stats.quality.zeroByteFiles}`);
console.log(` Empty text files: ${stats.quality.emptyTextFiles}`);
console.log(` Hidden files: ${stats.quality.hiddenFiles}`);
console.log(` Symlinks: ${stats.quality.symlinks}`);
console.log(
` Large files (>= ${
(stats.quality.largeThreshold / (1024 * 1024)).toFixed(0)
} MB): ${stats.quality.largeFilesCount}`,
` Large files (>= ${(stats.quality.largeThreshold / (1024 * 1024)).toFixed(
0,
)} MB): ${stats.quality.largeFilesCount}`,
);
console.log(
` Suspiciously large files (>= 100 MB): ${stats.quality.suspiciousLargeFilesCount}`,
);
}
if (
Array.isArray(stats.duplicateCandidates) &&
stats.duplicateCandidates.length
) {
console.log("\n🧬 Duplicate Candidates:");
if (Array.isArray(stats.duplicateCandidates) && stats.duplicateCandidates.length > 0) {
console.log('\n🧬 Duplicate Candidates:');
for (const d of stats.duplicateCandidates.slice(0, 2)) {
console.log(
` ${d.reason}: ${d.count} files @ ${d.size.toLocaleString()} bytes`,
);
console.log(` ${d.reason}: ${d.count} files @ ${d.size.toLocaleString()} bytes`);
}
if (stats.duplicateCandidates.length > 2) {
console.log(
` … and ${stats.duplicateCandidates.length - 2} more groups`,
);
console.log(` … and ${stats.duplicateCandidates.length - 2} more groups`);
}
}
if (typeof stats.compressibilityRatio === "number") {
if (typeof stats.compressibilityRatio === 'number') {
console.log(
`\n🗜️ Compressibility ratio (sampled): ${
(stats.compressibilityRatio * 100).toFixed(2)
}%`,
`\n🗜️ Compressibility ratio (sampled): ${(stats.compressibilityRatio * 100).toFixed(
2,
)}%`,
);
}
if (stats.git && stats.git.isRepo) {
console.log("\n🔧 Git:");
console.log('\n🔧 Git:');
console.log(
` Tracked: ${stats.git.trackedCount} files, ${stats.git.trackedBytes.toLocaleString()} bytes`,
);
console.log(
` Untracked: ${stats.git.untrackedCount} files, ${stats.git.untrackedBytes.toLocaleString()} bytes`,
);
if (
Array.isArray(stats.git.lfsCandidates) &&
stats.git.lfsCandidates.length
) {
console.log(" LFS candidates (top 2):");
if (Array.isArray(stats.git.lfsCandidates) && stats.git.lfsCandidates.length > 0) {
console.log(' LFS candidates (top 2):');
for (const f of stats.git.lfsCandidates.slice(0, 2)) {
console.log(` ${f.path} (${f.size.toLocaleString()} bytes)`);
}
if (stats.git.lfsCandidates.length > 2) {
console.log(
` … and ${stats.git.lfsCandidates.length - 2} more`,
);
console.log(` … and ${stats.git.lfsCandidates.length - 2} more`);
}
}
}
if (Array.isArray(stats.largestFiles) && stats.largestFiles.length) {
console.log("\n📚 Largest Files (top 2):");
if (Array.isArray(stats.largestFiles) && stats.largestFiles.length > 0) {
console.log('\n📚 Largest Files (top 2):');
for (const f of stats.largestFiles.slice(0, 2)) {
// Show LOC for text files when available; omit ext and mtime
let locStr = "";
let locStr = '';
if (!f.isBinary && Array.isArray(aggregatedContent?.textFiles)) {
const tf = aggregatedContent.textFiles.find((t) =>
t.path === f.path
);
if (tf && typeof tf.lines === "number") {
const tf = aggregatedContent.textFiles.find((t) => t.path === f.path);
if (tf && typeof tf.lines === 'number') {
locStr = `, LOC: ${tf.lines.toLocaleString()}`;
}
}
console.log(
` ${f.path} ${f.sizeFormatted} (${
f.percentOfTotal.toFixed(2)
}%)${locStr}`,
` ${f.path} ${f.sizeFormatted} (${f.percentOfTotal.toFixed(2)}%)${locStr}`,
);
}
if (stats.largestFiles.length > 2) {
@@ -409,262 +354,214 @@ program
// Write a comprehensive markdown report next to the XML
{
const mdPath = outputPath.endsWith(".xml")
? outputPath.replace(/\.xml$/i, ".stats.md")
: outputPath + ".stats.md";
const mdPath = outputPath.endsWith('.xml')
? outputPath.replace(/\.xml$/i, '.stats.md')
: outputPath + '.stats.md';
try {
const pct = (num, den) => (den ? ((num / den) * 100) : 0);
const pct = (num, den) => (den ? (num / den) * 100 : 0);
const md = [];
md.push(`# 🧾 Flatten Stats for ${path.basename(outputPath)}`);
md.push("");
md.push("## 📊 Summary");
md.push(`- Total source size: ${stats.totalSize}`);
md.push(`- Generated XML size: ${stats.xmlSize}`);
md.push(
`# 🧾 Flatten Stats for ${path.basename(outputPath)}`,
'',
'## 📊 Summary',
`- Total source size: ${stats.totalSize}`,
`- Generated XML size: ${stats.xmlSize}`,
`- Total lines of code: ${stats.totalLines.toLocaleString()}`,
);
md.push(`- Estimated tokens: ${stats.estimatedTokens}`);
md.push(
`- Estimated tokens: ${stats.estimatedTokens}`,
`- File breakdown: ${stats.textFiles} text, ${stats.binaryFiles} binary, ${stats.errorFiles} errors`,
'',
'## 📈 Size Percentiles',
`Avg: ${Math.round(stats.avgFileSize).toLocaleString()} B, Median: ${Math.round(
stats.medianFileSize,
).toLocaleString()} B, p90: ${stats.p90.toLocaleString()} B, p95: ${stats.p95.toLocaleString()} B, p99: ${stats.p99.toLocaleString()} B`,
'',
);
md.push("");
// Percentiles
md.push("## 📈 Size Percentiles");
md.push(
`Avg: ${
Math.round(stats.avgFileSize).toLocaleString()
} B, Median: ${
Math.round(stats.medianFileSize).toLocaleString()
} B, p90: ${stats.p90.toLocaleString()} B, p95: ${stats.p95.toLocaleString()} B, p99: ${stats.p99.toLocaleString()} B`,
);
md.push("");
// Histogram
if (Array.isArray(stats.histogram) && stats.histogram.length) {
md.push("## 🧮 Size Histogram");
md.push("| Bucket | Files | Bytes |");
md.push("| --- | ---: | ---: |");
for (const b of stats.histogram) {
if (Array.isArray(stats.histogram) && stats.histogram.length > 0) {
md.push(
`| ${b.label} | ${b.count} | ${b.bytes.toLocaleString()} |`,
'## 🧮 Size Histogram',
'| Bucket | Files | Bytes |',
'| --- | ---: | ---: |',
);
for (const b of stats.histogram) {
md.push(`| ${b.label} | ${b.count} | ${b.bytes.toLocaleString()} |`);
}
md.push("");
md.push('');
}
// Top Extensions
if (Array.isArray(stats.byExtension) && stats.byExtension.length) {
md.push("## 📦 Top Extensions by Bytes (Top 20)");
md.push("| Ext | Files | Bytes | % of total |");
md.push("| --- | ---: | ---: | ---: |");
if (Array.isArray(stats.byExtension) && stats.byExtension.length > 0) {
md.push(
'## 📦 Top Extensions by Bytes (Top 20)',
'| Ext | Files | Bytes | % of total |',
'| --- | ---: | ---: | ---: |',
);
for (const e of stats.byExtension.slice(0, 20)) {
const p = pct(e.bytes, stats.totalBytes);
md.push(
`| ${e.ext} | ${e.count} | ${e.bytes.toLocaleString()} | ${
p.toFixed(2)
}% |`,
`| ${e.ext} | ${e.count} | ${e.bytes.toLocaleString()} | ${p.toFixed(2)}% |`,
);
}
md.push("");
md.push('');
}
// Top Directories
if (Array.isArray(stats.byDirectory) && stats.byDirectory.length) {
md.push("## 📂 Top Directories by Bytes (Top 20)");
md.push("| Directory | Files | Bytes | % of total |");
md.push("| --- | ---: | ---: | ---: |");
if (Array.isArray(stats.byDirectory) && stats.byDirectory.length > 0) {
md.push(
'## 📂 Top Directories by Bytes (Top 20)',
'| Directory | Files | Bytes | % of total |',
'| --- | ---: | ---: | ---: |',
);
for (const d of stats.byDirectory.slice(0, 20)) {
const p = pct(d.bytes, stats.totalBytes);
md.push(
`| ${d.dir} | ${d.count} | ${d.bytes.toLocaleString()} | ${
p.toFixed(2)
}% |`,
`| ${d.dir} | ${d.count} | ${d.bytes.toLocaleString()} | ${p.toFixed(2)}% |`,
);
}
md.push("");
md.push('');
}
// Depth distribution
if (
Array.isArray(stats.depthDistribution) &&
stats.depthDistribution.length
) {
md.push("## 🌳 Depth Distribution");
md.push("| Depth | Count |");
md.push("| ---: | ---: |");
if (Array.isArray(stats.depthDistribution) && stats.depthDistribution.length > 0) {
md.push('## 🌳 Depth Distribution', '| Depth | Count |', '| ---: | ---: |');
for (const d of stats.depthDistribution) {
md.push(`| ${d.depth} | ${d.count} |`);
}
md.push("");
md.push('');
}
// Longest paths
if (
Array.isArray(stats.longestPaths) && stats.longestPaths.length
) {
md.push("## 🧵 Longest Paths (Top 25)");
md.push("| Path | Length | Bytes |");
md.push("| --- | ---: | ---: |");
for (const pth of stats.longestPaths) {
if (Array.isArray(stats.longestPaths) && stats.longestPaths.length > 0) {
md.push(
`| ${pth.path} | ${pth.length} | ${pth.size.toLocaleString()} |`,
'## 🧵 Longest Paths (Top 25)',
'| Path | Length | Bytes |',
'| --- | ---: | ---: |',
);
for (const pth of stats.longestPaths) {
md.push(`| ${pth.path} | ${pth.length} | ${pth.size.toLocaleString()} |`);
}
md.push("");
md.push('');
}
// Temporal
if (stats.temporal) {
md.push("## ⏱️ Temporal");
md.push('## ⏱️ Temporal');
if (stats.temporal.oldest) {
md.push(
`- Oldest: ${stats.temporal.oldest.path} (${stats.temporal.oldest.mtime})`,
);
md.push(`- Oldest: ${stats.temporal.oldest.path} (${stats.temporal.oldest.mtime})`);
}
if (stats.temporal.newest) {
md.push(
`- Newest: ${stats.temporal.newest.path} (${stats.temporal.newest.mtime})`,
);
md.push(`- Newest: ${stats.temporal.newest.path} (${stats.temporal.newest.mtime})`);
}
if (Array.isArray(stats.temporal.ageBuckets)) {
md.push("");
md.push("| Age | Files | Bytes |");
md.push("| --- | ---: | ---: |");
md.push('', '| Age | Files | Bytes |', '| --- | ---: | ---: |');
for (const b of stats.temporal.ageBuckets) {
md.push(
`| ${b.label} | ${b.count} | ${b.bytes.toLocaleString()} |`,
);
md.push(`| ${b.label} | ${b.count} | ${b.bytes.toLocaleString()} |`);
}
}
md.push("");
md.push('');
}
// Quality signals
if (stats.quality) {
md.push("## ✅ Quality Signals");
md.push(`- Zero-byte files: ${stats.quality.zeroByteFiles}`);
md.push(`- Empty text files: ${stats.quality.emptyTextFiles}`);
md.push(`- Hidden files: ${stats.quality.hiddenFiles}`);
md.push(`- Symlinks: ${stats.quality.symlinks}`);
md.push(
`- Large files (>= ${
(stats.quality.largeThreshold / (1024 * 1024)).toFixed(0)
} MB): ${stats.quality.largeFilesCount}`,
);
md.push(
'## ✅ Quality Signals',
`- Zero-byte files: ${stats.quality.zeroByteFiles}`,
`- Empty text files: ${stats.quality.emptyTextFiles}`,
`- Hidden files: ${stats.quality.hiddenFiles}`,
`- Symlinks: ${stats.quality.symlinks}`,
`- Large files (>= ${(stats.quality.largeThreshold / (1024 * 1024)).toFixed(0)} MB): ${stats.quality.largeFilesCount}`,
`- Suspiciously large files (>= 100 MB): ${stats.quality.suspiciousLargeFilesCount}`,
'',
);
md.push("");
}
// Duplicates
if (
Array.isArray(stats.duplicateCandidates) &&
stats.duplicateCandidates.length
) {
md.push("## 🧬 Duplicate Candidates");
md.push("| Reason | Files | Size (bytes) |");
md.push("| --- | ---: | ---: |");
for (const d of stats.duplicateCandidates) {
if (Array.isArray(stats.duplicateCandidates) && stats.duplicateCandidates.length > 0) {
md.push(
`| ${d.reason} | ${d.count} | ${d.size.toLocaleString()} |`,
'## 🧬 Duplicate Candidates',
'| Reason | Files | Size (bytes) |',
'| --- | ---: | ---: |',
);
for (const d of stats.duplicateCandidates) {
md.push(`| ${d.reason} | ${d.count} | ${d.size.toLocaleString()} |`);
}
md.push("");
// Detailed listing of duplicate file names and locations
md.push("### 🧬 Duplicate Groups Details");
md.push('', '### 🧬 Duplicate Groups Details');
let dupIndex = 1;
for (const d of stats.duplicateCandidates) {
md.push(
`#### Group ${dupIndex}: ${d.count} files @ ${d.size.toLocaleString()} bytes (${d.reason})`,
);
if (Array.isArray(d.files) && d.files.length) {
if (Array.isArray(d.files) && d.files.length > 0) {
for (const fp of d.files) {
md.push(`- ${fp}`);
}
} else {
md.push("- (file list unavailable)");
md.push('- (file list unavailable)');
}
md.push("");
md.push('');
dupIndex++;
}
md.push("");
md.push('');
}
// Compressibility
if (typeof stats.compressibilityRatio === "number") {
md.push("## 🗜️ Compressibility");
if (typeof stats.compressibilityRatio === 'number') {
md.push(
`Sampled compressibility ratio: ${
(stats.compressibilityRatio * 100).toFixed(2)
}%`,
'## 🗜️ Compressibility',
`Sampled compressibility ratio: ${(stats.compressibilityRatio * 100).toFixed(2)}%`,
'',
);
md.push("");
}
// Git
if (stats.git && stats.git.isRepo) {
md.push("## 🔧 Git");
md.push(
'## 🔧 Git',
`- Tracked: ${stats.git.trackedCount} files, ${stats.git.trackedBytes.toLocaleString()} bytes`,
);
md.push(
`- Untracked: ${stats.git.untrackedCount} files, ${stats.git.untrackedBytes.toLocaleString()} bytes`,
);
if (
Array.isArray(stats.git.lfsCandidates) &&
stats.git.lfsCandidates.length
) {
md.push("");
md.push("### 📦 LFS Candidates (Top 20)");
md.push("| Path | Bytes |");
md.push("| --- | ---: |");
if (Array.isArray(stats.git.lfsCandidates) && stats.git.lfsCandidates.length > 0) {
md.push('', '### 📦 LFS Candidates (Top 20)', '| Path | Bytes |', '| --- | ---: |');
for (const f of stats.git.lfsCandidates.slice(0, 20)) {
md.push(`| ${f.path} | ${f.size.toLocaleString()} |`);
}
}
md.push("");
md.push('');
}
// Largest Files
if (
Array.isArray(stats.largestFiles) && stats.largestFiles.length
) {
md.push("## 📚 Largest Files (Top 50)");
md.push("| Path | Size | % of total | LOC |");
md.push("| --- | ---: | ---: | ---: |");
for (const f of stats.largestFiles) {
let loc = "";
if (
!f.isBinary && Array.isArray(aggregatedContent?.textFiles)
) {
const tf = aggregatedContent.textFiles.find((t) =>
t.path === f.path
if (Array.isArray(stats.largestFiles) && stats.largestFiles.length > 0) {
md.push(
'## 📚 Largest Files (Top 50)',
'| Path | Size | % of total | LOC |',
'| --- | ---: | ---: | ---: |',
);
if (tf && typeof tf.lines === "number") {
for (const f of stats.largestFiles) {
let loc = '';
if (!f.isBinary && Array.isArray(aggregatedContent?.textFiles)) {
const tf = aggregatedContent.textFiles.find((t) => t.path === f.path);
if (tf && typeof tf.lines === 'number') {
loc = tf.lines.toLocaleString();
}
}
md.push(
`| ${f.path} | ${f.sizeFormatted} | ${
f.percentOfTotal.toFixed(2)
}% | ${loc} |`,
`| ${f.path} | ${f.sizeFormatted} | ${f.percentOfTotal.toFixed(2)}% | ${loc} |`,
);
}
md.push("");
md.push('');
}
await fs.writeFile(mdPath, md.join("\n"));
await fs.writeFile(mdPath, md.join('\n'));
console.log(`\n🧾 Detailed stats report written to: ${mdPath}`);
} catch (e) {
console.warn(`⚠️ Failed to write stats markdown: ${e.message}`);
} catch (error) {
console.warn(`⚠️ Failed to write stats markdown: ${error.message}`);
}
}
}
} catch (error) {
console.error("❌ Critical error:", error.message);
console.error("An unexpected error occurred.");
console.error('❌ Critical error:', error.message);
console.error('An unexpected error occurred.');
process.exit(1);
}
});
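Stepping back, the command body above is a five-stage pipeline. A condensed sketch using the helper names visible in this diff (prompts, spinners, and error handling stripped; any trailing arguments elided by the hunks are dropped here):

// discover -> filter -> aggregate -> emit XML -> compute stats
const files = await discoverFiles(inputDir);
const filteredFiles = await filterFiles(files, inputDir);
const aggregatedContent = await aggregateFileContents(filteredFiles, inputDir);
await generateXMLOutput(aggregatedContent, outputPath);
const outputStats = await fs.stat(outputPath);
const stats = await calculateStatistics(aggregatedContent, outputStats.size, inputDir);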

View File

@@ -1,10 +1,10 @@
const fs = require("fs-extra");
const path = require("node:path");
const fs = require('fs-extra');
const path = require('node:path');
// Deno/Node compatibility: explicitly import process
const process = require("node:process");
const { execFile } = require("node:child_process");
const { promisify } = require("node:util");
const process = require('node:process');
const { execFile } = require('node:child_process');
const { promisify } = require('node:util');
const execFileAsync = promisify(execFile);
// Simple memoization across calls (keyed by realpath of startDir)
@@ -18,7 +18,7 @@ async function _tryRun(cmd, args, cwd, timeoutMs = 500) {
windowsHide: true,
maxBuffer: 1024 * 1024,
});
const out = String(stdout || "").trim();
const out = String(stdout || '').trim();
return out || null;
} catch {
return null;
@@ -27,15 +27,17 @@ async function _tryRun(cmd, args, cwd, timeoutMs = 500) {
async function _detectVcsTopLevel(startDir) {
// Run common VCS root queries in parallel; ignore failures
const gitP = _tryRun("git", ["rev-parse", "--show-toplevel"], startDir);
const hgP = _tryRun("hg", ["root"], startDir);
const gitP = _tryRun('git', ['rev-parse', '--show-toplevel'], startDir);
const hgP = _tryRun('hg', ['root'], startDir);
const svnP = (async () => {
const show = await _tryRun("svn", ["info", "--show-item", "wc-root"], startDir);
const show = await _tryRun('svn', ['info', '--show-item', 'wc-root'], startDir);
if (show) return show;
const info = await _tryRun("svn", ["info"], startDir);
const info = await _tryRun('svn', ['info'], startDir);
if (info) {
const line = info.split(/\r?\n/).find((l) => l.toLowerCase().startsWith("working copy root path:"));
if (line) return line.split(":").slice(1).join(":").trim();
const line = info
.split(/\r?\n/)
.find((l) => l.toLowerCase().startsWith('working copy root path:'));
if (line) return line.split(':').slice(1).join(':').trim();
}
return null;
})();
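The rest of _detectVcsTopLevel sits outside this hunk; given the three probes above, the natural completion is to await them together and prefer git, then hg, then svn. A sketch under that assumption:

// Each probe resolves to a toplevel path string or null.
const [git, hg, svn] = await Promise.all([gitP, hgP, svnP]);
return git || hg || svn || null;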
@@ -71,90 +73,92 @@ async function findProjectRoot(startDir) {
const checks = [];
const add = (rel, weight) => {
const makePath = (d) => Array.isArray(rel) ? path.join(d, ...rel) : path.join(d, rel);
const makePath = (d) => (Array.isArray(rel) ? path.join(d, ...rel) : path.join(d, rel));
checks.push({ makePath, weight });
};
// Highest priority: explicit sentinel markers
add(".project-root", 110);
add(".workspace-root", 110);
add(".repo-root", 110);
add('.project-root', 110);
add('.workspace-root', 110);
add('.repo-root', 110);
// Highest priority: VCS roots
add(".git", 100);
add(".hg", 95);
add(".svn", 95);
add('.git', 100);
add('.hg', 95);
add('.svn', 95);
// Monorepo/workspace indicators
add("pnpm-workspace.yaml", 90);
add("lerna.json", 90);
add("turbo.json", 90);
add("nx.json", 90);
add("rush.json", 90);
add("go.work", 90);
add("WORKSPACE", 90);
add("WORKSPACE.bazel", 90);
add("MODULE.bazel", 90);
add("pants.toml", 90);
add('pnpm-workspace.yaml', 90);
add('lerna.json', 90);
add('turbo.json', 90);
add('nx.json', 90);
add('rush.json', 90);
add('go.work', 90);
add('WORKSPACE', 90);
add('WORKSPACE.bazel', 90);
add('MODULE.bazel', 90);
add('pants.toml', 90);
// Lockfiles and package-manager/top-level locks
add("yarn.lock", 85);
add("pnpm-lock.yaml", 85);
add("package-lock.json", 85);
add("bun.lockb", 85);
add("Cargo.lock", 85);
add("composer.lock", 85);
add("poetry.lock", 85);
add("Pipfile.lock", 85);
add("Gemfile.lock", 85);
add('yarn.lock', 85);
add('pnpm-lock.yaml', 85);
add('package-lock.json', 85);
add('bun.lockb', 85);
add('Cargo.lock', 85);
add('composer.lock', 85);
add('poetry.lock', 85);
add('Pipfile.lock', 85);
add('Gemfile.lock', 85);
// Build-system root indicators
add("settings.gradle", 80);
add("settings.gradle.kts", 80);
add("gradlew", 80);
add("pom.xml", 80);
add("build.sbt", 80);
add(["project", "build.properties"], 80);
add('settings.gradle', 80);
add('settings.gradle.kts', 80);
add('gradlew', 80);
add('pom.xml', 80);
add('build.sbt', 80);
add(['project', 'build.properties'], 80);
// Language/project config markers
add("deno.json", 75);
add("deno.jsonc", 75);
add("pyproject.toml", 75);
add("Pipfile", 75);
add("requirements.txt", 75);
add("go.mod", 75);
add("Cargo.toml", 75);
add("composer.json", 75);
add("mix.exs", 75);
add("Gemfile", 75);
add("CMakeLists.txt", 75);
add("stack.yaml", 75);
add("cabal.project", 75);
add("rebar.config", 75);
add("pubspec.yaml", 75);
add("flake.nix", 75);
add("shell.nix", 75);
add("default.nix", 75);
add(".tool-versions", 75);
add("package.json", 74); // generic Node project (lower than lockfiles/workspaces)
add('deno.json', 75);
add('deno.jsonc', 75);
add('pyproject.toml', 75);
add('Pipfile', 75);
add('requirements.txt', 75);
add('go.mod', 75);
add('Cargo.toml', 75);
add('composer.json', 75);
add('mix.exs', 75);
add('Gemfile', 75);
add('CMakeLists.txt', 75);
add('stack.yaml', 75);
add('cabal.project', 75);
add('rebar.config', 75);
add('pubspec.yaml', 75);
add('flake.nix', 75);
add('shell.nix', 75);
add('default.nix', 75);
add('.tool-versions', 75);
add('package.json', 74); // generic Node project (lower than lockfiles/workspaces)
// Changesets
add([".changeset", "config.json"], 70);
add(".changeset", 70);
add(['.changeset', 'config.json'], 70);
add('.changeset', 70);
// Custom markers via env (comma-separated names)
if (process.env.PROJECT_ROOT_MARKERS) {
for (const name of process.env.PROJECT_ROOT_MARKERS.split(",").map((s) => s.trim()).filter(Boolean)) {
for (const name of process.env.PROJECT_ROOT_MARKERS.split(',')
.map((s) => s.trim())
.filter(Boolean)) {
add(name, 72);
}
}
/** Check for package.json with "workspaces" */
const hasWorkspacePackageJson = async (d) => {
const pkgPath = path.join(d, "package.json");
const pkgPath = path.join(d, 'package.json');
if (!(await exists(pkgPath))) return false;
try {
const raw = await fs.readFile(pkgPath, "utf8");
const raw = await fs.readFile(pkgPath, 'utf8');
const pkg = JSON.parse(raw);
return Boolean(pkg && pkg.workspaces);
} catch {
@@ -172,9 +176,8 @@ async function findProjectRoot(startDir) {
while (true) {
// Special check: package.json with "workspaces"
if (await hasWorkspacePackageJson(dir)) {
if (!best || 90 >= best.weight) best = { dir, weight: 90 };
}
if ((await hasWorkspacePackageJson(dir)) && (!best || 90 >= best.weight))
best = { dir, weight: 90 };
// Evaluate all other checks in parallel
const results = await Promise.all(
@@ -201,4 +204,3 @@ async function findProjectRoot(startDir) {
}
module.exports = { findProjectRoot };
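A minimal caller, for orientation (the no-match fallback value is outside these hunks, so it is only assumed to be falsy here):

const { findProjectRoot } = require('./projectRoot.js');

(async () => {
  // Walks upward from the start directory, scoring each marker by weight;
  // on ties the outermost directory wins (see the tests below).
  const root = await findProjectRoot(process.cwd());
  console.log(root || 'no project root detected');
})();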

View File

@@ -1,11 +1,11 @@
const os = require("node:os");
const path = require("node:path");
const readline = require("node:readline");
const process = require("node:process");
const os = require('node:os');
const path = require('node:path');
const readline = require('node:readline');
const process = require('node:process');
function expandHome(p) {
if (!p) return p;
if (p.startsWith("~")) return path.join(os.homedir(), p.slice(1));
if (p.startsWith('~')) return path.join(os.homedir(), p.slice(1));
return p;
}
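promptQuestion itself is unchanged and therefore elided from this hunk; a readline-based shape consistent with how it is awaited below would be (a sketch, not the repo's exact body):

function promptQuestion(question) {
  // One-shot prompt on stdin; resolves with the raw answer.
  return new Promise((resolve) => {
    const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
    rl.question(question, (answer) => {
      rl.close();
      resolve(answer);
    });
  });
}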
@@ -27,16 +27,16 @@ function promptQuestion(question) {
}
async function promptYesNo(question, defaultYes = true) {
const suffix = defaultYes ? " [Y/n] " : " [y/N] ";
const suffix = defaultYes ? ' [Y/n] ' : ' [y/N] ';
const ans = (await promptQuestion(`${question}${suffix}`)).trim().toLowerCase();
if (!ans) return defaultYes;
if (["y", "yes"].includes(ans)) return true;
if (["n", "no"].includes(ans)) return false;
if (['y', 'yes'].includes(ans)) return true;
if (['n', 'no'].includes(ans)) return false;
return promptYesNo(question, defaultYes);
}
async function promptPath(question, defaultValue) {
const prompt = `${question}${defaultValue ? ` (default: ${defaultValue})` : ""}: `;
const prompt = `${question}${defaultValue ? ` (default: ${defaultValue})` : ''}: `;
const ans = (await promptQuestion(prompt)).trim();
return expandHome(ans || defaultValue);
}
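Usage, mirroring the flattener's interactive path above (inside any async context):

// Empty input falls back to the default; '~/...' answers are expanded by expandHome.
const dir = await promptPath('Enter input directory path', process.cwd());
const detailed = await promptYesNo('Generate detailed stats (console + markdown) now?', true);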

View File

@@ -1,11 +1,11 @@
"use strict";
'use strict';
const fs = require("node:fs/promises");
const path = require("node:path");
const zlib = require("node:zlib");
const { Buffer } = require("node:buffer");
const crypto = require("node:crypto");
const cp = require("node:child_process");
const fs = require('node:fs/promises');
const path = require('node:path');
const zlib = require('node:zlib');
const { Buffer } = require('node:buffer');
const crypto = require('node:crypto');
const cp = require('node:child_process');
const KB = 1024;
const MB = 1024 * KB;
@@ -34,17 +34,19 @@ async function enrichAllFiles(textFiles, binaryFiles) {
const allFiles = [];
async function enrich(file, isBinary) {
const ext = (path.extname(file.path) || "").toLowerCase();
const dir = path.dirname(file.path) || ".";
const ext = (path.extname(file.path) || '').toLowerCase();
const dir = path.dirname(file.path) || '.';
const depth = file.path.split(path.sep).filter(Boolean).length;
const hidden = file.path.split(path.sep).some((seg) => seg.startsWith("."));
const hidden = file.path.split(path.sep).some((seg) => seg.startsWith('.'));
let mtimeMs = 0;
let isSymlink = false;
try {
const lst = await fs.lstat(file.absolutePath);
mtimeMs = lst.mtimeMs;
isSymlink = lst.isSymbolicLink();
} catch (_) { /* ignore lstat errors during enrichment */ }
} catch {
/* ignore lstat errors during enrichment */
}
allFiles.push({
path: file.path,
absolutePath: file.absolutePath,
@@ -67,18 +69,18 @@ async function enrichAllFiles(textFiles, binaryFiles) {
function buildHistogram(allFiles) {
const buckets = [
    [1 * KB, "0–1KB"],
    [10 * KB, "1–10KB"],
    [100 * KB, "10–100KB"],
    [1 * MB, "100KB–1MB"],
    [10 * MB, "1–10MB"],
    [100 * MB, "10–100MB"],
    [Infinity, ">=100MB"],
    [1 * KB, '0–1KB'],
    [10 * KB, '1–10KB'],
    [100 * KB, '10–100KB'],
    [1 * MB, '100KB–1MB'],
    [10 * MB, '1–10MB'],
    [100 * MB, '10–100MB'],
    [Infinity, '>=100MB'],

];
const histogram = buckets.map(([_, label]) => ({ label, count: 0, bytes: 0 }));
for (const f of allFiles) {
for (let i = 0; i < buckets.length; i++) {
if (f.size < buckets[i][0]) {
for (const [i, bucket] of buckets.entries()) {
if (f.size < bucket[0]) {
histogram[i].count++;
histogram[i].bytes += f.size;
break;
@@ -91,13 +93,13 @@ function buildHistogram(allFiles) {
function aggregateByExtension(allFiles) {
const byExtension = new Map();
for (const f of allFiles) {
const key = f.ext || "<none>";
const key = f.ext || '<none>';
const v = byExtension.get(key) || { ext: key, count: 0, bytes: 0 };
v.count++;
v.bytes += f.size;
byExtension.set(key, v);
}
return Array.from(byExtension.values()).sort((a, b) => b.bytes - a.bytes);
return [...byExtension.values()].sort((a, b) => b.bytes - a.bytes);
}
function aggregateByDirectory(allFiles) {
@@ -109,15 +111,15 @@ function aggregateByDirectory(allFiles) {
byDirectory.set(dir, v);
}
for (const f of allFiles) {
const parts = f.dir === "." ? [] : f.dir.split(path.sep);
let acc = "";
const parts = f.dir === '.' ? [] : f.dir.split(path.sep);
let acc = '';
for (let i = 0; i < parts.length; i++) {
acc = i === 0 ? parts[0] : acc + path.sep + parts[i];
addDirBytes(acc, f.size);
}
if (parts.length === 0) addDirBytes(".", f.size);
if (parts.length === 0) addDirBytes('.', f.size);
}
return Array.from(byDirectory.values()).sort((a, b) => b.bytes - a.bytes);
return [...byDirectory.values()].sort((a, b) => b.bytes - a.bytes);
}
function computeDepthAndLongest(allFiles) {
@@ -129,21 +131,22 @@ function computeDepthAndLongest(allFiles) {
.sort((a, b) => b.path.length - a.path.length)
.slice(0, 25)
.map((f) => ({ path: f.path, length: f.path.length, size: f.size }));
const depthDist = Array.from(depthDistribution.entries())
const depthDist = [...depthDistribution.entries()]
.sort((a, b) => a[0] - b[0])
.map(([depth, count]) => ({ depth, count }));
return { depthDist, longestPaths };
}
function computeTemporal(allFiles, nowMs) {
let oldest = null, newest = null;
let oldest = null,
newest = null;
const ageBuckets = [
{ label: "> 1 year", minDays: 365, maxDays: Infinity, count: 0, bytes: 0 },
    { label: "6–12 months", minDays: 180, maxDays: 365, count: 0, bytes: 0 },
    { label: "1–6 months", minDays: 30, maxDays: 180, count: 0, bytes: 0 },
    { label: "7–30 days", minDays: 7, maxDays: 30, count: 0, bytes: 0 },
    { label: "1–7 days", minDays: 1, maxDays: 7, count: 0, bytes: 0 },
    { label: "< 1 day", minDays: 0, maxDays: 1, count: 0, bytes: 0 },
    { label: '> 1 year', minDays: 365, maxDays: Infinity, count: 0, bytes: 0 },
    { label: '6–12 months', minDays: 180, maxDays: 365, count: 0, bytes: 0 },
    { label: '1–6 months', minDays: 30, maxDays: 180, count: 0, bytes: 0 },
    { label: '7–30 days', minDays: 7, maxDays: 30, count: 0, bytes: 0 },
    { label: '1–7 days', minDays: 1, maxDays: 7, count: 0, bytes: 0 },
{ label: '< 1 day', minDays: 0, maxDays: 1, count: 0, bytes: 0 },
];
for (const f of allFiles) {
const ageDays = Math.max(0, (nowMs - (f.mtimeMs || nowMs)) / (24 * 60 * 60 * 1000));
@@ -158,15 +161,21 @@ function computeTemporal(allFiles, nowMs) {
if (!newest || f.mtimeMs > newest.mtimeMs) newest = f;
}
return {
oldest: oldest ? { path: oldest.path, mtime: oldest.mtimeMs ? new Date(oldest.mtimeMs).toISOString() : null } : null,
newest: newest ? { path: newest.path, mtime: newest.mtimeMs ? new Date(newest.mtimeMs).toISOString() : null } : null,
oldest: oldest
? { path: oldest.path, mtime: oldest.mtimeMs ? new Date(oldest.mtimeMs).toISOString() : null }
: null,
newest: newest
? { path: newest.path, mtime: newest.mtimeMs ? new Date(newest.mtimeMs).toISOString() : null }
: null,
ageBuckets,
};
}
function computeQuality(allFiles, textFiles) {
const zeroByteFiles = allFiles.filter((f) => f.size === 0).length;
const emptyTextFiles = textFiles.filter((f) => (f.size || 0) === 0 || (f.lines || 0) === 0).length;
const emptyTextFiles = textFiles.filter(
(f) => (f.size || 0) === 0 || (f.lines || 0) === 0,
).length;
const hiddenFiles = allFiles.filter((f) => f.hidden).length;
const symlinks = allFiles.filter((f) => f.isSymlink).length;
const largeThreshold = 50 * MB;
@@ -201,18 +210,31 @@ function computeDuplicates(allFiles, textFiles) {
for (const tf of textGroup) {
try {
const src = textFiles.find((x) => x.absolutePath === tf.absolutePath);
const content = src ? src.content : "";
const h = crypto.createHash("sha1").update(content).digest("hex");
const content = src ? src.content : '';
const h = crypto.createHash('sha1').update(content).digest('hex');
const g = contentHashGroups.get(h) || [];
g.push(tf);
contentHashGroups.set(h, g);
} catch (_) { /* ignore hashing errors for duplicate detection */ }
} catch {
/* ignore hashing errors for duplicate detection */
}
}
for (const [_h, g] of contentHashGroups.entries()) {
if (g.length > 1) duplicateCandidates.push({ reason: "same-size+text-hash", size: Number(sizeKey), count: g.length, files: g.map((f) => f.path) });
if (g.length > 1)
duplicateCandidates.push({
reason: 'same-size+text-hash',
size: Number(sizeKey),
count: g.length,
files: g.map((f) => f.path),
});
}
if (otherGroup.length > 1) {
duplicateCandidates.push({ reason: "same-size", size: Number(sizeKey), count: otherGroup.length, files: otherGroup.map((f) => f.path) });
duplicateCandidates.push({
reason: 'same-size',
size: Number(sizeKey),
count: otherGroup.length,
files: otherGroup.map((f) => f.path),
});
}
}
return duplicateCandidates;
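Design note: size is the cheap pre-filter here, so the sha1 only ever runs inside a same-size group, and it is a duplicate-candidate heuristic rather than anything security-sensitive. A toy illustration with hypothetical files:

const crypto = require('node:crypto');

const files = [
  { path: 'a.txt', size: 10, content: 'aaaaaaaaaa' },
  { path: 'b.txt', size: 4, content: 'dupe' },
  { path: 'c.txt', size: 4, content: 'dupe' },
];

// Stage 1: bucket by size; a unique size can never be a duplicate.
const bySize = new Map();
for (const f of files) {
  const group = bySize.get(f.size) || [];
  group.push(f);
  bySize.set(f.size, group);
}

// Stage 2: hash only inside multi-member buckets.
for (const group of bySize.values()) {
  if (group.length < 2) continue;
  const hashes = group.map((f) => crypto.createHash('sha1').update(f.content).digest('hex'));
  if (hashes[0] === hashes[1]) console.log('duplicates:', group.map((f) => f.path).join(', '));
}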
@@ -226,10 +248,12 @@ function estimateCompressibility(textFiles) {
const sampleLen = Math.min(256 * 1024, tf.size || 0);
if (sampleLen <= 0) continue;
const sample = tf.content.slice(0, sampleLen);
const gz = zlib.gzipSync(Buffer.from(sample, "utf8"));
const gz = zlib.gzipSync(Buffer.from(sample, 'utf8'));
compSampleBytes += sampleLen;
compCompressedBytes += gz.length;
} catch (_) { /* ignore compression errors during sampling */ }
} catch {
/* ignore compression errors during sampling */
}
}
return compSampleBytes > 0 ? compCompressedBytes / compSampleBytes : null;
}
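Interpreting the ratio: lower means more compressible. A quick check with a hypothetical, highly repetitive 128 KB text file:

const ratio = estimateCompressibility([{ size: 128 * 1024, content: 'ab'.repeat(64 * 1024) }]);
if (typeof ratio === 'number') {
  // Repetitive text gzips to a tiny fraction of its size, so this prints well under 1%.
  console.log(`sampled compressibility: ${(ratio * 100).toFixed(2)}%`);
}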
@@ -245,20 +269,34 @@ function computeGitInfo(allFiles, rootDir, largeThreshold) {
};
try {
if (!rootDir) return info;
const top = cp.execFileSync("git", ["rev-parse", "--show-toplevel"], { cwd: rootDir, stdio: ["ignore", "pipe", "ignore"] }).toString().trim();
const top = cp
.execFileSync('git', ['rev-parse', '--show-toplevel'], {
cwd: rootDir,
stdio: ['ignore', 'pipe', 'ignore'],
})
.toString()
.trim();
if (!top) return info;
info.isRepo = true;
const out = cp.execFileSync("git", ["ls-files", "-z"], { cwd: rootDir, stdio: ["ignore", "pipe", "ignore"] });
const tracked = new Set(out.toString().split("\0").filter(Boolean));
let trackedBytes = 0, trackedCount = 0, untrackedBytes = 0, untrackedCount = 0;
const out = cp.execFileSync('git', ['ls-files', '-z'], {
cwd: rootDir,
stdio: ['ignore', 'pipe', 'ignore'],
});
const tracked = new Set(out.toString().split('\0').filter(Boolean));
let trackedBytes = 0,
trackedCount = 0,
untrackedBytes = 0,
untrackedCount = 0;
const lfsCandidates = [];
for (const f of allFiles) {
const isTracked = tracked.has(f.path);
if (isTracked) {
trackedCount++; trackedBytes += f.size;
trackedCount++;
trackedBytes += f.size;
if (f.size >= largeThreshold) lfsCandidates.push({ path: f.path, size: f.size });
} else {
untrackedCount++; untrackedBytes += f.size;
untrackedCount++;
untrackedBytes += f.size;
}
}
info.trackedCount = trackedCount;
@@ -266,7 +304,9 @@ function computeGitInfo(allFiles, rootDir, largeThreshold) {
info.untrackedCount = untrackedCount;
info.untrackedBytes = untrackedBytes;
info.lfsCandidates = lfsCandidates.sort((a, b) => b.size - a.size).slice(0, 50);
} catch (_) { /* git not available or not a repo, ignore */ }
} catch {
/* git not available or not a repo, ignore */
}
return info;
}
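Why ls-files -z: NUL-separated output survives paths containing spaces or newlines, which a plain line split would mangle. The same probe in isolation, a sketch mirroring the calls above:

const cp = require('node:child_process');

function listTracked(cwd) {
  // Returns a Set of repo-relative tracked paths, or null outside a git repo.
  try {
    const out = cp.execFileSync('git', ['ls-files', '-z'], { cwd, stdio: ['ignore', 'pipe', 'ignore'] });
    return new Set(out.toString().split('\0').filter(Boolean));
  } catch {
    return null;
  }
}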
@@ -280,34 +320,58 @@ function computeLargestFiles(allFiles, totalBytes) {
size: f.size,
sizeFormatted: formatSize(f.size),
percentOfTotal: toPct(f.size, totalBytes),
ext: f.ext || "",
ext: f.ext || '',
isBinary: f.isBinary,
mtime: f.mtimeMs ? new Date(f.mtimeMs).toISOString() : null,
}));
}
function mdTable(rows, headers) {
const header = `| ${headers.join(" | ")} |`;
const sep = `| ${headers.map(() => "---").join(" | ")} |`;
const body = rows.map((r) => `| ${r.join(" | ")} |`).join("\n");
const header = `| ${headers.join(' | ')} |`;
const sep = `| ${headers.map(() => '---').join(' | ')} |`;
const body = rows.map((r) => `| ${r.join(' | ')} |`).join('\n');
return `${header}\n${sep}\n${body}`;
}
function buildMarkdownReport(largestFiles, byExtensionArr, byDirectoryArr, totalBytes) {
const toPct = (num, den) => (den === 0 ? 0 : (num / den) * 100);
const md = [];
md.push("\n### Top Largest Files (Top 50)\n");
md.push(mdTable(
largestFiles.map((f) => [f.path, f.sizeFormatted, `${f.percentOfTotal.toFixed(2)}%`, f.ext || "", f.isBinary ? "binary" : "text"]),
["Path", "Size", "% of total", "Ext", "Type"],
));
md.push("\n\n### Top Extensions by Bytes (Top 20)\n");
const topExtRows = byExtensionArr.slice(0, 20).map((e) => [e.ext, String(e.count), formatSize(e.bytes), `${toPct(e.bytes, totalBytes).toFixed(2)}%`]);
md.push(mdTable(topExtRows, ["Ext", "Count", "Bytes", "% of total"]));
md.push("\n\n### Top Directories by Bytes (Top 20)\n");
const topDirRows = byDirectoryArr.slice(0, 20).map((d) => [d.dir, String(d.count), formatSize(d.bytes), `${toPct(d.bytes, totalBytes).toFixed(2)}%`]);
md.push(mdTable(topDirRows, ["Directory", "Files", "Bytes", "% of total"]));
return md.join("\n");
md.push(
'\n### Top Largest Files (Top 50)\n',
mdTable(
largestFiles.map((f) => [
f.path,
f.sizeFormatted,
`${f.percentOfTotal.toFixed(2)}%`,
f.ext || '',
f.isBinary ? 'binary' : 'text',
]),
['Path', 'Size', '% of total', 'Ext', 'Type'],
),
'\n\n### Top Extensions by Bytes (Top 20)\n',
);
const topExtRows = byExtensionArr
.slice(0, 20)
.map((e) => [
e.ext,
String(e.count),
formatSize(e.bytes),
`${toPct(e.bytes, totalBytes).toFixed(2)}%`,
]);
md.push(
mdTable(topExtRows, ['Ext', 'Count', 'Bytes', '% of total']),
'\n\n### Top Directories by Bytes (Top 20)\n',
);
const topDirRows = byDirectoryArr
.slice(0, 20)
.map((d) => [
d.dir,
String(d.count),
formatSize(d.bytes),
`${toPct(d.bytes, totalBytes).toFixed(2)}%`,
]);
md.push(mdTable(topDirRows, ['Directory', 'Files', 'Bytes', '% of total']));
return md.join('\n');
}
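mdTable in isolation, with hypothetical rows (cells are pre-stringified; headers are a flat array):

console.log(
  mdTable(
    [
      ['src/index.js', '12 KB', '3.10%'],
      ['README.md', '4 KB', '1.02%'],
    ],
    ['Path', 'Size', '% of total'],
  ),
);
// | Path | Size | % of total |
// | --- | --- | --- |
// | src/index.js | 12 KB | 3.10% |
// | README.md | 4 KB | 1.02% |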
module.exports = {

View File

@@ -1,4 +1,4 @@
const H = require("./stats.helpers.js");
const H = require('./stats.helpers.js');
async function calculateStatistics(aggregatedContent, xmlFileSize, rootDir) {
const { textFiles, binaryFiles, errors } = aggregatedContent;
@@ -10,8 +10,8 @@ async function calculateStatistics(aggregatedContent, xmlFileSize, rootDir) {
const allFiles = await H.enrichAllFiles(textFiles, binaryFiles);
const totalBytes = allFiles.reduce((s, f) => s + f.size, 0);
const sizes = allFiles.map((f) => f.size).sort((a, b) => a - b);
const avgSize = sizes.length ? totalBytes / sizes.length : 0;
const medianSize = sizes.length ? H.percentile(sizes, 50) : 0;
const avgSize = sizes.length > 0 ? totalBytes / sizes.length : 0;
const medianSize = sizes.length > 0 ? H.percentile(sizes, 50) : 0;
const p90 = H.percentile(sizes, 90);
const p95 = H.percentile(sizes, 95);
const p99 = H.percentile(sizes, 99);
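H.percentile is imported but defined outside these hunks; it is called on an ascending array with a 0-100 percentile, so a linear-interpolation sketch consistent with that call shape (an assumption, not the repo's code) is:

// sorted must be ascending; p in [0, 100].
function percentile(sorted, p) {
  if (sorted.length === 0) return 0;
  const rank = (p / 100) * (sorted.length - 1);
  const lo = Math.floor(rank);
  const hi = Math.ceil(rank);
  return sorted[lo] + (sorted[hi] - sorted[lo]) * (rank - lo);
}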

View File

@@ -1,4 +1,3 @@
#!/usr/bin/env node
/* deno-lint-ignore-file */
/*
Automatic test matrix for project root detection.
@@ -6,65 +5,65 @@
No external options or flags required. Safe to run multiple times.
*/
const os = require("node:os");
const path = require("node:path");
const fs = require("fs-extra");
const { promisify } = require("node:util");
const { execFile } = require("node:child_process");
const process = require("node:process");
const os = require('node:os');
const path = require('node:path');
const fs = require('fs-extra');
const { promisify } = require('node:util');
const { execFile } = require('node:child_process');
const process = require('node:process');
const execFileAsync = promisify(execFile);
const { findProjectRoot } = require("./projectRoot.js");
const { findProjectRoot } = require('./projectRoot.js');
async function cmdAvailable(cmd) {
try {
await execFileAsync(cmd, ["--version"], { timeout: 500, windowsHide: true });
await execFileAsync(cmd, ['--version'], { timeout: 500, windowsHide: true });
return true;
} catch {
return false;
}
async function testSvnMarker() {
const root = await mkTmpDir("svn");
const nested = path.join(root, "proj", "code");
async function testSvnMarker() {
const root = await mkTmpDir('svn');
const nested = path.join(root, 'proj', 'code');
await fs.ensureDir(nested);
await fs.ensureDir(path.join(root, ".svn"));
await fs.ensureDir(path.join(root, '.svn'));
const found = await findProjectRoot(nested);
assertEqual(found, root, ".svn marker should be detected");
return { name: "svn-marker", ok: true };
}
assertEqual(found, root, '.svn marker should be detected');
return { name: 'svn-marker', ok: true };
}
async function testSymlinkStart() {
const root = await mkTmpDir("symlink-start");
const nested = path.join(root, "a", "b");
async function testSymlinkStart() {
const root = await mkTmpDir('symlink-start');
const nested = path.join(root, 'a', 'b');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, ".project-root"), "\n");
const tmp = await mkTmpDir("symlink-tmp");
const link = path.join(tmp, "link-to-b");
await fs.writeFile(path.join(root, '.project-root'), '\n');
const tmp = await mkTmpDir('symlink-tmp');
const link = path.join(tmp, 'link-to-b');
try {
await fs.symlink(nested, link);
} catch {
// symlink may not be permitted on some systems; skip
return { name: "symlink-start", ok: true, skipped: true };
return { name: 'symlink-start', ok: true, skipped: true };
}
const found = await findProjectRoot(link);
assertEqual(found, root, "should resolve symlinked start to real root");
return { name: "symlink-start", ok: true };
}
assertEqual(found, root, 'should resolve symlinked start to real root');
return { name: 'symlink-start', ok: true };
}
async function testSubmoduleLikeInnerGitFile() {
const root = await mkTmpDir("submodule-like");
const mid = path.join(root, "mid");
const leaf = path.join(mid, "leaf");
async function testSubmoduleLikeInnerGitFile() {
const root = await mkTmpDir('submodule-like');
const mid = path.join(root, 'mid');
const leaf = path.join(mid, 'leaf');
await fs.ensureDir(leaf);
// outer repo
await fs.ensureDir(path.join(root, ".git"));
await fs.ensureDir(path.join(root, '.git'));
// inner submodule-like .git file
await fs.writeFile(path.join(mid, ".git"), "gitdir: ../.git/modules/mid\n");
await fs.writeFile(path.join(mid, '.git'), 'gitdir: ../.git/modules/mid\n');
const found = await findProjectRoot(leaf);
assertEqual(found, root, "outermost .git should win on tie weight");
return { name: "submodule-like-gitfile", ok: true };
}
assertEqual(found, root, 'outermost .git should win on tie weight');
return { name: 'submodule-like-gitfile', ok: true };
}
}
async function mkTmpDir(name) {
@@ -75,274 +74,283 @@ async function mkTmpDir(name) {
function assertEqual(actual, expected, msg) {
if (actual !== expected) {
throw new Error(`${msg}: expected=\"${expected}\" actual=\"${actual}\"`);
throw new Error(`${msg}: expected="${expected}" actual="${actual}"`);
}
}
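mkTmpDir is truncated by the hunk above; the tests only need a fresh unique directory whose real path is stable, so an os/fs-extra sketch (assumed, not the exact body) is:

async function mkTmpDir(name) {
  // Unique per-test directory under the OS temp root; the prefix is illustrative.
  const dir = await fs.mkdtemp(path.join(os.tmpdir(), `root-detect-${name}-`));
  // Resolve symlinks (e.g. macOS /tmp) so assertEqual path comparisons hold.
  return fs.realpath(dir);
}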
async function testSentinel() {
const root = await mkTmpDir("sentinel");
const nested = path.join(root, "a", "b", "c");
const root = await mkTmpDir('sentinel');
const nested = path.join(root, 'a', 'b', 'c');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, ".project-root"), "\n");
await fs.writeFile(path.join(root, '.project-root'), '\n');
const found = await findProjectRoot(nested);
await assertEqual(found, root, "sentinel .project-root should win");
return { name: "sentinel", ok: true };
await assertEqual(found, root, 'sentinel .project-root should win');
return { name: 'sentinel', ok: true };
}
async function testOtherSentinels() {
const root = await mkTmpDir("other-sentinels");
const nested = path.join(root, "x", "y");
const root = await mkTmpDir('other-sentinels');
const nested = path.join(root, 'x', 'y');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, ".workspace-root"), "\n");
await fs.writeFile(path.join(root, '.workspace-root'), '\n');
const found1 = await findProjectRoot(nested);
assertEqual(found1, root, "sentinel .workspace-root should win");
assertEqual(found1, root, 'sentinel .workspace-root should win');
await fs.remove(path.join(root, ".workspace-root"));
await fs.writeFile(path.join(root, ".repo-root"), "\n");
await fs.remove(path.join(root, '.workspace-root'));
await fs.writeFile(path.join(root, '.repo-root'), '\n');
const found2 = await findProjectRoot(nested);
assertEqual(found2, root, "sentinel .repo-root should win");
return { name: "other-sentinels", ok: true };
assertEqual(found2, root, 'sentinel .repo-root should win');
return { name: 'other-sentinels', ok: true };
}
async function testGitCliAndMarker() {
const hasGit = await cmdAvailable("git");
if (!hasGit) return { name: "git-cli", ok: true, skipped: true };
const hasGit = await cmdAvailable('git');
if (!hasGit) return { name: 'git-cli', ok: true, skipped: true };
const root = await mkTmpDir("git");
const nested = path.join(root, "pkg", "src");
const root = await mkTmpDir('git');
const nested = path.join(root, 'pkg', 'src');
await fs.ensureDir(nested);
await execFileAsync("git", ["init"], { cwd: root, timeout: 2000 });
await execFileAsync('git', ['init'], { cwd: root, timeout: 2000 });
const found = await findProjectRoot(nested);
await assertEqual(found, root, "git toplevel should be detected");
return { name: "git-cli", ok: true };
await assertEqual(found, root, 'git toplevel should be detected');
return { name: 'git-cli', ok: true };
}
async function testHgMarkerOrCli() {
// Prefer simple marker test to avoid requiring Mercurial install
const root = await mkTmpDir("hg");
const nested = path.join(root, "lib");
const root = await mkTmpDir('hg');
const nested = path.join(root, 'lib');
await fs.ensureDir(nested);
await fs.ensureDir(path.join(root, ".hg"));
await fs.ensureDir(path.join(root, '.hg'));
const found = await findProjectRoot(nested);
await assertEqual(found, root, ".hg marker should be detected");
return { name: "hg-marker", ok: true };
await assertEqual(found, root, '.hg marker should be detected');
return { name: 'hg-marker', ok: true };
}
async function testWorkspacePnpm() {
const root = await mkTmpDir("pnpm-workspace");
const pkgA = path.join(root, "packages", "a");
const root = await mkTmpDir('pnpm-workspace');
const pkgA = path.join(root, 'packages', 'a');
await fs.ensureDir(pkgA);
await fs.writeFile(path.join(root, "pnpm-workspace.yaml"), "packages:\n - packages/*\n");
await fs.writeFile(path.join(root, 'pnpm-workspace.yaml'), 'packages:\n - packages/*\n');
const found = await findProjectRoot(pkgA);
await assertEqual(found, root, "pnpm-workspace.yaml should be detected");
return { name: "pnpm-workspace", ok: true };
await assertEqual(found, root, 'pnpm-workspace.yaml should be detected');
return { name: 'pnpm-workspace', ok: true };
}
async function testPackageJsonWorkspaces() {
const root = await mkTmpDir("package-workspaces");
const pkgA = path.join(root, "packages", "a");
const root = await mkTmpDir('package-workspaces');
const pkgA = path.join(root, 'packages', 'a');
await fs.ensureDir(pkgA);
await fs.writeJson(path.join(root, "package.json"), { private: true, workspaces: ["packages/*"] }, { spaces: 2 });
await fs.writeJson(
path.join(root, 'package.json'),
{ private: true, workspaces: ['packages/*'] },
{ spaces: 2 },
);
const found = await findProjectRoot(pkgA);
await assertEqual(found, root, "package.json workspaces should be detected");
return { name: "package.json-workspaces", ok: true };
await assertEqual(found, root, 'package.json workspaces should be detected');
return { name: 'package.json-workspaces', ok: true };
}
async function testLockfiles() {
const root = await mkTmpDir("lockfiles");
const nested = path.join(root, "src");
const root = await mkTmpDir('lockfiles');
const nested = path.join(root, 'src');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, "yarn.lock"), "\n");
await fs.writeFile(path.join(root, 'yarn.lock'), '\n');
const found = await findProjectRoot(nested);
await assertEqual(found, root, "yarn.lock should be detected");
return { name: "lockfiles", ok: true };
await assertEqual(found, root, 'yarn.lock should be detected');
return { name: 'lockfiles', ok: true };
}
async function testLanguageConfigs() {
const root = await mkTmpDir("lang-configs");
const nested = path.join(root, "x", "y");
const root = await mkTmpDir('lang-configs');
const nested = path.join(root, 'x', 'y');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, "pyproject.toml"), "[tool.poetry]\nname='tmp'\n");
await fs.writeFile(path.join(root, 'pyproject.toml'), "[tool.poetry]\nname='tmp'\n");
const found = await findProjectRoot(nested);
await assertEqual(found, root, "pyproject.toml should be detected");
return { name: "language-configs", ok: true };
await assertEqual(found, root, 'pyproject.toml should be detected');
return { name: 'language-configs', ok: true };
}
async function testPreferOuterOnTie() {
const root = await mkTmpDir("tie");
const mid = path.join(root, "mid");
const leaf = path.join(mid, "leaf");
const root = await mkTmpDir('tie');
const mid = path.join(root, 'mid');
const leaf = path.join(mid, 'leaf');
await fs.ensureDir(leaf);
// same weight marker at two levels
await fs.writeFile(path.join(root, "requirements.txt"), "\n");
await fs.writeFile(path.join(mid, "requirements.txt"), "\n");
await fs.writeFile(path.join(root, 'requirements.txt'), '\n');
await fs.writeFile(path.join(mid, 'requirements.txt'), '\n');
const found = await findProjectRoot(leaf);
await assertEqual(found, root, "outermost directory should win on equal weight");
return { name: "prefer-outermost-tie", ok: true };
await assertEqual(found, root, 'outermost directory should win on equal weight');
return { name: 'prefer-outermost-tie', ok: true };
}
// Additional coverage: Bazel, Nx/Turbo/Rush, Go workspaces, Deno, Java/Scala, PHP, Rust, Nix, Changesets, env markers,
// and priority interaction between package.json and lockfiles.
async function testBazelWorkspace() {
const root = await mkTmpDir("bazel");
const nested = path.join(root, "apps", "svc");
const root = await mkTmpDir('bazel');
const nested = path.join(root, 'apps', 'svc');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, "WORKSPACE"), "workspace(name=\"tmp\")\n");
await fs.writeFile(path.join(root, 'WORKSPACE'), 'workspace(name="tmp")\n');
const found = await findProjectRoot(nested);
await assertEqual(found, root, "Bazel WORKSPACE should be detected");
return { name: "bazel-workspace", ok: true };
await assertEqual(found, root, 'Bazel WORKSPACE should be detected');
return { name: 'bazel-workspace', ok: true };
}
async function testNx() {
const root = await mkTmpDir("nx");
const nested = path.join(root, "apps", "web");
const root = await mkTmpDir('nx');
const nested = path.join(root, 'apps', 'web');
await fs.ensureDir(nested);
await fs.writeJson(path.join(root, "nx.json"), { npmScope: "tmp" }, { spaces: 2 });
await fs.writeJson(path.join(root, 'nx.json'), { npmScope: 'tmp' }, { spaces: 2 });
const found = await findProjectRoot(nested);
await assertEqual(found, root, "nx.json should be detected");
return { name: "nx", ok: true };
await assertEqual(found, root, 'nx.json should be detected');
return { name: 'nx', ok: true };
}
async function testTurbo() {
const root = await mkTmpDir("turbo");
const nested = path.join(root, "packages", "x");
const root = await mkTmpDir('turbo');
const nested = path.join(root, 'packages', 'x');
await fs.ensureDir(nested);
await fs.writeJson(path.join(root, "turbo.json"), { pipeline: {} }, { spaces: 2 });
await fs.writeJson(path.join(root, 'turbo.json'), { pipeline: {} }, { spaces: 2 });
const found = await findProjectRoot(nested);
await assertEqual(found, root, "turbo.json should be detected");
return { name: "turbo", ok: true };
await assertEqual(found, root, 'turbo.json should be detected');
return { name: 'turbo', ok: true };
}
async function testRush() {
const root = await mkTmpDir("rush");
const nested = path.join(root, "apps", "a");
const root = await mkTmpDir('rush');
const nested = path.join(root, 'apps', 'a');
await fs.ensureDir(nested);
await fs.writeJson(path.join(root, "rush.json"), { projectFolderMinDepth: 1 }, { spaces: 2 });
await fs.writeJson(path.join(root, 'rush.json'), { projectFolderMinDepth: 1 }, { spaces: 2 });
const found = await findProjectRoot(nested);
await assertEqual(found, root, "rush.json should be detected");
return { name: "rush", ok: true };
await assertEqual(found, root, 'rush.json should be detected');
return { name: 'rush', ok: true };
}
async function testGoWorkAndMod() {
  const root = await mkTmpDir('gowork');
  const mod = path.join(root, 'modA');
  const nested = path.join(mod, 'pkg');
  await fs.ensureDir(nested);
  await fs.writeFile(path.join(root, 'go.work'), 'go 1.22\nuse ./modA\n');
  await fs.writeFile(path.join(mod, 'go.mod'), 'module example.com/a\ngo 1.22\n');
  const found = await findProjectRoot(nested);
  await assertEqual(found, root, 'go.work should define the workspace root');
  return { name: 'go-work', ok: true };
}

async function testDenoJson() {
  const root = await mkTmpDir('deno');
  const nested = path.join(root, 'src');
  await fs.ensureDir(nested);
  await fs.writeJson(path.join(root, 'deno.json'), { tasks: {} }, { spaces: 2 });
  const found = await findProjectRoot(nested);
  await assertEqual(found, root, 'deno.json should be detected');
  return { name: 'deno-json', ok: true };
}

async function testGradleSettings() {
  const root = await mkTmpDir('gradle');
  const nested = path.join(root, 'app');
  await fs.ensureDir(nested);
  await fs.writeFile(path.join(root, 'settings.gradle'), "rootProject.name='tmp'\n");
  const found = await findProjectRoot(nested);
  await assertEqual(found, root, 'settings.gradle should be detected');
  return { name: 'gradle-settings', ok: true };
}

async function testMavenPom() {
  const root = await mkTmpDir('maven');
  const nested = path.join(root, 'module');
  await fs.ensureDir(nested);
  await fs.writeFile(path.join(root, 'pom.xml'), '<project></project>\n');
  const found = await findProjectRoot(nested);
  await assertEqual(found, root, 'pom.xml should be detected');
  return { name: 'maven-pom', ok: true };
}

async function testSbtBuild() {
  const root = await mkTmpDir('sbt');
  const nested = path.join(root, 'sub');
  await fs.ensureDir(nested);
  await fs.writeFile(path.join(root, 'build.sbt'), 'name := "tmp"\n');
  const found = await findProjectRoot(nested);
  await assertEqual(found, root, 'build.sbt should be detected');
  return { name: 'sbt-build', ok: true };
}

async function testComposer() {
  const root = await mkTmpDir('composer');
  const nested = path.join(root, 'src');
  await fs.ensureDir(nested);
  await fs.writeJson(path.join(root, 'composer.json'), { name: 'tmp/pkg' }, { spaces: 2 });
  await fs.writeFile(path.join(root, 'composer.lock'), '{}\n');
  const found = await findProjectRoot(nested);
  await assertEqual(found, root, 'composer.{json,lock} should be detected');
  return { name: 'composer', ok: true };
}

async function testCargo() {
  const root = await mkTmpDir('cargo');
  const nested = path.join(root, 'src');
  await fs.ensureDir(nested);
  await fs.writeFile(path.join(root, 'Cargo.toml'), "[package]\nname='tmp'\nversion='0.0.0'\n");
  const found = await findProjectRoot(nested);
  await assertEqual(found, root, 'Cargo.toml should be detected');
  return { name: 'cargo', ok: true };
}

async function testNixFlake() {
  const root = await mkTmpDir('nix');
  const nested = path.join(root, 'work');
  await fs.ensureDir(nested);
  await fs.writeFile(path.join(root, 'flake.nix'), '{ }\n');
  const found = await findProjectRoot(nested);
  await assertEqual(found, root, 'flake.nix should be detected');
  return { name: 'nix-flake', ok: true };
}

async function testChangesetConfig() {
  const root = await mkTmpDir('changeset');
  const nested = path.join(root, 'pkg');
  await fs.ensureDir(nested);
  await fs.ensureDir(path.join(root, '.changeset'));
  await fs.writeJson(
    path.join(root, '.changeset', 'config.json'),
    { $schema: 'https://unpkg.com/@changesets/config@2.3.1/schema.json' },
    { spaces: 2 },
  );
  const found = await findProjectRoot(nested);
  await assertEqual(found, root, '.changeset/config.json should be detected');
  return { name: 'changesets', ok: true };
}

async function testEnvCustomMarker() {
  const root = await mkTmpDir('env-marker');
  const nested = path.join(root, 'dir');
  await fs.ensureDir(nested);
  await fs.writeFile(path.join(root, 'MY_ROOT'), '\n');
  const prev = process.env.PROJECT_ROOT_MARKERS;
  process.env.PROJECT_ROOT_MARKERS = 'MY_ROOT';
  try {
    const found = await findProjectRoot(nested);
    await assertEqual(found, root, 'custom env marker should be honored');
  } finally {
    if (prev === undefined) delete process.env.PROJECT_ROOT_MARKERS;
    else process.env.PROJECT_ROOT_MARKERS = prev;
  }
  return { name: 'env-custom-marker', ok: true };
}

async function testPackageLowPriorityVsLock() {
  const root = await mkTmpDir('pkg-vs-lock');
  const nested = path.join(root, 'nested');
  await fs.ensureDir(path.join(nested, 'deep'));
  await fs.writeJson(path.join(nested, 'package.json'), { name: 'nested' }, { spaces: 2 });
  await fs.writeFile(path.join(root, 'yarn.lock'), '\n');
  const found = await findProjectRoot(path.join(nested, 'deep'));
  await assertEqual(found, root, 'lockfile at root should outrank nested package.json');
  return { name: 'package-vs-lock-priority', ok: true };
}
async function run() {
@@ -381,25 +389,25 @@ async function run() {
    try {
      const r = await t();
      results.push({ ...r, ok: true });
      console.log(`${r.name}${r.skipped ? ' (skipped)' : ''}`);
    } catch (error) {
      console.error(`${t.name}:`, error && error.message ? error.message : error);
      results.push({ name: t.name, ok: false, error: String(error) });
    }
  }

  const failed = results.filter((r) => !r.ok);
  console.log('\nSummary:');
  for (const r of results) {
    console.log(`- ${r.name}: ${r.ok ? 'ok' : 'FAIL'}${r.skipped ? ' (skipped)' : ''}`);
  }
  if (failed.length > 0) {
    process.exitCode = 1;
  }
}

run().catch((error) => {
  console.error('Fatal error:', error);
  process.exit(1);
});
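For context, the contract these tests pin down can be sketched as a marker walk up the directory tree. The sketch below is an illustrative reconstruction, not the shipped implementation: the marker list, the priority split between strong markers and package.json, and the PROJECT_ROOT_MARKERS handling are all inferred from the assertions above.

// Illustrative sketch of the behavior exercised above (not the shipped code).
// Strong markers (workspace files, lockfiles) win over a nested package.json;
// PROJECT_ROOT_MARKERS can inject custom marker names.
const fs = require('fs-extra');
const path = require('node:path');

const STRONG_MARKERS = [
  'go.work', 'deno.json', 'settings.gradle', 'pom.xml', 'build.sbt',
  'composer.json', 'Cargo.toml', 'flake.nix', '.changeset/config.json',
  'yarn.lock', 'rush.json',
];

async function findProjectRootSketch(startDir) {
  const extra = (process.env.PROJECT_ROOT_MARKERS || '').split(',').filter(Boolean);
  const markers = [...extra, ...STRONG_MARKERS];
  let dir = path.resolve(startDir);
  let packageJsonFallback = null;
  while (true) {
    for (const marker of markers) {
      if (await fs.pathExists(path.join(dir, marker))) return dir; // strong marker wins
    }
    // Remember the closest package.json but keep climbing: a lockfile higher
    // up should outrank it (see testPackageLowPriorityVsLock).
    if (!packageJsonFallback && (await fs.pathExists(path.join(dir, 'package.json')))) {
      packageJsonFallback = dir;
    }
    const parent = path.dirname(dir);
    if (parent === dir) return packageJsonFallback || startDir;
    dir = parent;
  }
}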

View File

@@ -1,49 +1,44 @@
const fs = require('fs-extra');

function escapeXml(string_) {
  if (typeof string_ !== 'string') {
    return String(string_);
  }
  return string_.replaceAll('&', '&amp;').replaceAll('<', '&lt;').replaceAll("'", '&apos;');
}

function indentFileContent(content) {
  if (typeof content !== 'string') {
    return String(content);
  }
  return content.split('\n').map((line) => `  ${line}`);
}

function generateXMLOutput(aggregatedContent, outputPath) {
  const { textFiles } = aggregatedContent;
  const writeStream = fs.createWriteStream(outputPath, { encoding: 'utf8' });

  return new Promise((resolve, reject) => {
    writeStream.on('error', reject);
    writeStream.on('finish', resolve);

    writeStream.write('<?xml version="1.0" encoding="UTF-8"?>\n');
    writeStream.write('<files>\n');

    // Sort files by path for deterministic order
    const filesSorted = [...textFiles].sort((a, b) => a.path.localeCompare(b.path));

    let index = 0;
    const writeNext = () => {
      if (index >= filesSorted.length) {
        writeStream.write('</files>\n');
        writeStream.end();
        return;
      }

      const file = filesSorted[index++];
      const p = escapeXml(file.path);
      const content = typeof file.content === 'string' ? file.content : '';

      if (content.length === 0) {
        writeStream.write(`\t<file path='${p}'/>\n`);
@@ -51,27 +46,34 @@ function generateXMLOutput(aggregatedContent, outputPath) {
        return;
      }

      const needsCdata = content.includes('<') || content.includes('&') || content.includes(']]>');

      if (needsCdata) {
        // Open tag and CDATA on their own line with tab indent; content lines indented with two tabs
        writeStream.write(`\t<file path='${p}'><![CDATA[\n`);
        // Safely split any occurrences of "]]>" inside content, trim trailing newlines, indent each line with two tabs
        const safe = content.replaceAll(']]>', ']]]]><![CDATA[>');
        const trimmed = safe.replace(/[\r\n]+$/, '');
        const indented =
          trimmed.length > 0
            ? trimmed
                .split('\n')
                .map((line) => `\t\t${line}`)
                .join('\n')
            : '';
        writeStream.write(indented);
        // Close CDATA and attach closing tag directly after the last content line
        writeStream.write(']]></file>\n');
      } else {
        // Write opening tag then newline; indent content with two tabs; attach closing tag directly after last content char
        writeStream.write(`\t<file path='${p}'>\n`);
        const trimmed = content.replace(/[\r\n]+$/, '');
        const indented =
          trimmed.length > 0
            ? trimmed
                .split('\n')
                .map((line) => `\t\t${line}`)
                .join('\n')
            : '';
        writeStream.write(indented);
        writeStream.write(`</file>\n`);
      }
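As a quick illustration of the ']]>' escaping above (a standard CDATA trick rather than anything specific to this file): the terminator is split across two CDATA sections so the XML stays well formed, and any parser concatenates the pieces back to the original text.

// Minimal demonstration of the CDATA split used above: ']]>' cannot appear
// inside a CDATA section, so it is broken into ']]' + '>' across two sections.
const content = 'data ]]> more';
const safe = content.replaceAll(']]>', ']]]]><![CDATA[>');
console.log(`<![CDATA[${safe}]]>`);
// -> <![CDATA[data ]]]]><![CDATA[> more]]>
// First section: "data ]]", second section: "> more" -- joined: "data ]]> more".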

View File

@@ -1,13 +1,13 @@
#!/usr/bin/env node

const { program } = require('commander');
const path = require('node:path');
const fs = require('node:fs').promises;
const yaml = require('js-yaml');
const chalk = require('chalk').default || require('chalk');
const inquirer = require('inquirer').default || require('inquirer');
const semver = require('semver');
const https = require('node:https');

// Handle both execution contexts (from root via npx or from installer directory)
let version;
@@ -18,18 +18,20 @@ try {
  version = require('../package.json').version;
  packageName = require('../package.json').name;
  installer = require('../lib/installer');
} catch (error) {
  // Fall back to root context (when run via npx from GitHub)
  console.log(`Installer context not found (${error.message}), trying root context...`);
  try {
    version = require('../../../package.json').version;
    installer = require('../../../tools/installer/lib/installer');
  } catch (error) {
    console.error(
      'Error: Could not load required modules. Please ensure you are running from the correct directory.',
    );
    console.error('Debug info:', {
      __dirname,
      cwd: process.cwd(),
      error: error.message,
    });
    process.exit(1);
  }
@@ -45,8 +47,14 @@ program
  .option('-f, --full', 'Install complete BMad Method')
  .option('-x, --expansion-only', 'Install only expansion packs (no bmad-core)')
  .option('-d, --directory <path>', 'Installation directory')
  .option(
    '-i, --ide <ide...>',
    'Configure for specific IDE(s) - can specify multiple (cursor, claude-code, windsurf, trae, roo, kilo, cline, gemini, qwen-code, github-copilot, other)',
  )
  .option(
    '-e, --expansion-packs <packs...>',
    'Install specific expansion packs (can specify multiple)',
  )
  .action(async (options) => {
    try {
      if (!options.full && !options.expansionOnly) {
@@ -64,8 +72,8 @@ program
      const config = {
        installType,
        directory: options.directory || '.',
        ides: (options.ide || []).filter((ide) => ide !== 'other'),
        expansionPacks: options.expansionPacks || [],
      };

      await installer.install(config);
      process.exit(0);
@@ -98,7 +106,7 @@ program
    console.log('Checking for updates...');

    // Make HTTP request to npm registry for latest version info
    const req = https.get(`https://registry.npmjs.org/${packageName}/latest`, (res) => {
      // Check for HTTP errors (non-200 status codes)
      if (res.statusCode !== 200) {
        console.error(chalk.red(`Update check failed: Received status code ${res.statusCode}`));
@@ -107,7 +115,7 @@ program
      // Accumulate response data chunks
      let data = '';
      res.on('data', (chunk) => (data += chunk));

      // Process complete response
      res.on('end', () => {
@@ -117,7 +125,9 @@ program
        // Compare versions using semver
        if (semver.gt(latest, version)) {
          console.log(
            chalk.bold.blue(`⚠️ ${packageName} update available: ${version} → ${latest}`),
          );
          console.log(chalk.bold.blue('\nInstall latest by running:'));
          console.log(chalk.bold.magenta(` npm install ${packageName}@latest`));
          console.log(chalk.dim(' or'));
@@ -133,12 +143,12 @@ program
      });

    // Handle network/connection errors
    req.on('error', (error) => {
      console.error(chalk.red('Update check failed:'), error.message);
    });

    // Set 30 second timeout to prevent hanging
    req.setTimeout(30_000, () => {
      req.destroy();
      console.error(chalk.red('Update check timed out'));
    });
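The same registry check can be expressed with the global fetch available in Node 18+; a minimal sketch (the function name is ours, and it assumes the same packageName/version values and the semver module already in scope), shown only to clarify what the https.get flow above does:

// Sketch of the update check above using global fetch (Node 18+), with the
// same 30-second cap implemented via AbortSignal instead of req.setTimeout.
async function checkLatestSketch(packageName, version) {
  const res = await fetch(`https://registry.npmjs.org/${packageName}/latest`, {
    signal: AbortSignal.timeout(30_000),
  });
  if (!res.ok) throw new Error(`Update check failed: status ${res.status}`);
  const { version: latest } = await res.json();
  return semver.gt(latest, version) ? latest : null; // null = already current
}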
@@ -183,16 +193,17 @@ program
});

async function promptInstallation() {
  // Display ASCII logo
  console.log(
    chalk.bold.cyan(`
██████╗ ███╗ ███╗ █████╗ ██████╗ ███╗ ███╗███████╗████████╗██╗ ██╗ ██████╗ ██████╗
██╔══██╗████╗ ████║██╔══██╗██╔══██╗ ████╗ ████║██╔════╝╚══██╔══╝██║ ██║██╔═══██╗██╔══██╗
██████╔╝██╔████╔██║███████║██║ ██║█████╗██╔████╔██║█████╗ ██║ ███████║██║ ██║██║ ██║
██╔══██╗██║╚██╔╝██║██╔══██║██║ ██║╚════╝██║╚██╔╝██║██╔══╝ ██║ ██╔══██║██║ ██║██║ ██║
██████╔╝██║ ╚═╝ ██║██║ ██║██████╔╝ ██║ ╚═╝ ██║███████╗ ██║ ██║ ██║╚██████╔╝██████╔╝
╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═════╝
`),
  );

  console.log(chalk.bold.magenta('🚀 Universal AI Agent Framework for Any Domain'));
  console.log(chalk.bold.blue(`✨ Installer v${version}\n`));
@@ -210,8 +221,8 @@ async function promptInstallation() {
        return 'Please enter a valid project path';
      }
      return true;
      },
    },
  ]);

  answers.directory = directory;
@@ -238,7 +249,8 @@ async function promptInstallation() {
  if (state.type === 'v4_existing') {
    const currentVersion = state.manifest?.version || 'unknown';
    const newVersion = version; // Always use package.json version
    const versionInfo =
      currentVersion === newVersion
        ? `(v${currentVersion} - reinstall)`
        : `(v${currentVersion} → v${newVersion})`;
    bmadOptionText = `Update ${coreShortTitle} ${versionInfo} .bmad-core`;
@@ -249,7 +261,7 @@ async function promptInstallation() {
  choices.push({
    name: bmadOptionText,
    value: 'bmad-core',
    checked: true,
  });

  // Add expansion pack options
@@ -260,7 +272,8 @@ async function promptInstallation() {
    if (existing) {
      const currentVersion = existing.manifest?.version || 'unknown';
      const newVersion = pack.version;
      const versionInfo =
        currentVersion === newVersion
          ? `(v${currentVersion} - reinstall)`
          : `(v${currentVersion} → v${newVersion})`;
      packOptionText = `Update ${pack.shortTitle} ${versionInfo} .${pack.id}`;
@@ -271,7 +284,7 @@ async function promptInstallation() {
    choices.push({
      name: packOptionText,
      value: pack.id,
      checked: false,
    });
  }
@@ -287,13 +300,13 @@ async function promptInstallation() {
        return 'Please select at least one item to install';
      }
      return true;
      },
    },
  ]);

  // Process selections
  answers.installType = selectedItems.includes('bmad-core') ? 'full' : 'expansion-only';
  answers.expansionPacks = selectedItems.filter((item) => item !== 'bmad-core');

  // Ask sharding questions if installing BMad core
  if (selectedItems.includes('bmad-core')) {
@@ -306,8 +319,8 @@ async function promptInstallation() {
        type: 'confirm',
        name: 'prdSharded',
        message: 'Will the PRD (Product Requirements Document) be sharded into multiple files?',
        default: true,
      },
    ]);
    answers.prdSharded = prdSharded;
@@ -317,18 +330,30 @@ async function promptInstallation() {
        type: 'confirm',
        name: 'architectureSharded',
        message: 'Will the architecture documentation be sharded into multiple files?',
        default: true,
      },
    ]);
    answers.architectureSharded = architectureSharded;

    // Show warning if architecture sharding is disabled
    if (!architectureSharded) {
      console.log(chalk.yellow.bold('\n⚠ IMPORTANT: Architecture Sharding Disabled'));
      console.log(
        chalk.yellow(
          'With architecture sharding disabled, you should still create the files listed',
        ),
      );
      console.log(
        chalk.yellow(
          'in devLoadAlwaysFiles (like coding-standards.md, tech-stack.md, source-tree.md)',
        ),
      );
      console.log(chalk.yellow('as these are used by the dev agent at runtime.'));
      console.log(
        chalk.yellow(
          '\nAlternatively, you can remove these files from the devLoadAlwaysFiles list',
        ),
      );
      console.log(chalk.yellow('in your core-config.yaml after installation.'));

      const { acknowledge } = await inquirer.prompt([
@@ -336,8 +361,8 @@ async function promptInstallation() {
          type: 'confirm',
          name: 'acknowledge',
          message: 'Do you acknowledge this requirement and want to proceed?',
          default: false,
        },
      ]);

      if (!acknowledge) {
@@ -353,7 +378,11 @@ async function promptInstallation() {
  while (!ideSelectionComplete) {
    console.log(chalk.cyan('\n🛠 IDE Configuration'));
    console.log(
      chalk.bold.yellow.bgRed(
        ' ⚠️ IMPORTANT: This is a MULTISELECT! Use SPACEBAR to toggle each IDE! ',
      ),
    );
    console.log(chalk.bold.magenta('🔸 Use arrow keys to navigate'));
    console.log(chalk.bold.magenta('🔸 Use SPACEBAR to select/deselect IDEs'));
    console.log(chalk.bold.magenta('🔸 Press ENTER when finished selecting\n'));
@@ -362,7 +391,8 @@ async function promptInstallation() {
      {
        type: 'checkbox',
        name: 'ides',
        message:
          'Which IDE(s) do you want to configure? (Select with SPACEBAR, confirm with ENTER):',
        choices: [
          { name: 'Cursor', value: 'cursor' },
          { name: 'Claude Code', value: 'claude-code' },
@@ -374,9 +404,9 @@ async function promptInstallation() {
          { name: 'Gemini CLI', value: 'gemini' },
          { name: 'Qwen Code', value: 'qwen-code' },
          { name: 'Crush', value: 'crush' },
          { name: 'Github Copilot', value: 'github-copilot' },
        ],
      },
    ]);

    ides = ideResponse.ides;
@@ -387,13 +417,19 @@ async function promptInstallation() {
        {
          type: 'confirm',
          name: 'confirmNoIde',
          message: chalk.red(
            '⚠️ You have NOT selected any IDEs. This means NO IDE integration will be set up. Is this correct?',
          ),
          default: false,
        },
      ]);

      if (!confirmNoIde) {
        console.log(
          chalk.bold.red(
            '\n🔄 Returning to IDE selection. Remember to use SPACEBAR to select IDEs!\n',
          ),
        );
        continue; // Go back to IDE selection only
      }
    }
@@ -407,7 +443,9 @@ async function promptInstallation() {
  // Configure GitHub Copilot immediately if selected
  if (ides.includes('github-copilot')) {
    console.log(chalk.cyan('\n🔧 GitHub Copilot Configuration'));
    console.log(
      chalk.dim('BMad works best with specific VS Code settings for optimal agent experience.\n'),
    );

    const { configChoice } = await inquirer.prompt([
      {
@@ -417,19 +455,19 @@ async function promptInstallation() {
        choices: [
          {
            name: 'Use recommended defaults (fastest setup)',
            value: 'defaults',
          },
          {
            name: 'Configure each setting manually (customize to your preferences)',
            value: 'manual',
          },
          {
            name: "Skip settings configuration (I'll configure manually later)",
            value: 'skip',
          },
        ],
        default: 'defaults',
      },
    ]);

    answers.githubCopilotConfig = { configChoice };
@@ -440,14 +478,17 @@ async function promptInstallation() {
    {
      type: 'confirm',
      name: 'includeWebBundles',
      message:
        'Would you like to include pre-built web bundles? (standalone files for ChatGPT, Claude, Gemini)',
      default: false,
    },
  ]);

  if (includeWebBundles) {
    console.log(chalk.cyan('\n📦 Web bundles are standalone files perfect for web AI platforms.'));
    console.log(
      chalk.dim(' You can choose different teams/agents than your IDE installation.\n'),
    );

    const { webBundleType } = await inquirer.prompt([
      {
@@ -457,22 +498,22 @@ async function promptInstallation() {
        choices: [
          {
            name: 'All available bundles (agents, teams, expansion packs)',
            value: 'all',
          },
          {
            name: 'Specific teams only',
            value: 'teams',
          },
          {
            name: 'Individual agents only',
            value: 'agents',
          },
          {
            name: 'Custom selection',
            value: 'custom',
          },
        ],
      },
    ]);

    answers.webBundleType = webBundleType;
@@ -485,18 +526,18 @@ async function promptInstallation() {
          type: 'checkbox',
          name: 'selectedTeams',
          message: 'Select team bundles to include:',
          choices: teams.map((t) => ({
            name: `${t.icon || '📋'} ${t.name}: ${t.description}`,
            value: t.id,
            checked: webBundleType === 'teams', // Check all if teams-only mode
          })),
          validate: (answer) => {
            if (answer.length === 0) {
              return 'You must select at least one team.';
            }
            return true;
          },
        },
      ]);
      answers.selectedWebBundleTeams = selectedTeams;
    }
@@ -508,8 +549,8 @@ async function promptInstallation() {
          type: 'confirm',
          name: 'includeIndividualAgents',
          message: 'Also include individual agent bundles?',
          default: true,
        },
      ]);
      answers.includeIndividualAgents = includeIndividualAgents;
    }
@@ -525,8 +566,8 @@ async function promptInstallation() {
        return 'Please enter a valid directory path';
      }
      return true;
      },
    },
  ]);
    answers.webBundlesDirectory = webBundlesDirectory;
  }
@@ -539,6 +580,6 @@ async function promptInstallation() {
program.parse(process.argv);

// Show help if no command provided
if (process.argv.slice(2).length === 0) {
  program.outputHelp();
}

View File

@@ -40,12 +40,12 @@ ide-configurations:
  # 3. Crush will switch to that agent's persona / task

windsurf:
  name: Windsurf
  rule-dir: .windsurf/workflows/
  format: multi-file
  command-suffix: .md
  instructions: |
    # To use BMad agents in Windsurf:
    # 1. Type /agent-name (e.g., "/dev", "/pm")
    # 2. Windsurf will adopt that agent's persona

trae:
  name: Trae

View File

@@ -1,5 +1,5 @@
const fs = require('fs-extra');
const path = require('node:path');
const yaml = require('js-yaml');
const { extractYamlFromAgent } = require('../../lib/yaml-utils');
@@ -51,7 +51,7 @@ class ConfigLoader {
            id: agentId,
            name: agentConfig.title || agentConfig.name || agentId,
            file: `bmad-core/agents/${entry.name}`,
            description: agentConfig.whenToUse || 'No description available',
          });
        }
      } catch (error) {
@@ -90,21 +90,25 @@ class ConfigLoader {
          expansionPacks.push({
            id: entry.name,
            name: config.name || entry.name,
            description:
              config['short-title'] || config.description || 'No description available',
            fullDescription:
              config.description || config['short-title'] || 'No description available',
            version: config.version || '1.0.0',
            author: config.author || 'BMad Team',
            packPath: packPath,
            dependencies: config.dependencies?.agents || [],
          });
        } catch (error) {
          // Fallback if config.yaml doesn't exist or can't be read
          console.warn(
            `Failed to read config for expansion pack ${entry.name}: ${error.message}`,
          );

          // Try to derive info from directory name as fallback
          const name = entry.name
            .split('-')
            .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
            .join(' ');

          expansionPacks.push({
@@ -115,7 +119,7 @@ class ConfigLoader {
            version: '1.0.0',
            author: 'BMad Team',
            packPath: packPath,
            dependencies: [],
          });
        }
      }
@@ -193,7 +197,7 @@ class ConfigLoader {
            id: path.basename(entry.name, '.yaml'),
            name: teamConfig.bundle.name || entry.name,
            description: teamConfig.bundle.description || 'Team configuration',
            icon: teamConfig.bundle.icon || '📋',
          });
        }
      } catch (error) {

View File

@@ -1,17 +1,14 @@
const fs = require('fs-extra');
const path = require('node:path');
const crypto = require('node:crypto');
const yaml = require('js-yaml');
const chalk = require('chalk');
const { createReadStream, createWriteStream, promises: fsPromises } = require('node:fs');
const { pipeline } = require('node:stream/promises');
const resourceLocator = require('./resource-locator');

class FileManager {
  constructor() {}

  async copyFile(source, destination) {
    try {
@@ -19,14 +16,9 @@ class FileManager {
      // Use streaming for large files (> 10MB)
      const stats = await fs.stat(source);

      await (stats.size > 10 * 1024 * 1024
        ? pipeline(createReadStream(source), createWriteStream(destination))
        : fs.copy(source, destination));
      return true;
    } catch (error) {
      console.error(chalk.red(`Failed to copy ${source}:`), error.message);
@@ -41,28 +33,20 @@ class FileManager {
      // Use streaming copy for large directories
      const files = await resourceLocator.findFiles('**/*', {
        cwd: source,
        nodir: true,
      });

      // Process files in batches to avoid memory issues
      const batchSize = 50;
      for (let index = 0; index < files.length; index += batchSize) {
        const batch = files.slice(index, index + batchSize);
        await Promise.all(
          batch.map((file) => this.copyFile(path.join(source, file), path.join(destination, file))),
        );
      }

      return true;
    } catch (error) {
      console.error(chalk.red(`Failed to copy directory ${source}:`), error.message);
      return false;
    }
  }
@@ -73,17 +57,16 @@ class FileManager {
    for (const file of files) {
      const sourcePath = path.join(sourceDir, file);
      const destinationPath = path.join(destDir, file);

      // Use root replacement if rootValue is provided and file needs it
      const needsRootReplacement =
        rootValue && (file.endsWith('.md') || file.endsWith('.yaml') || file.endsWith('.yml'));

      let success = false;
      success = await (needsRootReplacement
        ? this.copyFileWithRootReplacement(sourcePath, destinationPath, rootValue)
        : this.copyFile(sourcePath, destinationPath));

      if (success) {
        copied.push(file);
@@ -97,32 +80,28 @@
    try {
      // Use streaming for hash calculation to reduce memory usage
      const stream = createReadStream(filePath);
      const hash = crypto.createHash('sha256');

      for await (const chunk of stream) {
        hash.update(chunk);
      }

      return hash.digest('hex').slice(0, 16);
    } catch {
      return null;
    }
  }

  async createManifest(installDir, config, files) {
    const manifestPath = path.join(installDir, this.manifestDir, this.manifestFile);

    // Read version from package.json
    let coreVersion = 'unknown';
    try {
      const packagePath = path.join(__dirname, '..', '..', '..', 'package.json');
      const packageJson = require(packagePath);
      coreVersion = packageJson.version;
    } catch {
      console.warn("Could not read version from package.json, using 'unknown'");
    }
@@ -156,31 +135,23 @@
  }

  async readManifest(installDir) {
    const manifestPath = path.join(installDir, this.manifestDir, this.manifestFile);

    try {
      const content = await fs.readFile(manifestPath, 'utf8');
      return yaml.load(content);
    } catch {
      return null;
    }
  }

  async readExpansionPackManifest(installDir, packId) {
    const manifestPath = path.join(installDir, `.${packId}`, this.manifestFile);

    try {
      const content = await fs.readFile(manifestPath, 'utf8');
      return yaml.load(content);
    } catch {
      return null;
    }
  }
@@ -203,7 +174,7 @@ class FileManager {
  async checkFileIntegrity(installDir, manifest) {
    const result = {
      missing: [],
      modified: [],
    };

    for (const file of manifest.files) {
@@ -214,13 +185,13 @@
        continue;
      }

      if (await this.pathExists(filePath)) {
        const currentHash = await this.calculateFileHash(filePath);
        if (currentHash && currentHash !== file.hash) {
          result.modified.push(file.path);
        }
      } else {
        result.missing.push(file.path);
      }
    }
@@ -228,7 +199,7 @@
  }

  async backupFile(filePath) {
    const backupPath = filePath + '.bak';
    let counter = 1;
    let finalBackupPath = backupPath;
@@ -256,7 +227,7 @@
  }

  async readFile(filePath) {
    return fs.readFile(filePath, 'utf8');
  }

  async writeFile(filePath, content) {
@@ -269,14 +240,10 @@
  }

  async createExpansionPackManifest(installDir, packId, config, files) {
    const manifestPath = path.join(installDir, `.${packId}`, this.manifestFile);

    const manifest = {
      version: config.expansionPackVersion || require('../../../package.json').version,
      installed_at: new Date().toISOString(),
      install_type: config.installType,
      expansion_pack_id: config.expansionPackId,
@@ -336,26 +303,27 @@
      // Check file size to determine if we should stream
      const stats = await fs.stat(source);

      if (stats.size > 5 * 1024 * 1024) {
        // 5MB threshold
        // Use streaming for large files
        const { Transform } = require('node:stream');

        const replaceStream = new Transform({
          transform(chunk, encoding, callback) {
            const modified = chunk.toString().replaceAll('{root}', rootValue);
            callback(null, modified);
          },
        });

        await this.ensureDirectory(path.dirname(destination));
        await pipeline(
          createReadStream(source, { encoding: 'utf8' }),
          replaceStream,
          createWriteStream(destination, { encoding: 'utf8' }),
        );
      } else {
        // Regular approach for smaller files
        const content = await fsPromises.readFile(source, 'utf8');
        const updatedContent = content.replaceAll('{root}', rootValue);
        await this.ensureDirectory(path.dirname(destination));
        await fsPromises.writeFile(destination, updatedContent, 'utf8');
      }
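One property of the streaming branch worth noting (an observation about Transform streams generally, not a change to the code above): a {root} token that straddles a chunk boundary escapes a per-chunk replaceAll. A boundary-safe variant holds back any trailing partial match between chunks; the sketch below is illustrative, with names of our own choosing.

// Hypothetical boundary-safe variant of the replace stream above: it holds
// back the longest suffix of each chunk that is a proper prefix of the token,
// so a '{root}' split across two chunks is still matched.
const { Transform } = require('node:stream');

function createReplaceStream(token, value) {
  let tail = '';
  // Longest suffix of `text` that is a proper prefix of `token`.
  const holdback = (text) => {
    for (let k = Math.min(token.length - 1, text.length); k > 0; k--) {
      if (text.endsWith(token.slice(0, k))) return k;
    }
    return 0;
  };
  return new Transform({
    transform(chunk, _encoding, callback) {
      const text = tail + chunk.toString();
      const keep = holdback(text);
      tail = text.slice(text.length - keep);
      callback(null, text.slice(0, text.length - keep).replaceAll(token, value));
    },
    flush(callback) {
      callback(null, tail); // shorter than token, cannot contain a match
    },
  });
}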
@@ -367,32 +335,37 @@
    }
  }

  async copyDirectoryWithRootReplacement(
    source,
    destination,
    rootValue,
    fileExtensions = ['.md', '.yaml', '.yml'],
  ) {
    try {
      await this.ensureDirectory(destination);

      // Get all files in source directory
      const files = await resourceLocator.findFiles('**/*', {
        cwd: source,
        nodir: true,
      });

      let replacedCount = 0;

      for (const file of files) {
        const sourcePath = path.join(source, file);
        const destinationPath = path.join(destination, file);

        // Check if this file type should have {root} replacement
        const shouldReplace = fileExtensions.some((extension) => file.endsWith(extension));

        if (shouldReplace) {
          if (await this.copyFileWithRootReplacement(sourcePath, destinationPath, rootValue)) {
            replacedCount++;
          }
        } else {
          // Regular copy for files that don't need replacement
          await this.copyFile(sourcePath, destinationPath);
        }
      }
@@ -402,10 +375,15 @@
      return true;
    } catch (error) {
      console.error(
        chalk.red(`Failed to copy directory ${source} with root replacement:`),
        error.message,
      );
      return false;
    }
  }

  manifestDir = '.bmad-core';
  manifestFile = 'install-manifest.yaml';
}

module.exports = new FileManager();

View File

@@ -3,13 +3,13 @@
 * Reduces duplication and provides shared methods
 */

const path = require('node:path');
const fs = require('fs-extra');
const yaml = require('js-yaml');
const chalk = require('chalk').default || require('chalk');
const fileManager = require('./file-manager');
const resourceLocator = require('./resource-locator');
const { extractYamlFromAgent } = require('../../lib/yaml-utils');

class BaseIdeSetup {
  constructor() {
@@ -30,16 +30,16 @@ class BaseIdeSetup {
    // Get core agents
    const coreAgents = await this.getCoreAgentIds(installDir);
    for (const id of coreAgents) allAgents.add(id);

    // Get expansion pack agents
    const expansionPacks = await this.getInstalledExpansionPacks(installDir);
    for (const pack of expansionPacks) {
      const packAgents = await this.getExpansionPackAgents(pack.path);
      for (const id of packAgents) allAgents.add(id);
    }

    const result = [...allAgents];
    this._agentCache.set(cacheKey, result);
    return result;
  }
@@ -50,14 +50,14 @@
  async getCoreAgentIds(installDir) {
    const coreAgents = [];
    const corePaths = [
      path.join(installDir, '.bmad-core', 'agents'),
      path.join(installDir, 'bmad-core', 'agents'),
    ];

    for (const agentsDir of corePaths) {
      if (await fileManager.pathExists(agentsDir)) {
        const files = await resourceLocator.findFiles('*.md', { cwd: agentsDir });
        coreAgents.push(...files.map((file) => path.basename(file, '.md')));
        break; // Use first found
      }
    }
@@ -80,9 +80,9 @@
    if (!agentPath) {
      // Check installation-specific paths
      const possiblePaths = [
        path.join(installDir, '.bmad-core', 'agents', `${agentId}.md`),
        path.join(installDir, 'bmad-core', 'agents', `${agentId}.md`),
        path.join(installDir, 'common', 'agents', `${agentId}.md`),
      ];

      for (const testPath of possiblePaths) {
@@ -113,7 +113,7 @@
        const metadata = yaml.load(yamlContent);
        return metadata.agent_name || agentId;
      }
    } catch {
      // Fallback to agent ID
    }

    return agentId;
@@ -131,29 +131,29 @@
    const expansionPacks = [];

    // Check for dot-prefixed expansion packs
    const dotExpansions = await resourceLocator.findFiles('.bmad-*', { cwd: installDir });
    for (const dotExpansion of dotExpansions) {
      if (dotExpansion !== '.bmad-core') {
        const packPath = path.join(installDir, dotExpansion);
        const packName = dotExpansion.slice(1); // remove the dot
        expansionPacks.push({
          name: packName,
          path: packPath,
        });
      }
    }

    // Check other dot folders that have config.yaml
    const allDotFolders = await resourceLocator.findFiles('.*', { cwd: installDir });
    for (const folder of allDotFolders) {
      if (!folder.startsWith('.bmad-') && folder !== '.bmad-core') {
        const packPath = path.join(installDir, folder);
        const configPath = path.join(packPath, 'config.yaml');

        if (await fileManager.pathExists(configPath)) {
          expansionPacks.push({
            name: folder.slice(1), // remove the dot
            path: packPath,
          });
        }
      }
@@ -167,13 +167,13 @@
   * Get expansion pack agents
   */
  async getExpansionPackAgents(packPath) {
    const agentsDir = path.join(packPath, 'agents');

    if (!(await fileManager.pathExists(agentsDir))) {
      return [];
    }

    const agentFiles = await resourceLocator.findFiles('*.md', { cwd: agentsDir });
    return agentFiles.map((file) => path.basename(file, '.md'));
  }

  /**
@@ -184,26 +184,27 @@
    const agentTitle = await this.getAgentTitle(agentId, installDir);
    const yamlContent = extractYamlFromAgent(agentContent);

    let content = '';

    if (format === 'mdc') {
      // MDC format for Cursor
      content = '---\n';
      content += 'description: \n';
      content += 'globs: []\n';
      content += 'alwaysApply: false\n';
      content += '---\n\n';
      content += `# ${agentId.toUpperCase()} Agent Rule\n\n`;
      content += `This rule is triggered when the user types \`@${agentId}\` and activates the ${agentTitle} agent persona.\n\n`;
      content += '## Agent Activation\n\n';
      content +=
        'CRITICAL: Read the full YAML, start activation to alter your state of being, follow startup section instructions, stay in this being until told to exit this mode:\n\n';
      content += '```yaml\n';
      content += yamlContent || agentContent.replace(/^#.*$/m, '').trim();
      content += '\n```\n\n';
      content += '## File Reference\n\n';
      const relativePath = path.relative(installDir, agentPath).replaceAll('\\', '/');
      content += `The complete agent definition is available in [${relativePath}](mdc:${relativePath}).\n\n`;
      content += '## Usage\n\n';
      content += `When the user types \`@${agentId}\`, activate this ${agentTitle} persona and follow all instructions defined in the YAML configuration above.\n`;
    } else if (format === 'claude') {
      // Claude Code format

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -3,7 +3,7 @@
 * Helps identify memory leaks and optimize resource usage
 */

const v8 = require('node:v8');

class MemoryProfiler {
  constructor() {
@@ -28,18 +28,18 @@ class MemoryProfiler {
        heapTotal: this.formatBytes(memUsage.heapTotal),
        heapUsed: this.formatBytes(memUsage.heapUsed),
        external: this.formatBytes(memUsage.external),
        arrayBuffers: this.formatBytes(memUsage.arrayBuffers || 0),
      },
      heap: {
        totalHeapSize: this.formatBytes(heapStats.total_heap_size),
        usedHeapSize: this.formatBytes(heapStats.used_heap_size),
        heapSizeLimit: this.formatBytes(heapStats.heap_size_limit),
        mallocedMemory: this.formatBytes(heapStats.malloced_memory),
        externalMemory: this.formatBytes(heapStats.external_memory),
      },
      raw: {
        heapUsed: memUsage.heapUsed,
      },
    };

    // Track peak memory
@@ -55,8 +55,8 @@ class MemoryProfiler {
   * Force garbage collection (requires --expose-gc flag)
   */
  forceGC() {
    if (globalThis.gc) {
      globalThis.gc();
      return true;
    }
    return false;
@@ -72,11 +72,11 @@ class MemoryProfiler {
      currentUsage: {
        rss: this.formatBytes(currentMemory.rss),
        heapTotal: this.formatBytes(currentMemory.heapTotal),
        heapUsed: this.formatBytes(currentMemory.heapUsed),
      },
      peakMemory: this.formatBytes(this.peakMemory),
      totalCheckpoints: this.checkpoints.length,
      runTime: `${((Date.now() - this.startTime) / 1000).toFixed(2)}s`,
    };
  }
@@ -91,7 +91,7 @@ class MemoryProfiler {
      summary,
      memoryGrowth,
      checkpoints: this.checkpoints,
      recommendations: this.getRecommendations(memoryGrowth),
    };
  }
@@ -102,18 +102,18 @@ class MemoryProfiler {
    if (this.checkpoints.length < 2) return [];

    const growth = [];
    for (let index = 1; index < this.checkpoints.length; index++) {
      const previous = this.checkpoints[index - 1];
      const current = this.checkpoints[index];

      const heapDiff = current.raw.heapUsed - previous.raw.heapUsed;
      growth.push({
        from: previous.label,
        to: current.label,
        heapGrowth: this.formatBytes(Math.abs(heapDiff)),
        isIncrease: heapDiff > 0,
        timeDiff: `${((current.timestamp - previous.timestamp) / 1000).toFixed(2)}s`,
      });
    }
@@ -127,7 +127,7 @@ class MemoryProfiler {
    const recommendations = [];

    // Check for large memory growth
    const largeGrowths = memoryGrowth.filter((g) => {
      const bytes = this.parseBytes(g.heapGrowth);
      return bytes > 50 * 1024 * 1024; // 50MB
    });
@@ -136,16 +136,17 @@ class MemoryProfiler {
      recommendations.push({
        type: 'warning',
        message: `Large memory growth detected in ${largeGrowths.length} operations`,
        details: largeGrowths.map((g) => `${g.from} → ${g.to}: ${g.heapGrowth}`),
      });
    }

    // Check peak memory
    if (this.peakMemory > 500 * 1024 * 1024) {
      // 500MB
      recommendations.push({
        type: 'warning',
        message: `High peak memory usage: ${this.formatBytes(this.peakMemory)}`,
        suggestion: 'Consider processing files in smaller batches',
      });
    }
@@ -155,7 +156,7 @@ class MemoryProfiler {
      recommendations.push({
        type: 'error',
        message: 'Potential memory leak detected',
        details: 'Memory usage continuously increases without significant decreases',
      });
    }
@@ -169,8 +170,8 @@ class MemoryProfiler {
    if (this.checkpoints.length < 5) return false;

    let increasingCount = 0;
    for (let index = 1; index < this.checkpoints.length; index++) {
      if (this.checkpoints[index].raw.heapUsed > this.checkpoints[index - 1].raw.heapUsed) {
        increasingCount++;
      }
    }
@@ -187,26 +188,26 @@ class MemoryProfiler {
    const k = 1024;
    const sizes = ['B', 'KB', 'MB', 'GB'];
    const index = Math.floor(Math.log(bytes) / Math.log(k));
    return Number.parseFloat((bytes / Math.pow(k, index)).toFixed(2)) + ' ' + sizes[index];
  }

  /**
   * Parse human-readable bytes back to number
   */
  parseBytes(string_) {
    const match = string_.match(/^([\d.]+)\s*([KMGT]?B?)$/i);
    if (!match) return 0;

    const value = Number.parseFloat(match[1]);
    const unit = match[2].toUpperCase();
    const multipliers = {
      B: 1,
      KB: 1024,
      MB: 1024 * 1024,
      GB: 1024 * 1024 * 1024,
    };

    return value * (multipliers[unit] || 1);
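The two string helpers at the bottom are easy to sanity-check in isolation; a small usage sketch (the require path and export shape are assumptions, since they are outside this hunk):

// Quick round-trip through the visible helpers; the require path is hypothetical.
const MemoryProfiler = require('./memory-profiler');

const profiler = new MemoryProfiler();
console.log(profiler.formatBytes(52_428_800)); // "50 MB"
console.log(profiler.parseBytes('50 MB'));     // 52428800
// forceGC() only collects when Node is started with --expose-gc:
//   node --expose-gc your-script.js
console.log(profiler.forceGC()); // true if gc is exposed, false otherwise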

View File

@@ -17,13 +17,13 @@ class ModuleManager {
    const modules = await Promise.all([
      this.getModule('chalk'),
      this.getModule('ora'),
      this.getModule('inquirer'),
    ]);

    return {
      chalk: modules[0],
      ora: modules[1],
      inquirer: modules[2],
    };
  }
@@ -64,20 +64,26 @@ class ModuleManager {
   */
  async _loadModule(moduleName) {
    switch (moduleName) {
      case 'chalk': {
        return (await import('chalk')).default;
      }
      case 'ora': {
        return (await import('ora')).default;
      }
      case 'inquirer': {
        return (await import('inquirer')).default;
      }
      case 'glob': {
        return (await import('glob')).glob;
      }
      case 'globSync': {
        return (await import('glob')).globSync;
      }
      default: {
        throw new Error(`Unknown module: ${moduleName}`);
      }
    }
  }

  /**
   * Clear the module cache to free memory
@@ -93,13 +99,11 @@ class ModuleManager {
   * @returns {Promise<Object>} Object with module names as keys
   */
  async getModules(moduleNames) {
    const modules = await Promise.all(moduleNames.map((name) => this.getModule(name)));

    return moduleNames.reduce((accumulator, name, index) => {
      accumulator[name] = modules[index];
      return accumulator;
    }, {});
  }
}
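The caching that getModule implements (used by getModules above) is the usual memoized dynamic-import pattern; a standalone sketch for clarity, with names of our own choosing rather than the class internals:

// Standalone sketch of the memoized dynamic-import pattern ModuleManager relies on.
const cache = new Map();

async function getModuleSketch(name) {
  if (!cache.has(name)) {
    // Store the promise, not the resolved module, so concurrent callers
    // share one in-flight import instead of racing.
    cache.set(name, import(name).then((m) => m.default ?? m));
  }
  return cache.get(name);
}

// Usage: const chalk = await getModuleSketch('chalk');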

View File

@@ -107,14 +107,11 @@ class ResourceLocator {
    // Get agents from bmad-core
    const coreAgents = await this.findFiles('agents/*.md', {
      cwd: this.getBmadCorePath(),
    });

    for (const agentFile of coreAgents) {
      const content = await fs.readFile(path.join(this.getBmadCorePath(), agentFile), 'utf8');
      const yamlContent = extractYamlFromAgent(content);
      if (yamlContent) {
        try {
@@ -123,9 +120,9 @@ class ResourceLocator {
            id: path.basename(agentFile, '.md'),
            name: metadata.agent_name || path.basename(agentFile, '.md'),
            description: metadata.description || 'No description available',
            source: 'core',
          });
        } catch {
          // Skip invalid agents
        }
      }
@@ -167,11 +164,12 @@ class ResourceLocator {
            name: config.name || entry.name,
            version: config.version || '1.0.0',
            description: config.description || 'No description available',
            shortTitle:
              config['short-title'] || config.description || 'No description available',
            author: config.author || 'Unknown',
            path: path.join(expansionPacksPath, entry.name),
          });
        } catch {
          // Skip invalid packs
        }
      }
@@ -207,7 +205,7 @@ class ResourceLocator {
      const config = yaml.load(content);
      this._pathCache.set(cacheKey, config);
      return config;
    } catch {
      return null;
    }
  }
@@ -261,7 +259,7 @@ class ResourceLocator {
      const result = { all: allDeps, byType };
      this._pathCache.set(cacheKey, result);
      return result;
    } catch {
      return { all: [], byType: {} };
    }
  }
@@ -295,7 +293,7 @@ class ResourceLocator {
      const config = yaml.load(content);
      this._pathCache.set(cacheKey, config);
      return config;
    } catch {
      return null;
    }
  }

View File

@@ -2,14 +2,6 @@
"name": "bmad-method",
"version": "5.0.0",
"description": "BMad Method installer - AI-powered Agile development framework",
"main": "lib/installer.js",
"bin": {
"bmad": "./bin/bmad.js",
"bmad-method": "./bin/bmad.js"
},
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [
"bmad",
"agile",
@@ -19,8 +11,24 @@
"installer",
"agents"
],
"author": "BMad Team",
"homepage": "https://github.com/bmad-team/bmad-method#readme",
"bugs": {
"url": "https://github.com/bmad-team/bmad-method/issues"
},
"repository": {
"type": "git",
"url": "https://github.com/bmad-team/bmad-method.git"
},
"license": "MIT",
"author": "BMad Team",
"main": "lib/installer.js",
"bin": {
"bmad": "./bin/bmad.js",
"bmad-method": "./bin/bmad.js"
},
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"dependencies": {
"chalk": "^4.1.2",
"commander": "^14.0.0",
@@ -32,13 +40,5 @@
},
"engines": {
"node": ">=20.0.0"
},
"repository": {
"type": "git",
"url": "https://github.com/bmad-team/bmad-method.git"
},
"bugs": {
"url": "https://github.com/bmad-team/bmad-method/issues"
},
"homepage": "https://github.com/bmad-team/bmad-method#readme"
}
}

View File

@@ -1,5 +1,5 @@
const fs = require('fs').promises;
const path = require('path');
const fs = require('node:fs').promises;
const path = require('node:path');
const yaml = require('js-yaml');
const { extractYamlFromAgent } = require('./yaml-utils');
@@ -28,9 +28,9 @@ class DependencyResolver {
id: agentId,
path: agentPath,
content: agentContent,
config: agentConfig
config: agentConfig,
},
resources: []
resources: [],
};
// Personas are now embedded in agent configs, no need to resolve separately
@@ -58,18 +58,18 @@ class DependencyResolver {
id: teamId,
path: teamPath,
content: teamContent,
config: teamConfig
config: teamConfig,
},
agents: [],
resources: new Map() // Use Map to deduplicate resources
resources: new Map(), // Use Map to deduplicate resources
};
// Always add bmad-orchestrator agent first if it's a team
const bmadAgent = await this.resolveAgentDependencies('bmad-orchestrator');
dependencies.agents.push(bmadAgent.agent);
bmadAgent.resources.forEach(res => {
for (const res of bmadAgent.resources) {
dependencies.resources.set(res.path, res);
});
}
// Resolve all agents in the team
let agentsToResolve = teamConfig.agents || [];
@@ -78,7 +78,7 @@ class DependencyResolver {
if (agentsToResolve.includes('*')) {
const allAgents = await this.listAgents();
// Remove wildcard and add all agents except those already in the list and bmad-master
agentsToResolve = agentsToResolve.filter(a => a !== '*');
agentsToResolve = agentsToResolve.filter((a) => a !== '*');
for (const agent of allAgents) {
if (!agentsToResolve.includes(agent) && agent !== 'bmad-master') {
agentsToResolve.push(agent);
@@ -92,9 +92,9 @@ class DependencyResolver {
dependencies.agents.push(agentDeps.agent);
// Add resources with deduplication
agentDeps.resources.forEach(res => {
for (const res of agentDeps.resources) {
dependencies.resources.set(res.path, res);
});
}
}
// Resolve workflows
@@ -104,7 +104,7 @@ class DependencyResolver {
}
// Convert Map back to array
dependencies.resources = Array.from(dependencies.resources.values());
dependencies.resources = [...dependencies.resources.values()];
return dependencies;
}
@@ -123,12 +123,12 @@ class DependencyResolver {
try {
filePath = path.join(this.bmadCore, type, id);
content = await fs.readFile(filePath, 'utf8');
} catch (e) {
} catch {
// If not found in bmad-core, try common folder
try {
filePath = path.join(this.common, type, id);
content = await fs.readFile(filePath, 'utf8');
} catch (e2) {
} catch {
// File not found in either location
}
}
@@ -142,7 +142,7 @@ class DependencyResolver {
type,
id,
path: filePath,
content
content,
};
this.cache.set(cacheKey, resource);
@@ -156,10 +156,8 @@ class DependencyResolver {
async listAgents() {
try {
const files = await fs.readdir(path.join(this.bmadCore, 'agents'));
return files
.filter(f => f.endsWith('.md'))
.map(f => f.replace('.md', ''));
} catch (error) {
return files.filter((f) => f.endsWith('.md')).map((f) => f.replace('.md', ''));
} catch {
return [];
}
}
@@ -167,10 +165,8 @@ class DependencyResolver {
async listTeams() {
try {
const files = await fs.readdir(path.join(this.bmadCore, 'agent-teams'));
return files
.filter(f => f.endsWith('.yaml'))
.map(f => f.replace('.yaml', ''));
} catch (error) {
return files.filter((f) => f.endsWith('.yaml')).map((f) => f.replace('.yaml', ''));
} catch {
return [];
}
}

View File

@@ -10,7 +10,7 @@
 */
function extractYamlFromAgent(agentContent, cleanCommands = false) {
  // Remove carriage returns and match YAML block
  const yamlMatch = agentContent.replaceAll('\r', '').match(/```ya?ml\n([\s\S]*?)\n```/);
  if (!yamlMatch) return null;

  let yamlContent = yamlMatch[1].trim();
@@ -18,12 +18,12 @@ function extractYamlFromAgent(agentContent, cleanCommands = false) {
  // Clean up command descriptions if requested
  // Converts "- command - description" to just "- command"
  if (cleanCommands) {
    yamlContent = yamlContent.replaceAll(/^(\s*-)(\s*"[^"]+")(\s*-\s*.*)$/gm, '$1$2');
  }

  return yamlContent;
}

module.exports = {
  extractYamlFromAgent,
};
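An input/output example of this helper may make the two regexes concrete; the agent markdown below is invented for illustration:

// Illustrative round trip for extractYamlFromAgent.
const { extractYamlFromAgent } = require('./yaml-utils');

const agentMd = [
  '# Example Agent',
  '```yaml',
  'agent_name: Example',
  'commands:',
  '  - "*help" - show commands',
  '```',
].join('\n');

console.log(extractYamlFromAgent(agentMd));
// agent_name: Example
// commands:
//   - "*help" - show commands

console.log(extractYamlFromAgent(agentMd, true));
// cleanCommands strips trailing descriptions, leaving:  - "*help"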

View File

@@ -2,8 +2,8 @@
* Semantic-release plugin to sync installer package.json version
*/
const fs = require('fs');
const path = require('path');
const fs = require('node:fs');
const path = require('node:path');
// This function runs during the "prepare" step of semantic-release
function prepare(_, { nextRelease, logger }) {
@@ -14,13 +14,13 @@ function prepare(_, { nextRelease, logger }) {
if (!fs.existsSync(file)) return logger.log('Installer package.json not found, skipping');
// Read and parse the package.json file
const pkg = JSON.parse(fs.readFileSync(file, 'utf8'));
const package_ = JSON.parse(fs.readFileSync(file, 'utf8'));
// Update the version field with the next release version
pkg.version = nextRelease.version;
package_.version = nextRelease.version;
// Write the updated JSON back to the file
fs.writeFileSync(file, JSON.stringify(pkg, null, 2) + '\n');
fs.writeFileSync(file, JSON.stringify(package_, null, 2) + '\n');
// Log success message
logger.log(`Synced installer package.json to version ${nextRelease.version}`);
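
The node: scheme pins these requires to Node's builtin modules; a bare require('fs') resolves to the builtin too, but the prefix makes the origin explicit and cannot be shadowed by an npm package of the same name (require() support for the prefix landed in Node 14.18/16 and later). If this plugin is wired into semantic-release the usual way, the config would reference it by path; a hypothetical snippet, not this repo's actual release config:

// release.config.js (illustrative wiring only; the plugin path is a guess)
module.exports = {
  plugins: [
    '@semantic-release/commit-analyzer',
    '@semantic-release/release-notes-generator',
    './tools/semantic-release-sync-installer', // the prepare() hook above
    '@semantic-release/npm',
  ],
};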

View File

@@ -1,8 +1,8 @@
// ASCII banner art definitions extracted from banners.js to separate art from logic
const BMAD_TITLE = "BMAD-METHOD";
const FLATTENER_TITLE = "FLATTENER";
const INSTALLER_TITLE = "INSTALLER";
const BMAD_TITLE = 'BMAD-METHOD';
const FLATTENER_TITLE = 'FLATTENER';
const INSTALLER_TITLE = 'INSTALLER';
// Large ASCII blocks (block-style fonts)
const BMAD_LARGE = `

View File

@@ -1,12 +1,10 @@
#!/usr/bin/env node
/**
* Sync installer package.json version with main package.json
* Used by semantic-release to keep versions in sync
*/
const fs = require('fs');
const path = require('path');
const fs = require('node:fs');
const path = require('node:path');
function syncInstallerVersion() {
// Read main package.json
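
The body of syncInstallerVersion() is elided from this diff; by analogy with the prepare() plugin above, it presumably reads the root package.json and copies its version into the installer's manifest. A sketch under that assumption (both paths are guesses, not taken from the script):

function syncInstallerVersion() {
  // Read the root package.json (assumed to sit one level above this script).
  const rootFile = path.join(__dirname, '..', 'package.json');
  const rootPkg = JSON.parse(fs.readFileSync(rootFile, 'utf8'));

  // Mirror its version into the installer's package.json (assumed location).
  const installerFile = path.join(__dirname, '..', 'tools', 'installer', 'package.json');
  const installerPkg = JSON.parse(fs.readFileSync(installerFile, 'utf8'));
  installerPkg.version = rootPkg.version;
  fs.writeFileSync(installerFile, JSON.stringify(installerPkg, null, 2) + '\n');
}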

View File

@@ -1,18 +1,16 @@
#!/usr/bin/env node
const fs = require('fs');
const path = require('path');
const fs = require('node:fs');
const path = require('node:path');
const yaml = require('js-yaml');
const args = process.argv.slice(2);
const arguments_ = process.argv.slice(2);
if (args.length < 2) {
if (arguments_.length < 2) {
console.log('Usage: node update-expansion-version.js <expansion-pack-id> <new-version>');
console.log('Example: node update-expansion-version.js bmad-creator-tools 1.1.0');
process.exit(1);
}
const [packId, newVersion] = args;
const [packId, newVersion] = arguments_;
// Validate version format
if (!/^\d+\.\d+\.\d+$/.test(newVersion)) {
@@ -43,8 +41,9 @@ async function updateVersion() {
console.log(`\n✓ Successfully updated ${packId} to version ${newVersion}`);
console.log('\nNext steps:');
console.log('1. Test the changes');
console.log('2. Commit: git add -A && git commit -m "chore: bump ' + packId + ' to v' + newVersion + '"');
console.log(
'2. Commit: git add -A && git commit -m "chore: bump ' + packId + ' to v' + newVersion + '"',
);
} catch (error) {
console.error('Error updating version:', error.message);
process.exit(1);
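
The version gate above only admits bare MAJOR.MINOR.PATCH strings; anything with a pre-release or build suffix fails. A quick check of the regex as used in this script:

const argv = ['bmad-creator-tools', '1.1.0']; // stand-in for arguments_
const [packId, newVersion] = argv;
console.log(packId, /^\d+\.\d+\.\d+$/.test(newVersion)); // bmad-creator-tools true
console.log(/^\d+\.\d+\.\d+$/.test('1.1.0-beta.1')); // false, suffixes are rejected
console.log(/^\d+\.\d+\.\d+$/.test('v1.1.0')); // false, no leading "v" allowed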

View File

@@ -1,15 +1,15 @@
const fs = require("fs").promises;
const path = require("path");
const { glob } = require("glob");
const fs = require('node:fs').promises;
const path = require('node:path');
const { glob } = require('glob');
// Dynamic imports for ES modules
let chalk, ora, inquirer;
// Initialize ES modules
async function initializeModules() {
chalk = (await import("chalk")).default;
ora = (await import("ora")).default;
inquirer = (await import("inquirer")).default;
chalk = (await import('chalk')).default;
ora = (await import('ora')).default;
inquirer = (await import('inquirer')).default;
}
class V3ToV4Upgrader {
@@ -25,23 +25,15 @@ class V3ToV4Upgrader {
process.stdin.resume();
// 1. Welcome message
console.log(
chalk.bold("\nWelcome to BMad-Method V3 to V4 Upgrade Tool\n")
);
console.log(
"This tool will help you upgrade your BMad-Method V3 project to V4.\n"
);
console.log(chalk.cyan("What this tool does:"));
console.log("- Creates a backup of your V3 files (.bmad-v3-backup/)");
console.log("- Installs the new V4 .bmad-core structure");
console.log(
"- Preserves your PRD, Architecture, and Stories in the new format\n"
);
console.log(chalk.yellow("What this tool does NOT do:"));
console.log(
"- Modify your document content (use doc-migration-task after upgrade)"
);
console.log("- Touch any files outside bmad-agent/ and docs/\n");
console.log(chalk.bold('\nWelcome to BMad-Method V3 to V4 Upgrade Tool\n'));
console.log('This tool will help you upgrade your BMad-Method V3 project to V4.\n');
console.log(chalk.cyan('What this tool does:'));
console.log('- Creates a backup of your V3 files (.bmad-v3-backup/)');
console.log('- Installs the new V4 .bmad-core structure');
console.log('- Preserves your PRD, Architecture, and Stories in the new format\n');
console.log(chalk.yellow('What this tool does NOT do:'));
console.log('- Modify your document content (use doc-migration-task after upgrade)');
console.log('- Touch any files outside bmad-agent/ and docs/\n');
// 2. Get project path
const projectPath = await this.getProjectPath(options.projectPath);
@@ -49,15 +41,11 @@ class V3ToV4Upgrader {
// 3. Validate V3 structure
const validation = await this.validateV3Project(projectPath);
if (!validation.isValid) {
console.error(
chalk.red("\nError: This doesn't appear to be a V3 project.")
);
console.error("Expected to find:");
console.error("- bmad-agent/ directory");
console.error("- docs/ directory\n");
console.error(
"Please check you're in the correct directory and try again."
);
console.error(chalk.red("\nError: This doesn't appear to be a V3 project."));
console.error('Expected to find:');
console.error('- bmad-agent/ directory');
console.error('- docs/ directory\n');
console.error("Please check you're in the correct directory and try again.");
return;
}
@@ -68,15 +56,15 @@ class V3ToV4Upgrader {
if (!options.dryRun) {
const { confirm } = await inquirer.prompt([
{
type: "confirm",
name: "confirm",
message: "Continue with upgrade?",
type: 'confirm',
name: 'confirm',
message: 'Continue with upgrade?',
default: true,
},
]);
if (!confirm) {
console.log("Upgrade cancelled.");
console.log('Upgrade cancelled.');
return;
}
}
@@ -106,7 +94,7 @@ class V3ToV4Upgrader {
process.exit(0);
} catch (error) {
console.error(chalk.red("\nUpgrade error:"), error.message);
console.error(chalk.red('\nUpgrade error:'), error.message);
process.exit(1);
}
}
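
For context on the dynamic imports at the top of this file: recent majors of chalk, ora, and inquirer ship as ESM-only (chalk 5, ora 6, inquirer 9, as far as we know), so a CommonJS module cannot require() them directly and loads them lazily instead. The initializeModules() pattern in miniature:

// CommonJS file: require('chalk') would throw ERR_REQUIRE_ESM on chalk 5+
// under Node versions without require(esm) support.
let chalk;
async function demo() {
  chalk = (await import('chalk')).default; // dynamic import() works from CJS
  console.log(chalk.green('modules ready'));
}
demo();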
@@ -118,9 +106,9 @@ class V3ToV4Upgrader {
const { projectPath } = await inquirer.prompt([
{
type: "input",
name: "projectPath",
message: "Please enter the path to your V3 project:",
type: 'input',
name: 'projectPath',
message: 'Please enter the path to your V3 project:',
default: process.cwd(),
},
]);
@@ -129,45 +117,45 @@ class V3ToV4Upgrader {
}
async validateV3Project(projectPath) {
const spinner = ora("Validating project structure...").start();
const spinner = ora('Validating project structure...').start();
try {
const bmadAgentPath = path.join(projectPath, "bmad-agent");
const docsPath = path.join(projectPath, "docs");
const bmadAgentPath = path.join(projectPath, 'bmad-agent');
const docsPath = path.join(projectPath, 'docs');
const hasBmadAgent = await this.pathExists(bmadAgentPath);
const hasDocs = await this.pathExists(docsPath);
if (hasBmadAgent) {
spinner.text = "✓ Found bmad-agent/ directory";
console.log(chalk.green("\n✓ Found bmad-agent/ directory"));
spinner.text = '✓ Found bmad-agent/ directory';
console.log(chalk.green('\n✓ Found bmad-agent/ directory'));
}
if (hasDocs) {
console.log(chalk.green("✓ Found docs/ directory"));
console.log(chalk.green('✓ Found docs/ directory'));
}
const isValid = hasBmadAgent && hasDocs;
if (isValid) {
spinner.succeed("This appears to be a valid V3 project");
spinner.succeed('This appears to be a valid V3 project');
} else {
spinner.fail("Invalid V3 project structure");
spinner.fail('Invalid V3 project structure');
}
return { isValid, hasBmadAgent, hasDocs };
} catch (error) {
spinner.fail("Validation failed");
spinner.fail('Validation failed');
throw error;
}
}
async analyzeProject(projectPath) {
const docsPath = path.join(projectPath, "docs");
const bmadAgentPath = path.join(projectPath, "bmad-agent");
const docsPath = path.join(projectPath, 'docs');
const bmadAgentPath = path.join(projectPath, 'bmad-agent');
// Find PRD
const prdCandidates = ["prd.md", "PRD.md", "product-requirements.md"];
const prdCandidates = ['prd.md', 'PRD.md', 'product-requirements.md'];
let prdFile = null;
for (const candidate of prdCandidates) {
const candidatePath = path.join(docsPath, candidate);
@@ -178,11 +166,7 @@ class V3ToV4Upgrader {
}
// Find Architecture
const archCandidates = [
"architecture.md",
"Architecture.md",
"technical-architecture.md",
];
const archCandidates = ['architecture.md', 'Architecture.md', 'technical-architecture.md'];
let archFile = null;
for (const candidate of archCandidates) {
const candidatePath = path.join(docsPath, candidate);
@@ -194,9 +178,9 @@ class V3ToV4Upgrader {
// Find Front-end Architecture (V3 specific)
const frontEndCandidates = [
"front-end-architecture.md",
"frontend-architecture.md",
"ui-architecture.md",
'front-end-architecture.md',
'frontend-architecture.md',
'ui-architecture.md',
];
let frontEndArchFile = null;
for (const candidate of frontEndCandidates) {
@@ -209,10 +193,10 @@ class V3ToV4Upgrader {
// Find UX/UI spec
const uxSpecCandidates = [
"ux-ui-spec.md",
"ux-ui-specification.md",
"ui-spec.md",
"ux-spec.md",
'ux-ui-spec.md',
'ux-ui-specification.md',
'ui-spec.md',
'ux-spec.md',
];
let uxSpecFile = null;
for (const candidate of uxSpecCandidates) {
@@ -224,12 +208,7 @@ class V3ToV4Upgrader {
}
// Find v0 prompt or UX prompt
const uxPromptCandidates = [
"v0-prompt.md",
"ux-prompt.md",
"ui-prompt.md",
"design-prompt.md",
];
const uxPromptCandidates = ['v0-prompt.md', 'ux-prompt.md', 'ui-prompt.md', 'design-prompt.md'];
let uxPromptFile = null;
for (const candidate of uxPromptCandidates) {
const candidatePath = path.join(docsPath, candidate);
@@ -240,19 +219,19 @@ class V3ToV4Upgrader {
}
// Find epic files
const epicFiles = await glob("epic*.md", { cwd: docsPath });
const epicFiles = await glob('epic*.md', { cwd: docsPath });
// Find story files
const storiesPath = path.join(docsPath, "stories");
const storiesPath = path.join(docsPath, 'stories');
let storyFiles = [];
if (await this.pathExists(storiesPath)) {
storyFiles = await glob("*.md", { cwd: storiesPath });
storyFiles = await glob('*.md', { cwd: storiesPath });
}
// Count custom files in bmad-agent
const bmadAgentFiles = await glob("**/*.md", {
const bmadAgentFiles = await glob('**/*.md', {
cwd: bmadAgentPath,
ignore: ["node_modules/**"],
ignore: ['node_modules/**'],
});
return {
@@ -268,279 +247,233 @@ class V3ToV4Upgrader {
}
async showPreflightCheck(analysis, options) {
console.log(chalk.bold("\nProject Analysis:"));
console.log(chalk.bold('\nProject Analysis:'));
console.log(
`- PRD found: ${
analysis.prdFile
? `docs/${analysis.prdFile}`
: chalk.yellow("Not found")
}`
`- PRD found: ${analysis.prdFile ? `docs/${analysis.prdFile}` : chalk.yellow('Not found')}`,
);
console.log(
`- Architecture found: ${
analysis.archFile
? `docs/${analysis.archFile}`
: chalk.yellow("Not found")
}`
analysis.archFile ? `docs/${analysis.archFile}` : chalk.yellow('Not found')
}`,
);
if (analysis.frontEndArchFile) {
console.log(
`- Front-end Architecture found: docs/${analysis.frontEndArchFile}`
);
console.log(`- Front-end Architecture found: docs/${analysis.frontEndArchFile}`);
}
console.log(
`- UX/UI Spec found: ${
analysis.uxSpecFile
? `docs/${analysis.uxSpecFile}`
: chalk.yellow("Not found")
}`
analysis.uxSpecFile ? `docs/${analysis.uxSpecFile}` : chalk.yellow('Not found')
}`,
);
console.log(
`- UX/Design Prompt found: ${
analysis.uxPromptFile
? `docs/${analysis.uxPromptFile}`
: chalk.yellow("Not found")
}`
);
console.log(
`- Epic files found: ${analysis.epicFiles.length} files (epic*.md)`
);
console.log(
`- Stories found: ${analysis.storyFiles.length} files in docs/stories/`
analysis.uxPromptFile ? `docs/${analysis.uxPromptFile}` : chalk.yellow('Not found')
}`,
);
console.log(`- Epic files found: ${analysis.epicFiles.length} files (epic*.md)`);
console.log(`- Stories found: ${analysis.storyFiles.length} files in docs/stories/`);
console.log(`- Custom files in bmad-agent/: ${analysis.customFileCount}`);
if (!options.dryRun) {
console.log("\nThe following will be backed up to .bmad-v3-backup/:");
console.log("- bmad-agent/ (entire directory)");
console.log("- docs/ (entire directory)");
console.log('\nThe following will be backed up to .bmad-v3-backup/:');
console.log('- bmad-agent/ (entire directory)');
console.log('- docs/ (entire directory)');
if (analysis.epicFiles.length > 0) {
console.log(
chalk.green(
"\nNote: Epic files found! They will be placed in docs/prd/ with an index.md file."
)
'\nNote: Epic files found! They will be placed in docs/prd/ with an index.md file.',
),
);
console.log(
chalk.green(
"Since epic files exist, you won't need to shard the PRD after upgrade."
)
chalk.green("Since epic files exist, you won't need to shard the PRD after upgrade."),
);
}
}
}
async createBackup(projectPath) {
const spinner = ora("Creating backup...").start();
const spinner = ora('Creating backup...').start();
try {
const backupPath = path.join(projectPath, ".bmad-v3-backup");
const backupPath = path.join(projectPath, '.bmad-v3-backup');
// Check if backup already exists
if (await this.pathExists(backupPath)) {
spinner.fail("Backup directory already exists");
console.error(
chalk.red(
"\nError: Backup directory .bmad-v3-backup/ already exists."
)
);
console.error("\nThis might mean an upgrade was already attempted.");
console.error(
"Please remove or rename the existing backup and try again."
);
throw new Error("Backup already exists");
spinner.fail('Backup directory already exists');
console.error(chalk.red('\nError: Backup directory .bmad-v3-backup/ already exists.'));
console.error('\nThis might mean an upgrade was already attempted.');
console.error('Please remove or rename the existing backup and try again.');
throw new Error('Backup already exists');
}
// Create backup directory
await fs.mkdir(backupPath, { recursive: true });
spinner.text = "✓ Created .bmad-v3-backup/";
console.log(chalk.green("\n✓ Created .bmad-v3-backup/"));
spinner.text = '✓ Created .bmad-v3-backup/';
console.log(chalk.green('\n✓ Created .bmad-v3-backup/'));
// Move bmad-agent
const bmadAgentSrc = path.join(projectPath, "bmad-agent");
const bmadAgentDest = path.join(backupPath, "bmad-agent");
await fs.rename(bmadAgentSrc, bmadAgentDest);
console.log(chalk.green("✓ Moved bmad-agent/ to backup"));
const bmadAgentSource = path.join(projectPath, 'bmad-agent');
const bmadAgentDestination = path.join(backupPath, 'bmad-agent');
await fs.rename(bmadAgentSource, bmadAgentDestination);
console.log(chalk.green('✓ Moved bmad-agent/ to backup'));
// Move docs
const docsSrc = path.join(projectPath, "docs");
const docsDest = path.join(backupPath, "docs");
const docsSrc = path.join(projectPath, 'docs');
const docsDest = path.join(backupPath, 'docs');
await fs.rename(docsSrc, docsDest);
console.log(chalk.green("✓ Moved docs/ to backup"));
console.log(chalk.green('✓ Moved docs/ to backup'));
spinner.succeed("Backup created successfully");
spinner.succeed('Backup created successfully');
} catch (error) {
spinner.fail("Backup failed");
spinner.fail('Backup failed');
throw error;
}
}
async installV4Structure(projectPath) {
const spinner = ora("Installing V4 structure...").start();
const spinner = ora('Installing V4 structure...').start();
try {
// Get the source bmad-core directory (without dot prefix)
const sourcePath = path.join(__dirname, "..", "..", "bmad-core");
const destPath = path.join(projectPath, ".bmad-core");
const sourcePath = path.join(__dirname, '..', '..', 'bmad-core');
const destinationPath = path.join(projectPath, '.bmad-core');
// Copy .bmad-core
await this.copyDirectory(sourcePath, destPath);
spinner.text = "✓ Copied fresh .bmad-core/ directory from V4";
console.log(
chalk.green("\n✓ Copied fresh .bmad-core/ directory from V4")
);
await this.copyDirectory(sourcePath, destinationPath);
spinner.text = '✓ Copied fresh .bmad-core/ directory from V4';
console.log(chalk.green('\n✓ Copied fresh .bmad-core/ directory from V4'));
// Create docs directory
const docsPath = path.join(projectPath, "docs");
const docsPath = path.join(projectPath, 'docs');
await fs.mkdir(docsPath, { recursive: true });
console.log(chalk.green("✓ Created new docs/ directory"));
console.log(chalk.green('✓ Created new docs/ directory'));
// Create install manifest for future updates
await this.createInstallManifest(projectPath);
console.log(chalk.green("✓ Created install manifest"));
console.log(chalk.green('✓ Created install manifest'));
console.log(
chalk.yellow(
"\nNote: Your V3 bmad-agent content has been backed up and NOT migrated."
)
chalk.yellow('\nNote: Your V3 bmad-agent content has been backed up and NOT migrated.'),
);
console.log(
chalk.yellow(
"The new V4 agents are completely different and look for different file structures."
)
'The new V4 agents are completely different and look for different file structures.',
),
);
spinner.succeed("V4 structure installed successfully");
spinner.succeed('V4 structure installed successfully');
} catch (error) {
spinner.fail("V4 installation failed");
spinner.fail('V4 installation failed');
throw error;
}
}
async migrateDocuments(projectPath, analysis) {
const spinner = ora("Migrating your project documents...").start();
const spinner = ora('Migrating your project documents...').start();
try {
const backupDocsPath = path.join(projectPath, ".bmad-v3-backup", "docs");
const newDocsPath = path.join(projectPath, "docs");
const backupDocsPath = path.join(projectPath, '.bmad-v3-backup', 'docs');
const newDocsPath = path.join(projectPath, 'docs');
let copiedCount = 0;
// Copy PRD
if (analysis.prdFile) {
const src = path.join(backupDocsPath, analysis.prdFile);
const dest = path.join(newDocsPath, analysis.prdFile);
await fs.copyFile(src, dest);
const source = path.join(backupDocsPath, analysis.prdFile);
const destination = path.join(newDocsPath, analysis.prdFile);
await fs.copyFile(source, destination);
console.log(chalk.green(`\n✓ Copied PRD to docs/${analysis.prdFile}`));
copiedCount++;
}
// Copy Architecture
if (analysis.archFile) {
const src = path.join(backupDocsPath, analysis.archFile);
const dest = path.join(newDocsPath, analysis.archFile);
await fs.copyFile(src, dest);
console.log(
chalk.green(`✓ Copied Architecture to docs/${analysis.archFile}`)
);
const source = path.join(backupDocsPath, analysis.archFile);
const destination = path.join(newDocsPath, analysis.archFile);
await fs.copyFile(source, destination);
console.log(chalk.green(`✓ Copied Architecture to docs/${analysis.archFile}`));
copiedCount++;
}
// Copy Front-end Architecture if exists
if (analysis.frontEndArchFile) {
const src = path.join(backupDocsPath, analysis.frontEndArchFile);
const dest = path.join(newDocsPath, analysis.frontEndArchFile);
await fs.copyFile(src, dest);
const source = path.join(backupDocsPath, analysis.frontEndArchFile);
const destination = path.join(newDocsPath, analysis.frontEndArchFile);
await fs.copyFile(source, destination);
console.log(
chalk.green(
`✓ Copied Front-end Architecture to docs/${analysis.frontEndArchFile}`
)
chalk.green(`✓ Copied Front-end Architecture to docs/${analysis.frontEndArchFile}`),
);
console.log(
chalk.yellow(
"Note: V4 uses a single full-stack-architecture.md - use doc-migration-task to merge"
)
'Note: V4 uses a single full-stack-architecture.md - use doc-migration-task to merge',
),
);
copiedCount++;
}
// Copy UX/UI Spec if exists
if (analysis.uxSpecFile) {
const src = path.join(backupDocsPath, analysis.uxSpecFile);
const dest = path.join(newDocsPath, analysis.uxSpecFile);
await fs.copyFile(src, dest);
console.log(
chalk.green(`✓ Copied UX/UI Spec to docs/${analysis.uxSpecFile}`)
);
const source = path.join(backupDocsPath, analysis.uxSpecFile);
const destination = path.join(newDocsPath, analysis.uxSpecFile);
await fs.copyFile(source, destination);
console.log(chalk.green(`✓ Copied UX/UI Spec to docs/${analysis.uxSpecFile}`));
copiedCount++;
}
// Copy UX/Design Prompt if exists
if (analysis.uxPromptFile) {
const src = path.join(backupDocsPath, analysis.uxPromptFile);
const dest = path.join(newDocsPath, analysis.uxPromptFile);
await fs.copyFile(src, dest);
console.log(
chalk.green(
`✓ Copied UX/Design Prompt to docs/${analysis.uxPromptFile}`
)
);
const source = path.join(backupDocsPath, analysis.uxPromptFile);
const destination = path.join(newDocsPath, analysis.uxPromptFile);
await fs.copyFile(source, destination);
console.log(chalk.green(`✓ Copied UX/Design Prompt to docs/${analysis.uxPromptFile}`));
copiedCount++;
}
// Copy stories
if (analysis.storyFiles.length > 0) {
const storiesDir = path.join(newDocsPath, "stories");
const storiesDir = path.join(newDocsPath, 'stories');
await fs.mkdir(storiesDir, { recursive: true });
for (const storyFile of analysis.storyFiles) {
const src = path.join(backupDocsPath, "stories", storyFile);
const dest = path.join(storiesDir, storyFile);
await fs.copyFile(src, dest);
const source = path.join(backupDocsPath, 'stories', storyFile);
const destination = path.join(storiesDir, storyFile);
await fs.copyFile(source, destination);
}
console.log(
chalk.green(
`✓ Copied ${analysis.storyFiles.length} story files to docs/stories/`
)
chalk.green(`✓ Copied ${analysis.storyFiles.length} story files to docs/stories/`),
);
copiedCount += analysis.storyFiles.length;
}
// Copy epic files to prd subfolder
if (analysis.epicFiles.length > 0) {
const prdDir = path.join(newDocsPath, "prd");
const prdDir = path.join(newDocsPath, 'prd');
await fs.mkdir(prdDir, { recursive: true });
for (const epicFile of analysis.epicFiles) {
const src = path.join(backupDocsPath, epicFile);
const dest = path.join(prdDir, epicFile);
await fs.copyFile(src, dest);
const source = path.join(backupDocsPath, epicFile);
const destination = path.join(prdDir, epicFile);
await fs.copyFile(source, destination);
}
console.log(
chalk.green(
`✓ Found and copied ${analysis.epicFiles.length} epic files to docs/prd/`
)
chalk.green(`✓ Found and copied ${analysis.epicFiles.length} epic files to docs/prd/`),
);
// Create index.md for the prd folder
await this.createPrdIndex(projectPath, analysis);
console.log(chalk.green("✓ Created index.md in docs/prd/"));
console.log(chalk.green('✓ Created index.md in docs/prd/'));
console.log(
chalk.green(
"\nNote: Epic files detected! These are compatible with V4 and have been copied."
)
);
console.log(
chalk.green(
"You won't need to shard the PRD since epics already exist."
)
'\nNote: Epic files detected! These are compatible with V4 and have been copied.',
),
);
console.log(chalk.green("You won't need to shard the PRD since epics already exist."));
copiedCount += analysis.epicFiles.length;
}
spinner.succeed(`Migrated ${copiedCount} documents successfully`);
} catch (error) {
spinner.fail("Document migration failed");
spinner.fail('Document migration failed');
throw error;
}
}
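
analyzeProject() repeats one idiom for every document type: walk a candidate list and keep the first file that exists. A distilled helper using the same node: builtins this file already imports (the helper name is ours, not the upgrader's API):

const fsp = require('node:fs').promises;
const nodePath = require('node:path');

async function findFirstExisting(dir, candidates) {
  for (const candidate of candidates) {
    try {
      await fsp.access(nodePath.join(dir, candidate));
      return candidate; // first hit wins, mirroring the loops above
    } catch {
      // not present, try the next candidate
    }
  }
  return null;
}

// const prdFile = await findFirstExisting(docsPath, ['prd.md', 'PRD.md', 'product-requirements.md']);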
@@ -548,21 +481,21 @@ class V3ToV4Upgrader {
async setupIDE(projectPath, selectedIdes) {
// Use the IDE selections passed from the installer
if (!selectedIdes || selectedIdes.length === 0) {
console.log(chalk.dim("No IDE setup requested - skipping"));
console.log(chalk.dim('No IDE setup requested - skipping'));
return;
}
const ideSetup = require("../installer/lib/ide-setup");
const spinner = ora("Setting up IDE rules for all agents...").start();
const ideSetup = require('../installer/lib/ide-setup');
const spinner = ora('Setting up IDE rules for all agents...').start();
try {
const ideMessages = {
cursor: "Rules created in .cursor/rules/bmad/",
"claude-code": "Commands created in .claude/commands/BMad/",
windsurf: "Rules created in .windsurf/rules/",
trae: "Rules created in.trae/rules/",
roo: "Custom modes created in .roomodes",
cline: "Rules created in .clinerules/",
cursor: 'Rules created in .cursor/rules/bmad/',
'claude-code': 'Commands created in .claude/commands/BMad/',
windsurf: 'Rules created in .windsurf/workflows/',
trae: 'Rules created in .trae/rules/',
roo: 'Custom modes created in .roomodes',
cline: 'Rules created in .clinerules/',
};
// Setup each selected IDE
@@ -573,17 +506,15 @@ class V3ToV4Upgrader {
}
spinner.succeed(`IDE setup complete for ${selectedIdes.length} IDE(s)!`);
} catch (error) {
spinner.fail("IDE setup failed");
console.error(
chalk.yellow("IDE setup failed, but upgrade is complete.")
);
} catch {
spinner.fail('IDE setup failed');
console.error(chalk.yellow('IDE setup failed, but upgrade is complete.'));
}
}
showCompletionReport(projectPath, analysis) {
console.log(chalk.bold.green("\n✓ Upgrade Complete!\n"));
console.log(chalk.bold("Summary:"));
console.log(chalk.bold.green('\n✓ Upgrade Complete!\n'));
console.log(chalk.bold('Summary:'));
console.log(`- V3 files backed up to: .bmad-v3-backup/`);
console.log(`- V4 structure installed: .bmad-core/ (fresh from V4)`);
@@ -596,50 +527,36 @@ class V3ToV4Upgrader {
analysis.storyFiles.length;
console.log(
`- Documents migrated: ${totalDocs} files${
analysis.epicFiles.length > 0
? ` + ${analysis.epicFiles.length} epics`
: ""
}`
analysis.epicFiles.length > 0 ? ` + ${analysis.epicFiles.length} epics` : ''
}`,
);
console.log(chalk.bold("\nImportant Changes:"));
console.log(
"- The V4 agents (sm, dev, etc.) expect different file structures than V3"
);
console.log(
"- Your V3 bmad-agent content was NOT migrated (it's incompatible)"
);
console.log(chalk.bold('\nImportant Changes:'));
console.log('- The V4 agents (sm, dev, etc.) expect different file structures than V3');
console.log("- Your V3 bmad-agent content was NOT migrated (it's incompatible)");
if (analysis.epicFiles.length > 0) {
console.log(
"- Epic files were found and copied - no PRD sharding needed!"
);
console.log('- Epic files were found and copied - no PRD sharding needed!');
}
if (analysis.frontEndArchFile) {
console.log(
"- Front-end architecture found - V4 uses full-stack-architecture.md, migration needed"
'- Front-end architecture found - V4 uses full-stack-architecture.md, migration needed',
);
}
if (analysis.uxSpecFile || analysis.uxPromptFile) {
console.log(
"- UX/UI design files found and copied - ready for use with V4"
);
console.log('- UX/UI design files found and copied - ready for use with V4');
}
console.log(chalk.bold("\nNext Steps:"));
console.log("1. Review your documents in the new docs/ folder");
console.log(chalk.bold('\nNext Steps:'));
console.log('1. Review your documents in the new docs/ folder');
console.log(
"2. Use @bmad-master agent to run the doc-migration-task to align your documents with V4 templates"
'2. Use @bmad-master agent to run the doc-migration-task to align your documents with V4 templates',
);
if (analysis.epicFiles.length === 0) {
console.log(
"3. Use @bmad-master agent to shard the PRD to create epic files"
);
console.log('3. Use @bmad-master agent to shard the PRD to create epic files');
}
console.log(
chalk.dim(
"\nYour V3 backup is preserved in .bmad-v3-backup/ and can be restored if needed."
)
chalk.dim('\nYour V3 backup is preserved in .bmad-v3-backup/ and can be restored if needed.'),
);
}
@@ -652,67 +569,61 @@ class V3ToV4Upgrader {
}
}
async copyDirectory(src, dest) {
await fs.mkdir(dest, { recursive: true });
const entries = await fs.readdir(src, { withFileTypes: true });
async copyDirectory(source, destination) {
await fs.mkdir(destination, { recursive: true });
const entries = await fs.readdir(source, { withFileTypes: true });
for (const entry of entries) {
const srcPath = path.join(src, entry.name);
const destPath = path.join(dest, entry.name);
const sourcePath = path.join(source, entry.name);
const destinationPath = path.join(destination, entry.name);
if (entry.isDirectory()) {
await this.copyDirectory(srcPath, destPath);
} else {
await fs.copyFile(srcPath, destPath);
}
await (entry.isDirectory()
? this.copyDirectory(sourcePath, destinationPath)
: fs.copyFile(sourcePath, destinationPath));
}
}
async createPrdIndex(projectPath, analysis) {
const prdIndexPath = path.join(projectPath, "docs", "prd", "index.md");
const prdPath = path.join(
projectPath,
"docs",
analysis.prdFile || "prd.md"
);
const prdIndexPath = path.join(projectPath, 'docs', 'prd', 'index.md');
const prdPath = path.join(projectPath, 'docs', analysis.prdFile || 'prd.md');
let indexContent = "# Product Requirements Document\n\n";
let indexContent = '# Product Requirements Document\n\n';
// Try to read the PRD to get the title and intro content
if (analysis.prdFile && (await this.pathExists(prdPath))) {
try {
const prdContent = await fs.readFile(prdPath, "utf8");
const lines = prdContent.split("\n");
const prdContent = await fs.readFile(prdPath, 'utf8');
const lines = prdContent.split('\n');
// Find the first heading
const titleMatch = lines.find((line) => line.startsWith("# "));
const titleMatch = lines.find((line) => line.startsWith('# '));
if (titleMatch) {
indexContent = titleMatch + "\n\n";
indexContent = titleMatch + '\n\n';
}
// Get any content before the first ## section
let introContent = "";
let introContent = '';
let foundFirstSection = false;
for (const line of lines) {
if (line.startsWith("## ")) {
if (line.startsWith('## ')) {
foundFirstSection = true;
break;
}
if (!line.startsWith("# ")) {
introContent += line + "\n";
if (!line.startsWith('# ')) {
introContent += line + '\n';
}
}
if (introContent.trim()) {
indexContent += introContent.trim() + "\n\n";
indexContent += introContent.trim() + '\n\n';
}
} catch (error) {
} catch {
// If we can't read the PRD, just use default content
}
}
// Add sections list
indexContent += "## Sections\n\n";
indexContent += '## Sections\n\n';
// Sort epic files for consistent ordering
const sortedEpics = [...analysis.epicFiles].sort();
@@ -720,38 +631,36 @@ class V3ToV4Upgrader {
for (const epicFile of sortedEpics) {
// Extract epic name from filename
const epicName = epicFile
.replace(/\.md$/, "")
.replace(/^epic-?/i, "")
.replace(/-/g, " ")
.replace(/^\d+\s*/, "") // Remove leading numbers
.replace(/\.md$/, '')
.replace(/^epic-?/i, '')
.replaceAll('-', ' ')
.replace(/^\d+\s*/, '') // Remove leading numbers
.trim();
const displayName = epicName.charAt(0).toUpperCase() + epicName.slice(1);
indexContent += `- [${
displayName || epicFile.replace(".md", "")
}](./${epicFile})\n`;
indexContent += `- [${displayName || epicFile.replace('.md', '')}](./${epicFile})\n`;
}
await fs.writeFile(prdIndexPath, indexContent);
}
async createInstallManifest(projectPath) {
const fileManager = require("../installer/lib/file-manager");
const { glob } = require("glob");
const fileManager = require('../installer/lib/file-manager');
const { glob } = require('glob');
// Get all files in .bmad-core for the manifest
const bmadCorePath = path.join(projectPath, ".bmad-core");
const files = await glob("**/*", {
const bmadCorePath = path.join(projectPath, '.bmad-core');
const files = await glob('**/*', {
cwd: bmadCorePath,
nodir: true,
ignore: ["**/.git/**", "**/node_modules/**"],
ignore: ['**/.git/**', '**/node_modules/**'],
});
// Prepend .bmad-core/ to file paths for manifest
const manifestFiles = files.map((file) => path.join(".bmad-core", file));
const manifestFiles = files.map((file) => path.join('.bmad-core', file));
const config = {
installType: "full",
installType: 'full',
agent: null,
ide: null, // Will be set if IDE setup is done later
};
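
The display-name chain in createPrdIndex() is easiest to follow on a concrete filename; each step below matches the final version of the chain above:

const epicFile = 'epic-2-user-auth.md'; // hypothetical filename
const epicName = epicFile
  .replace(/\.md$/, '')    // 'epic-2-user-auth'
  .replace(/^epic-?/i, '') // '2-user-auth'
  .replaceAll('-', ' ')    // '2 user auth'
  .replace(/^\d+\s*/, '')  // 'user auth'
  .trim();
const displayName = epicName.charAt(0).toUpperCase() + epicName.slice(1); // 'User auth'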

Some files were not shown because too many files have changed in this diff.