Compare commits

25 Commits: prettier-e...agent-fix
| Author | SHA1 | Date |
| --- | --- | --- |
|  | 0a2dd91e40 |  |
|  | f5272f12e4 |  |
|  | 26890a0a03 |  |
|  | cf22fd98f3 |  |
|  | fe318ecc07 |  |
|  | f959a07bda |  |
|  | c0899432c1 |  |
|  | 8573852a6e |  |
|  | 39437e9268 |  |
|  | 1772a30368 |  |
|  | ba4fb4d084 |  |
|  | 3eb706c49a |  |
|  | 3f5abf347d |  |
|  | c58a4f3b59 |  |
|  | ed539432fb |  |
|  | 51284d6ecf |  |
|  | 6cba05114e |  |
|  | ac360cd0bf |  |
|  | fab9d5e1f5 |  |
|  | 93426c2d2f |  |
|  | f56d37a60a |  |
|  | 224cfc05dc |  |
|  | 6cb2fa68b3 |  |
|  | d21ac491a0 |  |
|  | 848e33fdd9 |  |
**.github/workflows/manual-release.yaml** (vendored, new file, +173 lines)

@@ -0,0 +1,173 @@

````yaml
name: Manual Release

on:
  workflow_dispatch:
    inputs:
      version_bump:
        description: Version bump type
        required: true
        default: patch
        type: choice
        options:
          - patch
          - minor
          - major

permissions:
  contents: write
  packages: write

jobs:
  release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "20"
          cache: npm
          registry-url: https://registry.npmjs.org

      - name: Install dependencies
        run: npm ci

      - name: Run tests and validation
        run: |
          npm run validate
          npm run format:check
          npm run lint

      - name: Configure Git
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

      - name: Bump version
        run: npm run version:${{ github.event.inputs.version_bump }}

      - name: Get new version and previous tag
        id: version
        run: |
          echo "new_version=$(node -p "require('./package.json').version")" >> $GITHUB_OUTPUT
          echo "previous_tag=$(git describe --tags --abbrev=0)" >> $GITHUB_OUTPUT

      - name: Update installer package.json
        run: |
          sed -i 's/"version": ".*"/"version": "${{ steps.version.outputs.new_version }}"/' tools/installer/package.json

      - name: Build project
        run: npm run build

      - name: Commit version bump
        run: |
          git add .
          git commit -m "release: bump to v${{ steps.version.outputs.new_version }}"

      - name: Generate release notes
        id: release_notes
        run: |
          # Get commits since last tag
          COMMITS=$(git log ${{ steps.version.outputs.previous_tag }}..HEAD --pretty=format:"- %s" --reverse)

          # Categorize commits
          FEATURES=$(echo "$COMMITS" | grep -E "^- (feat|Feature)" || true)
          FIXES=$(echo "$COMMITS" | grep -E "^- (fix|Fix)" || true)
          CHORES=$(echo "$COMMITS" | grep -E "^- (chore|Chore)" || true)
          OTHERS=$(echo "$COMMITS" | grep -v -E "^- (feat|Feature|fix|Fix|chore|Chore|release:|Release:)" || true)

          # Build release notes
          cat > release_notes.md << 'EOF'
          ## 🚀 What's New in v${{ steps.version.outputs.new_version }}

          EOF

          if [ ! -z "$FEATURES" ]; then
            echo "### ✨ New Features" >> release_notes.md
            echo "$FEATURES" >> release_notes.md
            echo "" >> release_notes.md
          fi

          if [ ! -z "$FIXES" ]; then
            echo "### 🐛 Bug Fixes" >> release_notes.md
            echo "$FIXES" >> release_notes.md
            echo "" >> release_notes.md
          fi

          if [ ! -z "$OTHERS" ]; then
            echo "### 📦 Other Changes" >> release_notes.md
            echo "$OTHERS" >> release_notes.md
            echo "" >> release_notes.md
          fi

          if [ ! -z "$CHORES" ]; then
            echo "### 🔧 Maintenance" >> release_notes.md
            echo "$CHORES" >> release_notes.md
            echo "" >> release_notes.md
          fi

          cat >> release_notes.md << 'EOF'

          ## 📦 Installation

          ```bash
          npx bmad-method install
          ```

          **Full Changelog**: https://github.com/bmadcode/BMAD-METHOD/compare/${{ steps.version.outputs.previous_tag }}...v${{ steps.version.outputs.new_version }}
          EOF

          # Output for GitHub Actions
          echo "RELEASE_NOTES<<EOF" >> $GITHUB_OUTPUT
          cat release_notes.md >> $GITHUB_OUTPUT
          echo "EOF" >> $GITHUB_OUTPUT

      - name: Create and push tag
        run: |
          # Check if tag already exists
          if git rev-parse "v${{ steps.version.outputs.new_version }}" >/dev/null 2>&1; then
            echo "Tag v${{ steps.version.outputs.new_version }} already exists, skipping tag creation"
          else
            git tag -a "v${{ steps.version.outputs.new_version }}" -m "Release v${{ steps.version.outputs.new_version }}"
            git push origin "v${{ steps.version.outputs.new_version }}"
          fi

      - name: Push changes to main
        run: |
          if git push origin HEAD:main 2>/dev/null; then
            echo "✅ Successfully pushed to main branch"
          else
            echo "⚠️ Could not push to main (protected branch). This is expected."
            echo "📝 Version bump and tag were created successfully."
          fi

      - name: Publish to NPM
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
        run: npm publish

      - name: Create GitHub Release
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: v${{ steps.version.outputs.new_version }}
          release_name: "BMad Method v${{ steps.version.outputs.new_version }}"
          body: ${{ steps.release_notes.outputs.RELEASE_NOTES }}
          draft: false
          prerelease: false

      - name: Summary
        run: |
          echo "🎉 Successfully released v${{ steps.version.outputs.new_version }}!"
          echo "📦 Published to NPM with @latest tag"
          echo "🏷️ Git tag: v${{ steps.version.outputs.new_version }}"
          echo "✅ Users running 'npx bmad-method install' will now get version ${{ steps.version.outputs.new_version }}"
          echo ""
          echo "📝 Release notes preview:"
          cat release_notes.md
````
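The release-notes categorization above is plain `git log` plus `grep`, so it can be sanity-checked locally before cutting a release. A minimal sketch, assuming you are inside the repository and at least one tag exists:

```bash
#!/usr/bin/env bash
# Preview how the Manual Release workflow would bucket commits.
# Mirrors the grep patterns from the "Generate release notes" step.
set -euo pipefail

PREV_TAG=$(git describe --tags --abbrev=0)
COMMITS=$(git log "${PREV_TAG}..HEAD" --pretty=format:"- %s" --reverse)

echo "Commits since ${PREV_TAG}:"
echo "### Features";    echo "$COMMITS" | grep -E "^- (feat|Feature)"   || echo "(none)"
echo "### Fixes";       echo "$COMMITS" | grep -E "^- (fix|Fix)"        || echo "(none)"
echo "### Maintenance"; echo "$COMMITS" | grep -E "^- (chore|Chore)"    || echo "(none)"
```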
**.github/workflows/promote-to-stable.yaml** (vendored, deleted, −122 lines)

@@ -1,122 +0,0 @@

```yaml
name: Promote to Stable

"on":
  workflow_dispatch:
    inputs:
      version_bump:
        description: "Version bump type"
        required: true
        default: "minor"
        type: choice
        options:
          - patch
          - minor
          - major

jobs:
  promote:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "20"
          registry-url: "https://registry.npmjs.org"

      - name: Configure Git
        run: |
          git config --global user.name "github-actions[bot]"
          git config --global user.email "github-actions[bot]@users.noreply.github.com"
          git config --global url."https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/".insteadOf "https://github.com/"

      - name: Switch to stable branch
        run: |
          git checkout stable
          git pull origin stable

      - name: Merge main into stable
        run: |
          git merge origin/main --no-edit

      - name: Install dependencies
        run: npm ci

      - name: Get current version and calculate new version
        id: version
        run: |
          # Get current version from package.json
          CURRENT_VERSION=$(node -p "require('./package.json').version")
          echo "current_version=$CURRENT_VERSION" >> $GITHUB_OUTPUT

          # Remove beta suffix if present
          BASE_VERSION=$(echo $CURRENT_VERSION | sed 's/-beta\.[0-9]\+//')
          echo "base_version=$BASE_VERSION" >> $GITHUB_OUTPUT

          # Calculate new version based on bump type
          IFS='.' read -ra VERSION_PARTS <<< "$BASE_VERSION"
          MAJOR=${VERSION_PARTS[0]}
          MINOR=${VERSION_PARTS[1]}
          PATCH=${VERSION_PARTS[2]}

          case "${{ github.event.inputs.version_bump }}" in
            "major")
              NEW_VERSION="$((MAJOR + 1)).0.0"
              ;;
            "minor")
              NEW_VERSION="$MAJOR.$((MINOR + 1)).0"
              ;;
            "patch")
              NEW_VERSION="$MAJOR.$MINOR.$((PATCH + 1))"
              ;;
            *)
              NEW_VERSION="$BASE_VERSION"
              ;;
          esac

          echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
          echo "Promoting from $CURRENT_VERSION to $NEW_VERSION"

      - name: Update package.json versions
        run: |
          # Update main package.json
          npm version ${{ steps.version.outputs.new_version }} --no-git-tag-version

          # Update installer package.json
          sed -i 's/"version": ".*"/"version": "${{ steps.version.outputs.new_version }}"/' tools/installer/package.json

      - name: Update package-lock.json
        run: npm install --package-lock-only

      - name: Commit stable release
        run: |
          git add .
          git commit -m "release: promote to stable ${{ steps.version.outputs.new_version }}

          - Promote beta features to stable release
          - Update version from ${{ steps.version.outputs.current_version }} to ${{ steps.version.outputs.new_version }}
          - Automated promotion via GitHub Actions"

      - name: Push stable release
        run: |
          git remote set-url origin https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git
          git push origin stable

      - name: Switch back to main
        run: git checkout main

      - name: Summary
        run: |
          echo "🎉 Successfully promoted to stable!"
          echo "📦 Version: ${{ steps.version.outputs.new_version }}"
          echo "🚀 The stable release will be automatically published to NPM via semantic-release"
          echo "✅ Users running 'npx bmad-method install' will now get version ${{ steps.version.outputs.new_version }}"
```
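The bump arithmetic in the `Get current version and calculate new version` step is self-contained shell; a stripped-down sketch for trying it outside CI (the version string and bump type below are placeholder inputs):

```bash
#!/usr/bin/env bash
# Sketch of the promote-to-stable version math: strip any -beta.N
# suffix, then bump the requested semver component.
set -euo pipefail

CURRENT_VERSION="${1:-5.1.0-beta.3}"   # placeholder input
BUMP="${2:-minor}"                     # patch | minor | major

BASE_VERSION=$(echo "$CURRENT_VERSION" | sed 's/-beta\.[0-9]\+//')
IFS='.' read -ra P <<< "$BASE_VERSION"

case "$BUMP" in
  major) NEW_VERSION="$((P[0] + 1)).0.0" ;;
  minor) NEW_VERSION="${P[0]}.$((P[1] + 1)).0" ;;
  patch) NEW_VERSION="${P[0]}.${P[1]}.$((P[2] + 1))" ;;
  *)     NEW_VERSION="$BASE_VERSION" ;;
esac

echo "Promoting from $CURRENT_VERSION to $NEW_VERSION"
```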
**.github/workflows/release.yaml** (vendored, deleted, −74 lines)

@@ -1,74 +0,0 @@

```yaml
name: Release
"on":
  push:
    branches:
      - main
      - stable
  workflow_dispatch:
    inputs:
      version_type:
        description: Version bump type
        required: true
        default: patch
        type: choice
        options:
          - patch
          - minor
          - major
permissions:
  contents: write
  issues: write
  pull-requests: write
  packages: write
jobs:
  release:
    runs-on: ubuntu-latest
    if: ${{ github.event_name != 'push' || !contains(github.event.head_commit.message, '[skip ci]') }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "20"
          cache: "npm"
          registry-url: "https://registry.npmjs.org"
      - name: Install dependencies
        run: npm ci
      - name: Run tests and validation
        run: |
          npm run validate
          npm run format
      - name: Debug permissions
        run: |
          echo "Testing git permissions..."
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          echo "Git config set successfully"
      - name: Manual version bump
        if: github.event_name == 'workflow_dispatch'
        run: npm run version:${{ github.event.inputs.version_type }}
      - name: Semantic Release
        if: github.event_name == 'push'
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
        run: npm run release
      - name: Clean changelog formatting
        if: github.event_name == 'push'
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          # Remove any Claude Code attribution from changelog
          sed -i '/🤖 Generated with \[Claude Code\]/,+2d' CHANGELOG.md || true
          # Format and commit if changes exist
          npm run format
          if ! git diff --quiet CHANGELOG.md; then
            git add CHANGELOG.md
            git commit -m "chore: clean changelog formatting [skip ci]"
            git push
          fi
```
**.gitignore** (vendored, 2 changes)

```diff
@@ -43,4 +43,4 @@ CLAUDE.md
 test-project-install/*
 sample-project/*
 flattened-codebase.xml
+*.stats.md
```
**Semantic-release configuration** (deleted, −27 lines)

@@ -1,27 +0,0 @@

```json
{
  "branches": [
    {
      "name": "main",
      "prerelease": "beta",
      "channel": "beta"
    },
    {
      "name": "stable",
      "channel": "latest"
    }
  ],
  "plugins": [
    "@semantic-release/commit-analyzer",
    "@semantic-release/release-notes-generator",
    [
      "@semantic-release/changelog",
      {
        "changelogFile": "CHANGELOG.md",
        "changelogTitle": ""
      }
    ],
    "@semantic-release/npm",
    "./tools/semantic-release-sync-installer.js",
    "@semantic-release/github"
  ]
}
```
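This configuration (now deleted along with the semantic-release workflow) could previously be exercised locally with a dry run, which is what the old `release:test` script wrapped; authentication errors are expected outside CI:

```bash
# Dry-run semantic-release against the local config; nothing is published.
npx semantic-release --dry-run --no-ci
```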
**README installation snippet** (+2 lines)

````diff
@@ -75,6 +75,8 @@ This makes it easy to benefit from the latest improvements, bug fixes, and new agents.
 
 ```bash
 npx bmad-method install
+# OR explicitly use stable tag:
+npx bmad-method@stable install
 # OR if you already have BMad installed:
 git pull
 npm run install:bmad
````
**Agent: analyst (Mary)**

```diff
@@ -17,7 +17,8 @@ REQUEST-RESOLUTION: Match user requests to your commands/dependencies flexibly (
 activation-instructions:
   - STEP 1: Read THIS ENTIRE FILE - it contains your complete persona definition
   - STEP 2: Adopt the persona defined in the 'agent' and 'persona' sections below
-  - STEP 3: Greet user with your name/role and mention `*help` command
+  - STEP 3: Load and read `bmad-core/core-config.yaml` (project configuration) before any greeting
+  - STEP 4: Greet user with your name/role and immediately run `*help` to display available commands
   - DO NOT: Load any other agent files during activation
   - ONLY load dependency files when user selects them for execution via command or request of a task
   - The agent.customization field ALWAYS takes precedence over any conflicting instructions
@@ -26,7 +27,7 @@ activation-instructions:
   - CRITICAL RULE: When executing formal task workflows from dependencies, ALL task instructions override any conflicting base behavioral constraints. Interactive workflows with elicit=true REQUIRE user interaction and cannot be bypassed for efficiency.
   - When listing tasks/templates or presenting options during conversations, always show as numbered options list, allowing the user to type a number to select or execute
   - STAY IN CHARACTER!
-  - CRITICAL: On activation, ONLY greet user and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
+  - CRITICAL: On activation, ONLY greet user, auto-run `*help`, and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
 agent:
   name: Mary
   id: analyst
```
**Agent: architect (Winston)**

```diff
@@ -17,7 +17,8 @@ REQUEST-RESOLUTION: Match user requests to your commands/dependencies flexibly (
 activation-instructions:
   - STEP 1: Read THIS ENTIRE FILE - it contains your complete persona definition
   - STEP 2: Adopt the persona defined in the 'agent' and 'persona' sections below
-  - STEP 3: Greet user with your name/role and mention `*help` command
+  - STEP 3: Load and read `bmad-core/core-config.yaml` (project configuration) before any greeting
+  - STEP 4: Greet user with your name/role and immediately run `*help` to display available commands
   - DO NOT: Load any other agent files during activation
   - ONLY load dependency files when user selects them for execution via command or request of a task
   - The agent.customization field ALWAYS takes precedence over any conflicting instructions
@@ -26,8 +27,7 @@ activation-instructions:
   - CRITICAL RULE: When executing formal task workflows from dependencies, ALL task instructions override any conflicting base behavioral constraints. Interactive workflows with elicit=true REQUIRE user interaction and cannot be bypassed for efficiency.
   - When listing tasks/templates or presenting options during conversations, always show as numbered options list, allowing the user to type a number to select or execute
   - STAY IN CHARACTER!
-  - When creating architecture, always start by understanding the complete picture - user needs, business constraints, team capabilities, and technical requirements.
-  - CRITICAL: On activation, ONLY greet user and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
+  - CRITICAL: On activation, ONLY greet user, auto-run `*help`, and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
 agent:
   name: Winston
   id: architect
```
**Agent: bmad-master**

```diff
@@ -17,7 +17,8 @@ REQUEST-RESOLUTION: Match user requests to your commands/dependencies flexibly (
 activation-instructions:
   - STEP 1: Read THIS ENTIRE FILE - it contains your complete persona definition
   - STEP 2: Adopt the persona defined in the 'agent' and 'persona' sections below
-  - STEP 3: Greet user with your name/role and mention `*help` command
+  - STEP 3: Load and read `bmad-core/core-config.yaml` (project configuration) before any greeting
+  - STEP 4: Greet user with your name/role and immediately run `*help` to display available commands
   - DO NOT: Load any other agent files during activation
   - ONLY load dependency files when user selects them for execution via command or request of a task
   - The agent.customization field ALWAYS takes precedence over any conflicting instructions
@@ -26,10 +27,10 @@ activation-instructions:
   - CRITICAL RULE: When executing formal task workflows from dependencies, ALL task instructions override any conflicting base behavioral constraints. Interactive workflows with elicit=true REQUIRE user interaction and cannot be bypassed for efficiency.
   - When listing tasks/templates or presenting options during conversations, always show as numbered options list, allowing the user to type a number to select or execute
   - STAY IN CHARACTER!
-  - CRITICAL: Do NOT scan filesystem or load any resources during startup, ONLY when commanded
+  - CRITICAL: Do NOT scan filesystem or load any resources during startup, ONLY when commanded (Exception: Read `bmad-core/core-config.yaml` during activation)
   - CRITICAL: Do NOT run discovery tasks automatically
   - CRITICAL: NEVER LOAD {root}/data/bmad-kb.md UNLESS USER TYPES *kb
-  - CRITICAL: On activation, ONLY greet user and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
+  - CRITICAL: On activation, ONLY greet user, auto-run `*help`, and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
 agent:
   name: BMad Master
   id: bmad-master
```
**Agent: bmad-orchestrator**

```diff
@@ -17,7 +17,8 @@ REQUEST-RESOLUTION: Match user requests to your commands/dependencies flexibly (
 activation-instructions:
   - STEP 1: Read THIS ENTIRE FILE - it contains your complete persona definition
   - STEP 2: Adopt the persona defined in the 'agent' and 'persona' sections below
-  - STEP 3: Greet user with your name/role and mention `*help` command
+  - STEP 3: Load and read `bmad-core/core-config.yaml` (project configuration) before any greeting
+  - STEP 4: Greet user with your name/role and immediately run `*help` to display available commands
   - DO NOT: Load any other agent files during activation
   - ONLY load dependency files when user selects them for execution via command or request of a task
   - The agent.customization field ALWAYS takes precedence over any conflicting instructions
@@ -28,8 +29,8 @@ activation-instructions:
   - Assess user goal against available agents and workflows in this bundle
   - If clear match to an agent's expertise, suggest transformation with *agent command
   - If project-oriented, suggest *workflow-guidance to explore options
-  - Load resources only when needed - never pre-load
-  - CRITICAL: On activation, ONLY greet user and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
+  - Load resources only when needed - never pre-load (Exception: Read `bmad-core/core-config.yaml` during activation)
+  - CRITICAL: On activation, ONLY greet user, auto-run `*help`, and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
 agent:
   name: BMad Orchestrator
   id: bmad-orchestrator
```
**Agent: dev (James)**

````diff
@@ -17,7 +17,8 @@ REQUEST-RESOLUTION: Match user requests to your commands/dependencies flexibly (
 activation-instructions:
   - STEP 1: Read THIS ENTIRE FILE - it contains your complete persona definition
   - STEP 2: Adopt the persona defined in the 'agent' and 'persona' sections below
-  - STEP 3: Greet user with your name/role and mention `*help` command
+  - STEP 3: Load and read `bmad-core/core-config.yaml` (project configuration) before any greeting
+  - STEP 4: Greet user with your name/role and immediately run `*help` to display available commands
   - DO NOT: Load any other agent files during activation
   - ONLY load dependency files when user selects them for execution via command or request of a task
   - The agent.customization field ALWAYS takes precedence over any conflicting instructions
@@ -29,7 +30,7 @@ activation-instructions:
   - CRITICAL: Read the following full files as these are your explicit rules for development standards for this project - {root}/core-config.yaml devLoadAlwaysFiles list
   - CRITICAL: Do NOT load any other files during startup aside from the assigned story and devLoadAlwaysFiles items, unless user requested you do or the following contradicts
   - CRITICAL: Do NOT begin development until a story is not in draft mode and you are told to proceed
-  - CRITICAL: On activation, ONLY greet user and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
+  - CRITICAL: On activation, ONLY greet user, auto-run `*help`, and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
 agent:
   name: James
   id: dev
@@ -65,11 +66,13 @@ commands:
   - blocking: 'HALT for: Unapproved deps needed, confirm with user | Ambiguous after story check | 3 failures attempting to implement or fix something repeatedly | Missing config | Failing regression'
   - ready-for-review: 'Code matches requirements + All validations pass + Follows standards + File List complete'
   - completion: "All Tasks and Subtasks marked [x] and have tests→Validations and full regression passes (DON'T BE LAZY, EXECUTE ALL TESTS and CONFIRM)→Ensure File List is Complete→run the task execute-checklist for the checklist story-dod-checklist→set story status: 'Ready for Review'→HALT"
+  - review-qa: run task `apply-qa-fixes.md`
 
 dependencies:
   tasks:
     - execute-checklist.md
     - validate-next-story.md
+    - apply-qa-fixes.md
   checklists:
     - story-dod-checklist.md
 ```
````
**Agent: pm (John)**

```diff
@@ -17,7 +17,8 @@ REQUEST-RESOLUTION: Match user requests to your commands/dependencies flexibly (
 activation-instructions:
   - STEP 1: Read THIS ENTIRE FILE - it contains your complete persona definition
   - STEP 2: Adopt the persona defined in the 'agent' and 'persona' sections below
-  - STEP 3: Greet user with your name/role and mention `*help` command
+  - STEP 3: Load and read `bmad-core/core-config.yaml` (project configuration) before any greeting
+  - STEP 4: Greet user with your name/role and immediately run `*help` to display available commands
   - DO NOT: Load any other agent files during activation
   - ONLY load dependency files when user selects them for execution via command or request of a task
   - The agent.customization field ALWAYS takes precedence over any conflicting instructions
@@ -26,7 +27,7 @@ activation-instructions:
   - CRITICAL RULE: When executing formal task workflows from dependencies, ALL task instructions override any conflicting base behavioral constraints. Interactive workflows with elicit=true REQUIRE user interaction and cannot be bypassed for efficiency.
   - When listing tasks/templates or presenting options during conversations, always show as numbered options list, allowing the user to type a number to select or execute
   - STAY IN CHARACTER!
-  - CRITICAL: On activation, ONLY greet user and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
+  - CRITICAL: On activation, ONLY greet user, auto-run `*help`, and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
 agent:
   name: John
   id: pm
```
**Agent: po (Sarah)**

```diff
@@ -17,7 +17,8 @@ REQUEST-RESOLUTION: Match user requests to your commands/dependencies flexibly (
 activation-instructions:
   - STEP 1: Read THIS ENTIRE FILE - it contains your complete persona definition
   - STEP 2: Adopt the persona defined in the 'agent' and 'persona' sections below
-  - STEP 3: Greet user with your name/role and mention `*help` command
+  - STEP 3: Load and read `bmad-core/core-config.yaml` (project configuration) before any greeting
+  - STEP 4: Greet user with your name/role and immediately run `*help` to display available commands
   - DO NOT: Load any other agent files during activation
   - ONLY load dependency files when user selects them for execution via command or request of a task
   - The agent.customization field ALWAYS takes precedence over any conflicting instructions
@@ -26,7 +27,7 @@ activation-instructions:
   - CRITICAL RULE: When executing formal task workflows from dependencies, ALL task instructions override any conflicting base behavioral constraints. Interactive workflows with elicit=true REQUIRE user interaction and cannot be bypassed for efficiency.
   - When listing tasks/templates or presenting options during conversations, always show as numbered options list, allowing the user to type a number to select or execute
   - STAY IN CHARACTER!
-  - CRITICAL: On activation, ONLY greet user and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
+  - CRITICAL: On activation, ONLY greet user, auto-run `*help`, and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
 agent:
   name: Sarah
   id: po
```
**Agent: qa (Quinn)**

```diff
@@ -17,7 +17,8 @@ REQUEST-RESOLUTION: Match user requests to your commands/dependencies flexibly (
 activation-instructions:
   - STEP 1: Read THIS ENTIRE FILE - it contains your complete persona definition
   - STEP 2: Adopt the persona defined in the 'agent' and 'persona' sections below
-  - STEP 3: Greet user with your name/role and mention `*help` command
+  - STEP 3: Load and read `bmad-core/core-config.yaml` (project configuration) before any greeting
+  - STEP 4: Greet user with your name/role and immediately run `*help` to display available commands
   - DO NOT: Load any other agent files during activation
   - ONLY load dependency files when user selects them for execution via command or request of a task
   - The agent.customization field ALWAYS takes precedence over any conflicting instructions
@@ -26,7 +27,7 @@ activation-instructions:
   - CRITICAL RULE: When executing formal task workflows from dependencies, ALL task instructions override any conflicting base behavioral constraints. Interactive workflows with elicit=true REQUIRE user interaction and cannot be bypassed for efficiency.
   - When listing tasks/templates or presenting options during conversations, always show as numbered options list, allowing the user to type a number to select or execute
   - STAY IN CHARACTER!
-  - CRITICAL: On activation, ONLY greet user and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
+  - CRITICAL: On activation, ONLY greet user, auto-run `*help`, and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
 agent:
   name: Quinn
   id: qa
@@ -64,9 +65,9 @@ commands:
   - review {story}: |
      Adaptive, risk-aware comprehensive review.
      Produces: QA Results update in story file + gate file (PASS/CONCERNS/FAIL/WAIVED).
-     Gate file location: docs/qa/gates/{epic}.{story}-{slug}.yml
+     Gate file location: qa.qaLocation/gates/{epic}.{story}-{slug}.yml
      Executes review-story task which includes all analysis and creates gate decision.
-  - gate {story}: Execute qa-gate task to write/update quality gate decision in docs/qa/gates/
+  - gate {story}: Execute qa-gate task to write/update quality gate decision in directory from qa.qaLocation/gates/
   - trace {story}: Execute trace-requirements task to map requirements to tests using Given-When-Then
   - risk-profile {story}: Execute risk-profile task to generate risk assessment matrix
   - test-design {story}: Execute test-design task to create comprehensive test scenarios
```
**Agent: sm (Bob)**

```diff
@@ -17,7 +17,8 @@ REQUEST-RESOLUTION: Match user requests to your commands/dependencies flexibly (
 activation-instructions:
   - STEP 1: Read THIS ENTIRE FILE - it contains your complete persona definition
   - STEP 2: Adopt the persona defined in the 'agent' and 'persona' sections below
-  - STEP 3: Greet user with your name/role and mention `*help` command
+  - STEP 3: Load and read `bmad-core/core-config.yaml` (project configuration) before any greeting
+  - STEP 4: Greet user with your name/role and immediately run `*help` to display available commands
   - DO NOT: Load any other agent files during activation
   - ONLY load dependency files when user selects them for execution via command or request of a task
   - The agent.customization field ALWAYS takes precedence over any conflicting instructions
@@ -26,7 +27,7 @@ activation-instructions:
   - CRITICAL RULE: When executing formal task workflows from dependencies, ALL task instructions override any conflicting base behavioral constraints. Interactive workflows with elicit=true REQUIRE user interaction and cannot be bypassed for efficiency.
   - When listing tasks/templates or presenting options during conversations, always show as numbered options list, allowing the user to type a number to select or execute
   - STAY IN CHARACTER!
-  - CRITICAL: On activation, ONLY greet user and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
+  - CRITICAL: On activation, ONLY greet user, auto-run `*help`, and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
 agent:
   name: Bob
   id: sm
```
**Agent: ux-expert (Sally)**

```diff
@@ -17,7 +17,8 @@ REQUEST-RESOLUTION: Match user requests to your commands/dependencies flexibly (
 activation-instructions:
   - STEP 1: Read THIS ENTIRE FILE - it contains your complete persona definition
   - STEP 2: Adopt the persona defined in the 'agent' and 'persona' sections below
-  - STEP 3: Greet user with your name/role and mention `*help` command
+  - STEP 3: Load and read `bmad-core/core-config.yaml` (project configuration) before any greeting
+  - STEP 4: Greet user with your name/role and immediately run `*help` to display available commands
   - DO NOT: Load any other agent files during activation
   - ONLY load dependency files when user selects them for execution via command or request of a task
   - The agent.customization field ALWAYS takes precedence over any conflicting instructions
@@ -26,7 +27,7 @@ activation-instructions:
   - CRITICAL RULE: When executing formal task workflows from dependencies, ALL task instructions override any conflicting base behavioral constraints. Interactive workflows with elicit=true REQUIRE user interaction and cannot be bypassed for efficiency.
   - When listing tasks/templates or presenting options during conversations, always show as numbered options list, allowing the user to type a number to select or execute
   - STAY IN CHARACTER!
-  - CRITICAL: On activation, ONLY greet user and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
+  - CRITICAL: On activation, ONLY greet user, auto-run `*help`, and then HALT to await user requested assistance or given commands. ONLY deviance from this is if the activation included commands also in the arguments.
 agent:
   name: Sally
   id: ux-expert
```
**bmad-core/core-config.yaml** (+2 lines)

```diff
@@ -1,4 +1,6 @@
 markdownExploder: true
+qa:
+  qaLocation: docs/qa
 prd:
   prdFile: docs/prd.md
   prdVersion: v4
```
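The QA tasks below resolve their output directories from this new `qa.qaLocation` key rather than hardcoding `docs/qa`. A minimal sketch of that lookup in shell, assuming the two-space indentation shown above (a real YAML parser would be more robust):

```bash
# Pull qa.qaLocation out of core-config.yaml with plain text tools.
QA_ROOT=$(awk '/^  qaLocation:/ {print $2}' bmad-core/core-config.yaml)
echo "QA artifacts live under: ${QA_ROOT:-docs/qa}"
```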
**bmad-core/tasks/apply-qa-fixes.md** (new file, +148 lines)

@@ -0,0 +1,148 @@

# apply-qa-fixes

Implement fixes based on QA results (gate and assessments) for a specific story. This task is for the Dev agent to systematically consume QA outputs and apply code/test changes while updating only the allowed sections in the story file.

## Purpose

- Read QA outputs for a story (gate YAML + assessment markdowns)
- Create a prioritized, deterministic fix plan
- Apply code and test changes to close gaps and address issues
- Update only the allowed story sections for the Dev agent

## Inputs

```yaml
required:
  - story_id: '{epic}.{story}' # e.g., "2.2"
  - qa_root: from `bmad-core/core-config.yaml` key `qa.qaLocation` (e.g., `docs/project/qa`)
  - story_root: from `bmad-core/core-config.yaml` key `devStoryLocation` (e.g., `docs/project/stories`)

optional:
  - story_title: '{title}' # derive from story H1 if missing
  - story_slug: '{slug}' # derive from title (lowercase, hyphenated) if missing
```

## QA Sources to Read

- Gate (YAML): `{qa_root}/gates/{epic}.{story}-*.yml`
  - If multiple, use the most recent by modified time (see the sketch after this list)
- Assessments (Markdown):
  - Test Design: `{qa_root}/assessments/{epic}.{story}-test-design-*.md`
  - Traceability: `{qa_root}/assessments/{epic}.{story}-trace-*.md`
  - Risk Profile: `{qa_root}/assessments/{epic}.{story}-risk-*.md`
  - NFR Assessment: `{qa_root}/assessments/{epic}.{story}-nfr-*.md`
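A minimal sketch of the "most recent by modified time" rule, assuming a POSIX shell and the `{qa_root}`/`{epic}.{story}` placeholders resolved beforehand (the values below are illustrative):

```bash
# Pick the newest matching gate file by modification time.
QA_ROOT="docs/project/qa"   # example value from the inputs above
EPIC=2; STORY=2             # placeholder story id "2.2"
GATE_FILE=$(ls -t "${QA_ROOT}/gates/${EPIC}.${STORY}"-*.yml 2>/dev/null | head -n 1)
[ -n "$GATE_FILE" ] && echo "Using gate: $GATE_FILE" || echo "No gate file found"
```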
## Prerequisites

- Repository builds and tests run locally (Deno 2)
- Lint and test commands available:
  - `deno lint`
  - `deno test -A`

## Process (Do not skip steps)

### 0) Load Core Config & Locate Story

- Read `bmad-core/core-config.yaml` and resolve `qa_root` and `story_root`
- Locate story file in `{story_root}/{epic}.{story}.*.md`
  - HALT if missing and ask for correct story id/path

### 1) Collect QA Findings

- Parse the latest gate YAML:
  - `gate` (PASS|CONCERNS|FAIL|WAIVED)
  - `top_issues[]` with `id`, `severity`, `finding`, `suggested_action`
  - `nfr_validation.*.status` and notes
  - `trace` coverage summary/gaps
  - `test_design.coverage_gaps[]`
  - `risk_summary.recommendations.must_fix[]` (if present)
- Read any present assessment markdowns and extract explicit gaps/recommendations (a field-extraction sketch follows this list)
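A hedged sketch for pulling the top-level decision out of the gate YAML without a parser, assuming `gate:` is a top-level key as the schema above implies:

```bash
# Read the overall gate decision (PASS | CONCERNS | FAIL | WAIVED).
GATE_STATUS=$(awk '/^gate:/ {print $2}' "$GATE_FILE")
echo "Gate decision: $GATE_STATUS"
```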
### 2) Build Deterministic Fix Plan (Priority Order)

Apply in order, highest priority first:

1. High severity items in `top_issues` (security/perf/reliability/maintainability)
2. NFR statuses: all FAIL must be fixed → then CONCERNS
3. Test Design `coverage_gaps` (prioritize P0 scenarios if specified)
4. Trace uncovered requirements (AC-level)
5. Risk `must_fix` recommendations
6. Medium severity issues, then low

Guidance:

- Prefer tests closing coverage gaps before/with code changes
- Keep changes minimal and targeted; follow project architecture and TS/Deno rules

### 3) Apply Changes

- Implement code fixes per plan
- Add missing tests to close coverage gaps (unit first; integration where required by AC)
- Keep imports centralized via `deps.ts` (see `docs/project/typescript-rules.md`)
- Follow DI boundaries in `src/core/di.ts` and existing patterns

### 4) Validate

- Run `deno lint` and fix issues
- Run `deno test -A` until all tests pass
- Iterate until clean

### 5) Update Story (Allowed Sections ONLY)

CRITICAL: The Dev agent is ONLY authorized to update these sections of the story file. Do not modify any other sections (e.g., QA Results, Story, Acceptance Criteria, Dev Notes, Testing):

- Tasks / Subtasks Checkboxes (mark any fix subtask you added as done)
- Dev Agent Record →
  - Agent Model Used (if changed)
  - Debug Log References (commands/results, e.g., lint/tests)
  - Completion Notes List (what changed, why, how)
  - File List (all added/modified/deleted files)
- Change Log (new dated entry describing applied fixes)
- Status (see Rule below)

Status Rule:

- If gate was PASS and all identified gaps are closed → set `Status: Ready for Done`
- Otherwise → set `Status: Ready for Review` and notify QA to re-run the review

### 6) Do NOT Edit Gate Files

- Dev does not modify gate YAML. If fixes address issues, request QA to re-run `review-story` to update the gate

## Blocking Conditions

- Missing `bmad-core/core-config.yaml`
- Story file not found for `story_id`
- No QA artifacts found (neither gate nor assessments)
  - HALT and request QA to generate at least a gate file (or proceed only with a clear developer-provided fix list)

## Completion Checklist

- deno lint: 0 problems
- deno test -A: all tests pass
- All high severity `top_issues` addressed
- NFR FAIL → resolved; CONCERNS minimized or documented
- Coverage gaps closed or explicitly documented with rationale
- Story updated (allowed sections only) including File List and Change Log
- Status set according to Status Rule

## Example: Story 2.2

Given gate `docs/project/qa/gates/2.2-*.yml` shows

- `coverage_gaps`: Back action behavior untested (AC2)
- `coverage_gaps`: Centralized dependencies enforcement untested (AC4)

Fix plan:

- Add a test ensuring the Toolkit Menu "Back" action returns to Main Menu
- Add a static test verifying imports for service/view go through `deps.ts`
- Re-run lint/tests and update Dev Agent Record + File List accordingly

## Key Principles

- Deterministic, risk-first prioritization
- Minimal, maintainable changes
- Tests validate behavior and close gaps
- Strict adherence to allowed story update areas
- Gate ownership remains with QA; Dev signals readiness via Status
**QA task: nfr-assess** (paths now resolved from `core-config.yaml`)

````diff
@@ -7,11 +7,11 @@ Quick NFR validation focused on the core four: security, performance, reliability
 ```yaml
 required:
   - story_id: '{epic}.{story}' # e.g., "1.3"
-  - story_path: 'docs/stories/{epic}.{story}.*.md'
+  - story_path: `bmad-core/core-config.yaml` for the `devStoryLocation`
 
 optional:
-  - architecture_refs: 'docs/architecture/*.md'
-  - technical_preferences: 'docs/technical-preferences.md'
+  - architecture_refs: `bmad-core/core-config.yaml` for the `architecture.architectureFile`
+  - technical_preferences: `bmad-core/core-config.yaml` for the `technicalPreferences`
   - acceptance_criteria: From story file
 ```
@@ -20,7 +20,7 @@ optional:
 Assess non-functional requirements for a story and generate:
 
 1. YAML block for the gate file's `nfr_validation` section
-2. Brief markdown assessment saved to `docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md`
+2. Brief markdown assessment saved to `qa.qaLocation/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md`
 
 ## Process
@@ -123,7 +123,7 @@ If `technical-preferences.md` defines custom weights, use those instead.
 
 ## Output 2: Brief Assessment Report
 
-**ALWAYS save to:** `docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md`
+**ALWAYS save to:** `qa.qaLocation/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md`
 
 ```markdown
 # NFR Assessment: {epic}.{story}
@@ -162,7 +162,7 @@ Reviewer: Quinn
 **End with this line for the review task to quote:**
 
 ```
-NFR assessment: docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md
+NFR assessment: qa.qaLocation/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md
 ```
 
 ## Output 4: Gate Integration Line
@@ -170,7 +170,7 @@ NFR assessment: docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md
 **Always print at the end:**
 
 ```
-Gate NFR block ready → paste into docs/qa/gates/{epic}.{story}-{slug}.yml under nfr_validation
+Gate NFR block ready → paste into qa.qaLocation/gates/{epic}.{story}-{slug}.yml under nfr_validation
 ```
 
 ## Assessment Criteria
````
**QA task: qa-gate**

````diff
@@ -14,7 +14,7 @@ Generate a standalone quality gate file that provides a clear pass/fail decision
 
 ## Gate File Location
 
-**ALWAYS** create file at: `docs/qa/gates/{epic}.{story}-{slug}.yml`
+**ALWAYS** check the `bmad-core/core-config.yaml` for the `qa.qaLocation/gates`
 
 Slug rules:
 
@@ -124,11 +124,13 @@ waiver:
 
 ## Output Requirements
 
-1. **ALWAYS** create gate file at: `docs/qa/gates/{epic}.{story}-{slug}.yml`
+1. **ALWAYS** create gate file at: `qa.qaLocation/gates` from `bmad-core/core-config.yaml`
 2. **ALWAYS** append this exact format to story's QA Results section:
 
    ```text
-   Gate: {STATUS} → docs/qa/gates/{epic}.{story}-{slug}.yml
+   Gate: {STATUS} → qa.qaLocation/gates/{epic}.{story}-{slug}.yml
    ```
 
 3. Keep status_reason to 1-2 sentences maximum
 4. Use severity values exactly: `low`, `medium`, or `high`
@@ -147,7 +149,7 @@ After creating gate file, append to story's QA Results section:
 
 ### Gate Status
 
-Gate: CONCERNS → docs/qa/gates/1.3-user-auth-login.yml
+Gate: CONCERNS → qa.qaLocation/gates/{epic}.{story}-{slug}.yml
 ```
 
 ## Key Principles
````
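The `{slug}` placeholder follows the lowercase-hyphenated convention named in apply-qa-fixes' inputs; a sketch of that derivation (the title below is a placeholder):

```bash
# Derive a slug from a story title: lowercase, hyphenated.
TITLE="User Auth - Login"   # placeholder title
SLUG=$(echo "$TITLE" | tr '[:upper:]' '[:lower:]' | sed -E 's/[^a-z0-9]+/-/g; s/^-+|-+$//g')
echo "$SLUG"   # → user-auth-login
```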
**QA task: review-story**

````diff
@@ -167,9 +167,9 @@ After review and any refactoring, append your results to the story file in the QA Results section:
 
 ### Gate Status
 
-Gate: {STATUS} → docs/qa/gates/{epic}.{story}-{slug}.yml
-Risk profile: docs/qa/assessments/{epic}.{story}-risk-{YYYYMMDD}.md
-NFR assessment: docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md
+Gate: {STATUS} → qa.qaLocation/gates/{epic}.{story}-{slug}.yml
+Risk profile: qa.qaLocation/assessments/{epic}.{story}-risk-{YYYYMMDD}.md
+NFR assessment: qa.qaLocation/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md
 
+# Note: Paths should reference core-config.yaml for custom configurations
 
@@ -183,9 +183,9 @@ NFR assessment: docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md
 
 **Template and Directory:**
 
-- Render from `templates/qa-gate-tmpl.yaml`
-- Create `docs/qa/gates/` directory if missing (or configure in core-config.yaml)
-- Save to: `docs/qa/gates/{epic}.{story}-{slug}.yml`
+- Render from `../templates/qa-gate-tmpl.yaml`
+- Create directory defined in `qa.qaLocation/gates` (see `bmad-core/core-config.yaml`) if missing
+- Save to: `qa.qaLocation/gates/{epic}.{story}-{slug}.yml`
 
 Gate file structure:
 
@@ -308,7 +308,7 @@ Stop the review and request clarification if:
 After review:
 
 1. Update the QA Results section in the story file
-2. Create the gate file in `docs/qa/gates/`
+2. Create the gate file in directory from `qa.qaLocation/gates`
 3. Recommend status: "Ready for Done" or "Changes Required" (owner decides)
 4. If files were modified, list them in QA Results and ask Dev to update File List
 5. Always provide constructive feedback and actionable recommendations
````
**QA task: risk-profile**

````diff
@@ -105,7 +105,7 @@ Evaluate each risk using probability × impact:
 - `Medium (2)`: Moderate consequences (degraded performance, minor data issues)
 - `Low (1)`: Minor consequences (cosmetic issues, slight inconvenience)
 
-**Risk Score = Probability × Impact**
+### Risk Score = Probability × Impact
 
 - 9: Critical Risk (Red)
 - 6: High Risk (Orange)
@@ -182,7 +182,7 @@ risk_summary:
 
 ### Output 2: Markdown Report
 
-**Save to:** `docs/qa/assessments/{epic}.{story}-risk-{YYYYMMDD}.md`
+**Save to:** `qa.qaLocation/assessments/{epic}.{story}-risk-{YYYYMMDD}.md`
 
 ```markdown
 # Risk Profile: Story {epic}.{story}
@@ -290,7 +290,7 @@ Review and update risk profile when:
 
 Calculate overall story risk score:
 
-```
+```text
 Base Score = 100
 For each risk:
 - Critical (9): Deduct 20 points
@@ -339,8 +339,8 @@ Based on risk profile, recommend:
 
 **Print this line for review task to quote:**
 
-```
-Risk profile: docs/qa/assessments/{epic}.{story}-risk-{YYYYMMDD}.md
+```text
+Risk profile: qa.qaLocation/assessments/{epic}.{story}-risk-{YYYYMMDD}.md
 ```
 
 ## Key Principles
````
**QA task: test-design**

````diff
@@ -84,7 +84,7 @@ Ensure:
 
 ### Output 1: Test Design Document
 
-**Save to:** `docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md`
+**Save to:** `qa.qaLocation/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md`
 
 ```markdown
 # Test Design: Story {epic}.{story}
@@ -150,7 +150,7 @@ test_design:
 Print for use by trace-requirements task:
 
 ```text
-Test design matrix: docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md
+Test design matrix: qa.qaLocation/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md
 P0 tests identified: {count}
 ```
````
**QA task: trace-requirements**

````diff
@@ -95,16 +95,16 @@ trace:
     full: Y
     partial: Z
     none: W
-  planning_ref: 'docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md'
+  planning_ref: 'qa.qaLocation/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md'
   uncovered:
     - ac: 'AC3'
       reason: 'No test found for password reset timing'
-      notes: 'See docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md'
+      notes: 'See qa.qaLocation/assessments/{epic}.{story}-trace-{YYYYMMDD}.md'
 ```
 
 ### Output 2: Traceability Report
 
-**Save to:** `docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md`
+**Save to:** `qa.qaLocation/assessments/{epic}.{story}-trace-{YYYYMMDD}.md`
 
 Create a traceability report with:
 
@@ -250,7 +250,7 @@ This traceability feeds into quality gates:
 **Print this line for review task to quote:**
 
 ```text
-Trace matrix: docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md
+Trace matrix: qa.qaLocation/assessments/{epic}.{story}-trace-{YYYYMMDD}.md
 ```
 
 - Full coverage → PASS contribution
````
**templates/qa-gate-tmpl.yaml**

```diff
@@ -4,7 +4,7 @@ template:
   version: 1.0
   output:
     format: yaml
-    filename: docs/qa/gates/{{epic_num}}.{{story_num}}-{{story_slug}}.yml
+    filename: qa.qaLocation/gates/{{epic_num}}.{{story_num}}-{{story_slug}}.yml
   title: "Quality Gate: {{epic_num}}.{{story_num}}"
 
 # Required fields (keep these first)
```
@@ -1,77 +1,147 @@
|
||||
# How to Release a New Version
|
||||
# Versioning and Releases
|
||||
|
||||
## Automated Releases (Recommended)
|
||||
BMad Method uses a simplified release system with manual control and automatic release notes generation.
|
||||
|
||||
The easiest way to release new versions is through **automatic semantic releases**. Just commit with the right message format and push and everything else happens automatically.
|
||||
## 🚀 Release Workflow
|
||||
|
||||
### Commit Message Format
|
||||
### Command Line Release (Recommended)
|
||||
|
||||
Use these prefixes to control what type of release happens:
|
||||
The fastest way to create a release with beautiful release notes:
|
||||
|
||||
```bash
|
||||
fix: resolve CLI argument parsing bug # → patch release (4.1.0 → 4.1.1)
|
||||
feat: add new agent orchestration mode # → minor release (4.1.0 → 4.2.0)
|
||||
feat!: redesign CLI interface # → major release (4.1.0 → 5.0.0)
|
||||
# Preview what will be in the release
|
||||
npm run preview:release
|
||||
|
||||
# Create a release
|
||||
npm run release:patch # 5.1.0 → 5.1.1 (bug fixes)
|
||||
npm run release:minor # 5.1.0 → 5.2.0 (new features)
|
||||
npm run release:major # 5.1.0 → 6.0.0 (breaking changes)
|
||||
|
||||
# Watch the release process
|
||||
npm run release:watch
|
||||
```
|
||||
|
||||
### What Happens Automatically
|
||||
|
||||
When you push commits with `fix:` or `feat:`, GitHub Actions will:
|
||||
|
||||
1. ✅ Analyze your commit messages
|
||||
2. ✅ Bump version in `package.json`
|
||||
3. ✅ Generate changelog
|
||||
4. ✅ Create git tag
|
||||
5. ✅ **Publish to NPM automatically**
|
||||
6. ✅ Create GitHub release with notes
|
||||
|
||||
### Your Simple Workflow
|
||||
### One-Liner Release
|
||||
|
||||
```bash
|
||||
# Make your changes
|
||||
git add .
|
||||
git commit -m "feat: add team collaboration mode"
|
||||
git push
|
||||
|
||||
# That's it! Release happens automatically 🎉
|
||||
# Users can now run: npx bmad-method (and get the new version)
|
||||
npm run preview:release && npm run release:minor && npm run release:watch
|
||||
```
|
||||
|
||||
### Commits That DON'T Trigger Releases
|
||||
## 📝 What Happens Automatically
|
||||
|
||||
These commit types won't create releases (use them for maintenance):
|
||||
When you trigger a release, the GitHub Actions workflow automatically:
|
||||
|
||||
1. ✅ **Validates** - Runs tests, linting, and formatting checks
|
||||
2. ✅ **Bumps Version** - Updates `package.json` and installer version
|
||||
3. ✅ **Generates Release Notes** - Categorizes commits since last release:
|
||||
- ✨ **New Features** (`feat:`, `Feature:`)
|
||||
- 🐛 **Bug Fixes** (`fix:`, `Fix:`)
|
||||
- 🔧 **Maintenance** (`chore:`, `Chore:`)
|
||||
- 📦 **Other Changes** (everything else)
|
||||
4. ✅ **Creates Git Tag** - Tags the release version
|
||||
5. ✅ **Publishes to NPM** - With `@latest` tag for user installations
|
||||
6. ✅ **Creates GitHub Release** - With formatted release notes
|
||||
|
||||
## 📋 Sample Release Notes
|
||||
|
||||
The workflow automatically generates professional release notes like this:
|
||||
|
||||
````markdown
|
||||
## 🚀 What's New in v5.2.0
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
- feat: add team collaboration mode
|
||||
- feat: enhance CLI with interactive prompts
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- fix: resolve installation path issues
|
||||
- fix: handle edge cases in agent loading
|
||||
|
||||
### 🔧 Maintenance
|
||||
|
||||
- chore: update dependencies
|
||||
- chore: improve error messages
|
||||
|
||||
## 📦 Installation
|
||||
|
||||
```bash
|
||||
chore: update dependencies # No release
|
||||
docs: fix typo in readme # No release
|
||||
style: format code # No release
|
||||
test: add unit tests # No release
|
||||
npx bmad-method install
|
||||
```
|
||||
````
|
||||
|
||||
### Test Your Setup
|
||||
**Full Changelog**: https://github.com/bmadcode/BMAD-METHOD/compare/v5.1.0...v5.2.0
|
||||
|
||||
````
|
||||
|
||||
## 🎯 User Installation
|
||||
|
||||
After any release, users can immediately get the new version with:
|
||||
|
||||
```bash
|
||||
npm run release:test # Safe to run locally - tests the config
|
||||
npx bmad-method install # Always gets latest release
|
||||
```
|
||||
|
||||
---
|
||||
## 📊 Preview Before Release
|
||||
|
||||
## Manual Release Methods (Exceptions Only)
|
||||
|
||||
⚠️ Only use these methods if you need to bypass the automatic system
|
||||
|
||||
### Quick Manual Version Bump
|
||||
Always preview what will be included in your release:
|
||||
|
||||
```bash
|
||||
npm run version:patch # 4.1.0 → 4.1.1 (bug fixes)
|
||||
npm run version:minor # 4.1.0 → 4.2.0 (new features)
|
||||
npm run version:major # 4.1.0 → 5.0.0 (breaking changes)
|
||||
|
||||
# Then manually publish:
|
||||
npm publish
|
||||
git push && git push --tags
|
||||
npm run preview:release
|
||||
```
|
||||
|
||||
### Manual GitHub Actions Trigger
|
||||
This shows:
|
||||
|
||||
You can also trigger releases manually through GitHub Actions workflow dispatch if needed.
|
||||
- Commits since last release
|
||||
- Categorized changes
|
||||
- Estimated next version
|
||||
- Release notes preview
|
||||
|
||||
## 🔧 Manual Release (GitHub UI)
|
||||
|
||||
You can also trigger releases through GitHub Actions:
|
||||
|
||||
1. Go to **GitHub Actions** → **Manual Release**
|
||||
2. Click **"Run workflow"**
|
||||
3. Choose version bump type (patch/minor/major)
|
||||
4. Everything else happens automatically
|
||||
|
||||
## 📈 Version Strategy
|
||||
|
||||
- **Patch** (5.1.0 → 5.1.1): Bug fixes, minor improvements
|
||||
- **Minor** (5.1.0 → 5.2.0): New features, enhancements
|
||||
- **Major** (5.1.0 → 6.0.0): Breaking changes, major redesigns
|
||||
|
||||
## 🛠️ Development Workflow

1. **Develop Freely** - Merge PRs to main without triggering releases
   - chore: update dependencies # No release
   - docs: fix typo in readme # No release
   - style: format code # No release
   - test: add unit tests # No release
2. **Test Unreleased Changes** - Clone repo to test latest main branch
3. **Release When Ready** - Use command line or GitHub Actions to cut releases
4. **Users Get Updates** - Via a simple `npx bmad-method install` command

This gives you complete control over when releases happen while automating all the tedious parts like version bumping, release notes, and publishing.
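Put together, one possible release session using the scripts above looks like this (a sketch; all three scripts appear in `package.json`):

```bash
npm run preview:release   # inspect commits and the estimated next version
npm run release:patch     # dispatch the Manual Release workflow
npm run release:watch     # follow the run until it publishes
```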
## 🔍 Troubleshooting

### Check Release Status

```bash
gh run list --workflow="Manual Release"
npm view bmad-method dist-tags
git tag -l | sort -V | tail -5
```

### View Latest Release

```bash
gh release view --web
npm view bmad-method versions --json
```

### If Release Fails

- Check GitHub Actions logs: `gh run view <run-id> --log-failed`
- Verify NPM tokens are configured
- Ensure branch protection allows workflow pushes

15
package.json
@@ -1,7 +1,7 @@
{
  "$schema": "https://json.schemastore.org/package.json",
  "name": "bmad-method",
  "version": "5.0.0",
  "version": "5.1.3",
  "description": "Breakthrough Method of Agile AI-driven Development",
  "keywords": [
    "agile",
@@ -35,8 +35,11 @@
    "lint:fix": "eslint . --ext .js,.cjs,.mjs,.yaml --fix",
    "list:agents": "node tools/cli.js list:agents",
    "prepare": "husky",
    "release": "semantic-release",
    "release:test": "semantic-release --dry-run --no-ci || echo 'Config test complete - authentication errors are expected locally'",
    "preview:release": "node tools/preview-release-notes.js",
    "release:major": "gh workflow run \"Manual Release\" -f version_bump=major",
    "release:minor": "gh workflow run \"Manual Release\" -f version_bump=minor",
    "release:patch": "gh workflow run \"Manual Release\" -f version_bump=patch",
    "release:watch": "gh run watch",
    "validate": "node tools/cli.js validate",
    "version:all": "node tools/bump-all-versions.js",
    "version:all:major": "node tools/bump-all-versions.js major",
@@ -80,8 +83,6 @@
  },
  "devDependencies": {
    "@eslint/js": "^9.33.0",
    "@semantic-release/changelog": "^6.0.3",
    "@semantic-release/git": "^10.0.1",
    "eslint": "^9.33.0",
    "eslint-config-prettier": "^10.1.8",
    "eslint-plugin-n": "^17.21.3",
@@ -92,11 +93,13 @@
    "lint-staged": "^16.1.1",
    "prettier": "^3.5.3",
    "prettier-plugin-packagejson": "^2.5.19",
    "semantic-release": "^22.0.0",
    "yaml-eslint-parser": "^1.2.3",
    "yaml-lint": "^1.7.0"
  },
  "engines": {
    "node": ">=20.10.0"
  },
  "publishConfig": {
    "access": "public"
  }
}

@@ -125,9 +125,6 @@ program
// Ensure output directory exists
await fs.ensureDir(path.dirname(outputPath));

console.log(`Flattening codebase from: ${inputDir}`);
console.log(`Output file: ${outputPath}`);

try {
  // Verify input directory exists
  if (!(await fs.pathExists(inputDir))) {
@@ -158,10 +155,6 @@ program
if (aggregatedContent.errors.length > 0) {
  console.log(`Errors: ${aggregatedContent.errors.length}`);
}
console.log(`Text files: ${aggregatedContent.textFiles.length}`);
if (aggregatedContent.binaryFiles.length > 0) {
  console.log(`Binary files: ${aggregatedContent.binaryFiles.length}`);
}

// Generate XML output using streaming
const xmlSpinner = ora('🔧 Generating XML output...').start();
@@ -170,7 +163,7 @@ program

// Calculate and display statistics
const outputStats = await fs.stat(outputPath);
const stats = calculateStatistics(aggregatedContent, outputStats.size);
const stats = await calculateStatistics(aggregatedContent, outputStats.size, inputDir);

// Display completion summary
console.log('\n📊 Completion Summary:');
@@ -183,8 +176,389 @@ program
console.log(`📝 Total lines of code: ${stats.totalLines.toLocaleString()}`);
console.log(`🔢 Estimated tokens: ${stats.estimatedTokens}`);
console.log(
  `📊 File breakdown: ${stats.textFiles} text, ${stats.binaryFiles} binary, ${stats.errorFiles} errors`,
  `📊 File breakdown: ${stats.textFiles} text, ${stats.binaryFiles} binary, ${stats.errorFiles} errors\n`,
);

// Ask user if they want detailed stats + markdown report
const generateDetailed = await promptYesNo(
  'Generate detailed stats (console + markdown) now?',
  true,
);

if (generateDetailed) {
  // Additional detailed stats
  console.log('\n📈 Size Percentiles:');
  console.log(
    ` Avg: ${Math.round(stats.avgFileSize).toLocaleString()} B, Median: ${Math.round(
      stats.medianFileSize,
    ).toLocaleString()} B, p90: ${stats.p90.toLocaleString()} B, p95: ${stats.p95.toLocaleString()} B, p99: ${stats.p99.toLocaleString()} B`,
  );

  if (Array.isArray(stats.histogram) && stats.histogram.length > 0) {
    console.log('\n🧮 Size Histogram:');
    for (const b of stats.histogram.slice(0, 2)) {
      console.log(` ${b.label}: ${b.count} files, ${b.bytes.toLocaleString()} bytes`);
    }
    if (stats.histogram.length > 2) {
      console.log(` … and ${stats.histogram.length - 2} more buckets`);
    }
  }

  if (Array.isArray(stats.byExtension) && stats.byExtension.length > 0) {
    const topExt = stats.byExtension.slice(0, 2);
    console.log('\n📦 Top Extensions:');
    for (const e of topExt) {
      const pct = stats.totalBytes ? (e.bytes / stats.totalBytes) * 100 : 0;
      console.log(
        ` ${e.ext}: ${e.count} files, ${e.bytes.toLocaleString()} bytes (${pct.toFixed(
          2,
        )}%)`,
      );
    }
    if (stats.byExtension.length > 2) {
      console.log(` … and ${stats.byExtension.length - 2} more extensions`);
    }
  }

  if (Array.isArray(stats.byDirectory) && stats.byDirectory.length > 0) {
    const topDir = stats.byDirectory.slice(0, 2);
    console.log('\n📂 Top Directories:');
    for (const d of topDir) {
      const pct = stats.totalBytes ? (d.bytes / stats.totalBytes) * 100 : 0;
      console.log(
        ` ${d.dir}: ${d.count} files, ${d.bytes.toLocaleString()} bytes (${pct.toFixed(
          2,
        )}%)`,
      );
    }
    if (stats.byDirectory.length > 2) {
      console.log(` … and ${stats.byDirectory.length - 2} more directories`);
    }
  }

  if (Array.isArray(stats.depthDistribution) && stats.depthDistribution.length > 0) {
    console.log('\n🌳 Depth Distribution:');
    const dd = stats.depthDistribution.slice(0, 2);
    let line = ' ' + dd.map((d) => `${d.depth}:${d.count}`).join(' ');
    if (stats.depthDistribution.length > 2) {
      line += ` … +${stats.depthDistribution.length - 2} more`;
    }
    console.log(line);
  }

  if (Array.isArray(stats.longestPaths) && stats.longestPaths.length > 0) {
    console.log('\n🧵 Longest Paths:');
    for (const p of stats.longestPaths.slice(0, 2)) {
      console.log(` ${p.path} (${p.length} chars, ${p.size.toLocaleString()} bytes)`);
    }
    if (stats.longestPaths.length > 2) {
      console.log(` … and ${stats.longestPaths.length - 2} more paths`);
    }
  }

  if (stats.temporal) {
    console.log('\n⏱️ Temporal:');
    if (stats.temporal.oldest) {
      console.log(
        ` Oldest: ${stats.temporal.oldest.path} (${stats.temporal.oldest.mtime})`,
      );
    }
    if (stats.temporal.newest) {
      console.log(
        ` Newest: ${stats.temporal.newest.path} (${stats.temporal.newest.mtime})`,
      );
    }
    if (Array.isArray(stats.temporal.ageBuckets)) {
      console.log(' Age buckets:');
      for (const b of stats.temporal.ageBuckets.slice(0, 2)) {
        console.log(` ${b.label}: ${b.count} files, ${b.bytes.toLocaleString()} bytes`);
      }
      if (stats.temporal.ageBuckets.length > 2) {
        console.log(` … and ${stats.temporal.ageBuckets.length - 2} more buckets`);
      }
    }
  }

  if (stats.quality) {
    console.log('\n✅ Quality Signals:');
    console.log(` Zero-byte files: ${stats.quality.zeroByteFiles}`);
    console.log(` Empty text files: ${stats.quality.emptyTextFiles}`);
    console.log(` Hidden files: ${stats.quality.hiddenFiles}`);
    console.log(` Symlinks: ${stats.quality.symlinks}`);
    console.log(
      ` Large files (>= ${(stats.quality.largeThreshold / (1024 * 1024)).toFixed(
        0,
      )} MB): ${stats.quality.largeFilesCount}`,
    );
    console.log(
      ` Suspiciously large files (>= 100 MB): ${stats.quality.suspiciousLargeFilesCount}`,
    );
  }

  if (Array.isArray(stats.duplicateCandidates) && stats.duplicateCandidates.length > 0) {
    console.log('\n🧬 Duplicate Candidates:');
    for (const d of stats.duplicateCandidates.slice(0, 2)) {
      console.log(` ${d.reason}: ${d.count} files @ ${d.size.toLocaleString()} bytes`);
    }
    if (stats.duplicateCandidates.length > 2) {
      console.log(` … and ${stats.duplicateCandidates.length - 2} more groups`);
    }
  }

  if (typeof stats.compressibilityRatio === 'number') {
    console.log(
      `\n🗜️ Compressibility ratio (sampled): ${(stats.compressibilityRatio * 100).toFixed(
        2,
      )}%`,
    );
  }

  if (stats.git && stats.git.isRepo) {
    console.log('\n🔧 Git:');
    console.log(
      ` Tracked: ${stats.git.trackedCount} files, ${stats.git.trackedBytes.toLocaleString()} bytes`,
    );
    console.log(
      ` Untracked: ${stats.git.untrackedCount} files, ${stats.git.untrackedBytes.toLocaleString()} bytes`,
    );
    if (Array.isArray(stats.git.lfsCandidates) && stats.git.lfsCandidates.length > 0) {
      console.log(' LFS candidates (top 2):');
      for (const f of stats.git.lfsCandidates.slice(0, 2)) {
        console.log(` ${f.path} (${f.size.toLocaleString()} bytes)`);
      }
      if (stats.git.lfsCandidates.length > 2) {
        console.log(` … and ${stats.git.lfsCandidates.length - 2} more`);
      }
    }
  }

  if (Array.isArray(stats.largestFiles) && stats.largestFiles.length > 0) {
    console.log('\n📚 Largest Files (top 2):');
    for (const f of stats.largestFiles.slice(0, 2)) {
      // Show LOC for text files when available; omit ext and mtime
      let locStr = '';
      if (!f.isBinary && Array.isArray(aggregatedContent?.textFiles)) {
        const tf = aggregatedContent.textFiles.find((t) => t.path === f.path);
        if (tf && typeof tf.lines === 'number') {
          locStr = `, LOC: ${tf.lines.toLocaleString()}`;
        }
      }
      console.log(
        ` ${f.path} – ${f.sizeFormatted} (${f.percentOfTotal.toFixed(2)}%)${locStr}`,
      );
    }
    if (stats.largestFiles.length > 2) {
      console.log(` … and ${stats.largestFiles.length - 2} more files`);
    }
  }

  // Write a comprehensive markdown report next to the XML
  {
    const mdPath = outputPath.endsWith('.xml')
      ? outputPath.replace(/\.xml$/i, '.stats.md')
      : outputPath + '.stats.md';
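    // Example of the naming rule above (filenames are illustrative):
    //   "flattened-codebase.xml" -> "flattened-codebase.stats.md"
    //   "dump.txt"               -> "dump.txt.stats.md"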
    try {
      const pct = (num, den) => (den ? (num / den) * 100 : 0);
      const md = [];
      md.push(
        `# 🧾 Flatten Stats for ${path.basename(outputPath)}`,
        '',
        '## 📊 Summary',
        `- Total source size: ${stats.totalSize}`,
        `- Generated XML size: ${stats.xmlSize}`,
        `- Total lines of code: ${stats.totalLines.toLocaleString()}`,
        `- Estimated tokens: ${stats.estimatedTokens}`,
        `- File breakdown: ${stats.textFiles} text, ${stats.binaryFiles} binary, ${stats.errorFiles} errors`,
        '',
        '## 📈 Size Percentiles',
        `Avg: ${Math.round(stats.avgFileSize).toLocaleString()} B, Median: ${Math.round(
          stats.medianFileSize,
        ).toLocaleString()} B, p90: ${stats.p90.toLocaleString()} B, p95: ${stats.p95.toLocaleString()} B, p99: ${stats.p99.toLocaleString()} B`,
        '',
      );

      // Histogram
      if (Array.isArray(stats.histogram) && stats.histogram.length > 0) {
        md.push(
          '## 🧮 Size Histogram',
          '| Bucket | Files | Bytes |',
          '| --- | ---: | ---: |',
        );
        for (const b of stats.histogram) {
          md.push(`| ${b.label} | ${b.count} | ${b.bytes.toLocaleString()} |`);
        }
        md.push('');
      }

      // Top Extensions
      if (Array.isArray(stats.byExtension) && stats.byExtension.length > 0) {
        md.push(
          '## 📦 Top Extensions by Bytes (Top 20)',
          '| Ext | Files | Bytes | % of total |',
          '| --- | ---: | ---: | ---: |',
        );
        for (const e of stats.byExtension.slice(0, 20)) {
          const p = pct(e.bytes, stats.totalBytes);
          md.push(
            `| ${e.ext} | ${e.count} | ${e.bytes.toLocaleString()} | ${p.toFixed(2)}% |`,
          );
        }
        md.push('');
      }

      // Top Directories
      if (Array.isArray(stats.byDirectory) && stats.byDirectory.length > 0) {
        md.push(
          '## 📂 Top Directories by Bytes (Top 20)',
          '| Directory | Files | Bytes | % of total |',
          '| --- | ---: | ---: | ---: |',
        );
        for (const d of stats.byDirectory.slice(0, 20)) {
          const p = pct(d.bytes, stats.totalBytes);
          md.push(
            `| ${d.dir} | ${d.count} | ${d.bytes.toLocaleString()} | ${p.toFixed(2)}% |`,
          );
        }
        md.push('');
      }

      // Depth distribution
      if (Array.isArray(stats.depthDistribution) && stats.depthDistribution.length > 0) {
        md.push('## 🌳 Depth Distribution', '| Depth | Count |', '| ---: | ---: |');
        for (const d of stats.depthDistribution) {
          md.push(`| ${d.depth} | ${d.count} |`);
        }
        md.push('');
      }

      // Longest paths
      if (Array.isArray(stats.longestPaths) && stats.longestPaths.length > 0) {
        md.push(
          '## 🧵 Longest Paths (Top 25)',
          '| Path | Length | Bytes |',
          '| --- | ---: | ---: |',
        );
        for (const pth of stats.longestPaths) {
          md.push(`| ${pth.path} | ${pth.length} | ${pth.size.toLocaleString()} |`);
        }
        md.push('');
      }

      // Temporal
      if (stats.temporal) {
        md.push('## ⏱️ Temporal');
        if (stats.temporal.oldest) {
          md.push(`- Oldest: ${stats.temporal.oldest.path} (${stats.temporal.oldest.mtime})`);
        }
        if (stats.temporal.newest) {
          md.push(`- Newest: ${stats.temporal.newest.path} (${stats.temporal.newest.mtime})`);
        }
        if (Array.isArray(stats.temporal.ageBuckets)) {
          md.push('', '| Age | Files | Bytes |', '| --- | ---: | ---: |');
          for (const b of stats.temporal.ageBuckets) {
            md.push(`| ${b.label} | ${b.count} | ${b.bytes.toLocaleString()} |`);
          }
        }
        md.push('');
      }

      // Quality signals
      if (stats.quality) {
        md.push(
          '## ✅ Quality Signals',
          `- Zero-byte files: ${stats.quality.zeroByteFiles}`,
          `- Empty text files: ${stats.quality.emptyTextFiles}`,
          `- Hidden files: ${stats.quality.hiddenFiles}`,
          `- Symlinks: ${stats.quality.symlinks}`,
          `- Large files (>= ${(stats.quality.largeThreshold / (1024 * 1024)).toFixed(0)} MB): ${stats.quality.largeFilesCount}`,
          `- Suspiciously large files (>= 100 MB): ${stats.quality.suspiciousLargeFilesCount}`,
          '',
        );
      }

      // Duplicates
      if (Array.isArray(stats.duplicateCandidates) && stats.duplicateCandidates.length > 0) {
        md.push(
          '## 🧬 Duplicate Candidates',
          '| Reason | Files | Size (bytes) |',
          '| --- | ---: | ---: |',
        );
        for (const d of stats.duplicateCandidates) {
          md.push(`| ${d.reason} | ${d.count} | ${d.size.toLocaleString()} |`);
        }
        md.push('', '### 🧬 Duplicate Groups Details');
        let dupIndex = 1;
        for (const d of stats.duplicateCandidates) {
          md.push(
            `#### Group ${dupIndex}: ${d.count} files @ ${d.size.toLocaleString()} bytes (${d.reason})`,
          );
          if (Array.isArray(d.files) && d.files.length > 0) {
            for (const fp of d.files) {
              md.push(`- ${fp}`);
            }
          } else {
            md.push('- (file list unavailable)');
          }
          md.push('');
          dupIndex++;
        }
        md.push('');
      }

      // Compressibility
      if (typeof stats.compressibilityRatio === 'number') {
        md.push(
          '## 🗜️ Compressibility',
          `Sampled compressibility ratio: ${(stats.compressibilityRatio * 100).toFixed(2)}%`,
          '',
        );
      }

      // Git
      if (stats.git && stats.git.isRepo) {
        md.push(
          '## 🔧 Git',
          `- Tracked: ${stats.git.trackedCount} files, ${stats.git.trackedBytes.toLocaleString()} bytes`,
          `- Untracked: ${stats.git.untrackedCount} files, ${stats.git.untrackedBytes.toLocaleString()} bytes`,
        );
        if (Array.isArray(stats.git.lfsCandidates) && stats.git.lfsCandidates.length > 0) {
          md.push('', '### 📦 LFS Candidates (Top 20)', '| Path | Bytes |', '| --- | ---: |');
          for (const f of stats.git.lfsCandidates.slice(0, 20)) {
            md.push(`| ${f.path} | ${f.size.toLocaleString()} |`);
          }
        }
        md.push('');
      }

      // Largest Files
      if (Array.isArray(stats.largestFiles) && stats.largestFiles.length > 0) {
        md.push(
          '## 📚 Largest Files (Top 50)',
          '| Path | Size | % of total | LOC |',
          '| --- | ---: | ---: | ---: |',
        );
        for (const f of stats.largestFiles) {
          let loc = '';
          if (!f.isBinary && Array.isArray(aggregatedContent?.textFiles)) {
            const tf = aggregatedContent.textFiles.find((t) => t.path === f.path);
            if (tf && typeof tf.lines === 'number') {
              loc = tf.lines.toLocaleString();
            }
          }
          md.push(
            `| ${f.path} | ${f.sizeFormatted} | ${f.percentOfTotal.toFixed(2)}% | ${loc} |`,
          );
        }
        md.push('');
      }

      await fs.writeFile(mdPath, md.join('\n'));
      console.log(`\n🧾 Detailed stats report written to: ${mdPath}`);
    } catch (error) {
      console.warn(`⚠️ Failed to write stats markdown: ${error.message}`);
    }
  }
}
} catch (error) {
  console.error('❌ Critical error:', error.message);
  console.error('An unexpected error occurred.');

@@ -1,40 +1,203 @@
const fs = require('fs-extra');
const path = require('node:path');

// Deno/Node compatibility: explicitly import process
const process = require('node:process');
const { execFile } = require('node:child_process');
const { promisify } = require('node:util');
const execFileAsync = promisify(execFile);

// Simple memoization across calls (keyed by realpath of startDir)
const _cache = new Map();

async function _tryRun(cmd, args, cwd, timeoutMs = 500) {
  try {
    const { stdout } = await execFileAsync(cmd, args, {
      cwd,
      timeout: timeoutMs,
      windowsHide: true,
      maxBuffer: 1024 * 1024,
    });
    const out = String(stdout || '').trim();
    return out || null;
  } catch {
    return null;
  }
}

async function _detectVcsTopLevel(startDir) {
  // Run common VCS root queries in parallel; ignore failures
  const gitP = _tryRun('git', ['rev-parse', '--show-toplevel'], startDir);
  const hgP = _tryRun('hg', ['root'], startDir);
  const svnP = (async () => {
    const show = await _tryRun('svn', ['info', '--show-item', 'wc-root'], startDir);
    if (show) return show;
    const info = await _tryRun('svn', ['info'], startDir);
    if (info) {
      const line = info
        .split(/\r?\n/)
        .find((l) => l.toLowerCase().startsWith('working copy root path:'));
      if (line) return line.split(':').slice(1).join(':').trim();
    }
    return null;
  })();
  const [git, hg, svn] = await Promise.all([gitP, hgP, svnP]);
  return git || hg || svn || null;
}

/**
 * Attempt to find the project root by walking up from startDir
 * Looks for common project markers like .git, package.json, pyproject.toml, etc.
 * Attempt to find the project root by walking up from startDir.
 * Uses a robust, prioritized set of ecosystem markers (VCS > workspaces/monorepo > lock/build > language config).
 * Also recognizes package.json with "workspaces" as a workspace root.
 * You can augment markers via env PROJECT_ROOT_MARKERS as a comma-separated list of file/dir names.
 * @param {string} startDir
 * @returns {Promise<string|null>} project root directory or null if not found
 */
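// Example of the env override described above (marker names are illustrative):
//   PROJECT_ROOT_MARKERS=".monorepo-root,BUILD_ROOT" node tools/cli.js <command>
// Each listed name is registered at weight 72, just below the language-config
// markers (75) and above the .changeset markers (70).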
async function findProjectRoot(startDir) {
  try {
    // Resolve symlinks for robustness (e.g., when invoked from a symlinked path)
    let dir = path.resolve(startDir);
    const root = path.parse(dir).root;
    const markers = [
      '.git',
      'package.json',
      'pnpm-workspace.yaml',
      'yarn.lock',
      'pnpm-lock.yaml',
      'pyproject.toml',
      'requirements.txt',
      'go.mod',
      'Cargo.toml',
      'composer.json',
      '.hg',
      '.svn',
    ];
    try {
      dir = await fs.realpath(dir);
    } catch {
      // ignore if realpath fails; continue with resolved path
    }
    const startKey = dir; // preserve starting point for caching
    if (_cache.has(startKey)) return _cache.get(startKey);
    const fsRoot = path.parse(dir).root;

    // Helper to safely check for existence
    const exists = (p) => fs.pathExists(p);

    // Build checks: an array of { makePath: (dir) => string, weight }
    const checks = [];

    const add = (rel, weight) => {
      const makePath = (d) => (Array.isArray(rel) ? path.join(d, ...rel) : path.join(d, rel));
      checks.push({ makePath, weight });
    };

    // Highest priority: explicit sentinel markers
    add('.project-root', 110);
    add('.workspace-root', 110);
    add('.repo-root', 110);

    // Highest priority: VCS roots
    add('.git', 100);
    add('.hg', 95);
    add('.svn', 95);

    // Monorepo/workspace indicators
    add('pnpm-workspace.yaml', 90);
    add('lerna.json', 90);
    add('turbo.json', 90);
    add('nx.json', 90);
    add('rush.json', 90);
    add('go.work', 90);
    add('WORKSPACE', 90);
    add('WORKSPACE.bazel', 90);
    add('MODULE.bazel', 90);
    add('pants.toml', 90);

    // Lockfiles and package-manager/top-level locks
    add('yarn.lock', 85);
    add('pnpm-lock.yaml', 85);
    add('package-lock.json', 85);
    add('bun.lockb', 85);
    add('Cargo.lock', 85);
    add('composer.lock', 85);
    add('poetry.lock', 85);
    add('Pipfile.lock', 85);
    add('Gemfile.lock', 85);

    // Build-system root indicators
    add('settings.gradle', 80);
    add('settings.gradle.kts', 80);
    add('gradlew', 80);
    add('pom.xml', 80);
    add('build.sbt', 80);
    add(['project', 'build.properties'], 80);

    // Language/project config markers
    add('deno.json', 75);
    add('deno.jsonc', 75);
    add('pyproject.toml', 75);
    add('Pipfile', 75);
    add('requirements.txt', 75);
    add('go.mod', 75);
    add('Cargo.toml', 75);
    add('composer.json', 75);
    add('mix.exs', 75);
    add('Gemfile', 75);
    add('CMakeLists.txt', 75);
    add('stack.yaml', 75);
    add('cabal.project', 75);
    add('rebar.config', 75);
    add('pubspec.yaml', 75);
    add('flake.nix', 75);
    add('shell.nix', 75);
    add('default.nix', 75);
    add('.tool-versions', 75);
    add('package.json', 74); // generic Node project (lower than lockfiles/workspaces)

    // Changesets
    add(['.changeset', 'config.json'], 70);
    add('.changeset', 70);

    // Custom markers via env (comma-separated names)
    if (process.env.PROJECT_ROOT_MARKERS) {
      for (const name of process.env.PROJECT_ROOT_MARKERS.split(',')
        .map((s) => s.trim())
        .filter(Boolean)) {
        add(name, 72);
      }
    }

    /** Check for package.json with "workspaces" */
    const hasWorkspacePackageJson = async (d) => {
      const pkgPath = path.join(d, 'package.json');
      if (!(await exists(pkgPath))) return false;
      try {
        const raw = await fs.readFile(pkgPath, 'utf8');
        const pkg = JSON.parse(raw);
        return Boolean(pkg && pkg.workspaces);
      } catch {
        return false;
      }
    };

    let best = null; // { dir, weight }

    // Try to detect VCS toplevel once up-front; treat as authoritative slightly above .git marker
    const vcsTop = await _detectVcsTopLevel(dir);
    if (vcsTop) {
      best = { dir: vcsTop, weight: 101 };
    }

    while (true) {
      const exists = await Promise.all(markers.map((m) => fs.pathExists(path.join(dir, m))));
      if (exists.some(Boolean)) {
        return dir;
      // Special check: package.json with "workspaces"
      if ((await hasWorkspacePackageJson(dir)) && (!best || 90 >= best.weight))
        best = { dir, weight: 90 };

      // Evaluate all other checks in parallel
      const results = await Promise.all(
        checks.map(async (c) => ({ c, ok: await exists(c.makePath(dir)) })),
      );

      for (const { c, ok } of results) {
        if (!ok) continue;
        if (!best || c.weight >= best.weight) {
          best = { dir, weight: c.weight };
        }
      }
      if (dir === root) break;

      if (dir === fsRoot) break;
      dir = path.dirname(dir);
    }
    return null;

    const out = best ? best.dir : null;
    _cache.set(startKey, out);
    return out;
  } catch {
    return null;
  }

395
tools/flattener/stats.helpers.js
Normal file
@@ -0,0 +1,395 @@
'use strict';

const fs = require('node:fs/promises');
const path = require('node:path');
const zlib = require('node:zlib');
const { Buffer } = require('node:buffer');
const crypto = require('node:crypto');
const cp = require('node:child_process');

const KB = 1024;
const MB = 1024 * KB;

const formatSize = (bytes) => {
  if (bytes < 1024) return `${bytes} B`;
  if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(1)} KB`;
  if (bytes < 1024 * 1024 * 1024) return `${(bytes / (1024 * 1024)).toFixed(1)} MB`;
  return `${(bytes / (1024 * 1024 * 1024)).toFixed(2)} GB`;
};

const percentile = (sorted, p) => {
  if (sorted.length === 0) return 0;
  const idx = Math.min(sorted.length - 1, Math.max(0, Math.ceil((p / 100) * sorted.length) - 1));
  return sorted[idx];
};
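// Worked examples for the two helpers above (inputs are illustrative):
//   formatSize(1536)                     -> "1.5 KB"
//   percentile([10, 20, 30, 40, 50], 90) -> 50   (nearest-rank: ceil(0.9 * 5) - 1 = index 4)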

async function processWithLimit(items, fn, concurrency = 64) {
  for (let i = 0; i < items.length; i += concurrency) {
    await Promise.all(items.slice(i, i + concurrency).map(fn));
  }
}
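// Note: processWithLimit (above) is a chunked limiter, not a rolling pool —
// the next batch of `concurrency` items starts only after the whole previous
// batch settles.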

async function enrichAllFiles(textFiles, binaryFiles) {
  /** @type {Array<{ path: string; absolutePath: string; size: number; lines?: number; isBinary: boolean; ext: string; dir: string; depth: number; hidden: boolean; mtimeMs: number; isSymlink: boolean; }>} */
  const allFiles = [];

  async function enrich(file, isBinary) {
    const ext = (path.extname(file.path) || '').toLowerCase();
    const dir = path.dirname(file.path) || '.';
    const depth = file.path.split(path.sep).filter(Boolean).length;
    const hidden = file.path.split(path.sep).some((seg) => seg.startsWith('.'));
    let mtimeMs = 0;
    let isSymlink = false;
    try {
      const lst = await fs.lstat(file.absolutePath);
      mtimeMs = lst.mtimeMs;
      isSymlink = lst.isSymbolicLink();
    } catch {
      /* ignore lstat errors during enrichment */
    }
    allFiles.push({
      path: file.path,
      absolutePath: file.absolutePath,
      size: file.size || 0,
      lines: file.lines,
      isBinary,
      ext,
      dir,
      depth,
      hidden,
      mtimeMs,
      isSymlink,
    });
  }

  await processWithLimit(textFiles, (f) => enrich(f, false));
  await processWithLimit(binaryFiles, (f) => enrich(f, true));
  return allFiles;
}

function buildHistogram(allFiles) {
  const buckets = [
    [1 * KB, '0–1KB'],
    [10 * KB, '1–10KB'],
    [100 * KB, '10–100KB'],
    [1 * MB, '100KB–1MB'],
    [10 * MB, '1–10MB'],
    [100 * MB, '10–100MB'],
    [Infinity, '>=100MB'],
  ];
  const histogram = buckets.map(([_, label]) => ({ label, count: 0, bytes: 0 }));
  for (const f of allFiles) {
    for (const [i, bucket] of buckets.entries()) {
      if (f.size < bucket[0]) {
        histogram[i].count++;
        histogram[i].bytes += f.size;
        break;
      }
    }
  }
  return histogram;
}

function aggregateByExtension(allFiles) {
  const byExtension = new Map();
  for (const f of allFiles) {
    const key = f.ext || '<none>';
    const v = byExtension.get(key) || { ext: key, count: 0, bytes: 0 };
    v.count++;
    v.bytes += f.size;
    byExtension.set(key, v);
  }
  return [...byExtension.values()].sort((a, b) => b.bytes - a.bytes);
}

function aggregateByDirectory(allFiles) {
  const byDirectory = new Map();
  function addDirBytes(dir, bytes) {
    const v = byDirectory.get(dir) || { dir, count: 0, bytes: 0 };
    v.count++;
    v.bytes += bytes;
    byDirectory.set(dir, v);
  }
  for (const f of allFiles) {
    const parts = f.dir === '.' ? [] : f.dir.split(path.sep);
    let acc = '';
    for (let i = 0; i < parts.length; i++) {
      acc = i === 0 ? parts[0] : acc + path.sep + parts[i];
      addDirBytes(acc, f.size);
    }
    if (parts.length === 0) addDirBytes('.', f.size);
  }
  return [...byDirectory.values()].sort((a, b) => b.bytes - a.bytes);
}

function computeDepthAndLongest(allFiles) {
  const depthDistribution = new Map();
  for (const f of allFiles) {
    depthDistribution.set(f.depth, (depthDistribution.get(f.depth) || 0) + 1);
  }
  const longestPaths = [...allFiles]
    .sort((a, b) => b.path.length - a.path.length)
    .slice(0, 25)
    .map((f) => ({ path: f.path, length: f.path.length, size: f.size }));
  const depthDist = [...depthDistribution.entries()]
    .sort((a, b) => a[0] - b[0])
    .map(([depth, count]) => ({ depth, count }));
  return { depthDist, longestPaths };
}

function computeTemporal(allFiles, nowMs) {
  let oldest = null,
    newest = null;
  const ageBuckets = [
    { label: '> 1 year', minDays: 365, maxDays: Infinity, count: 0, bytes: 0 },
    { label: '6–12 months', minDays: 180, maxDays: 365, count: 0, bytes: 0 },
    { label: '1–6 months', minDays: 30, maxDays: 180, count: 0, bytes: 0 },
    { label: '7–30 days', minDays: 7, maxDays: 30, count: 0, bytes: 0 },
    { label: '1–7 days', minDays: 1, maxDays: 7, count: 0, bytes: 0 },
    { label: '< 1 day', minDays: 0, maxDays: 1, count: 0, bytes: 0 },
  ];
  for (const f of allFiles) {
    const ageDays = Math.max(0, (nowMs - (f.mtimeMs || nowMs)) / (24 * 60 * 60 * 1000));
    for (const b of ageBuckets) {
      if (ageDays >= b.minDays && ageDays < b.maxDays) {
        b.count++;
        b.bytes += f.size;
        break;
      }
    }
    if (!oldest || f.mtimeMs < oldest.mtimeMs) oldest = f;
    if (!newest || f.mtimeMs > newest.mtimeMs) newest = f;
  }
  return {
    oldest: oldest
      ? { path: oldest.path, mtime: oldest.mtimeMs ? new Date(oldest.mtimeMs).toISOString() : null }
      : null,
    newest: newest
      ? { path: newest.path, mtime: newest.mtimeMs ? new Date(newest.mtimeMs).toISOString() : null }
      : null,
    ageBuckets,
  };
}

function computeQuality(allFiles, textFiles) {
  const zeroByteFiles = allFiles.filter((f) => f.size === 0).length;
  const emptyTextFiles = textFiles.filter(
    (f) => (f.size || 0) === 0 || (f.lines || 0) === 0,
  ).length;
  const hiddenFiles = allFiles.filter((f) => f.hidden).length;
  const symlinks = allFiles.filter((f) => f.isSymlink).length;
  const largeThreshold = 50 * MB;
  const suspiciousThreshold = 100 * MB;
  const largeFilesCount = allFiles.filter((f) => f.size >= largeThreshold).length;
  const suspiciousLargeFilesCount = allFiles.filter((f) => f.size >= suspiciousThreshold).length;
  return {
    zeroByteFiles,
    emptyTextFiles,
    hiddenFiles,
    symlinks,
    largeFilesCount,
    suspiciousLargeFilesCount,
    largeThreshold,
  };
}

function computeDuplicates(allFiles, textFiles) {
  const duplicatesBySize = new Map();
  for (const f of allFiles) {
    const key = String(f.size);
    const arr = duplicatesBySize.get(key) || [];
    arr.push(f);
    duplicatesBySize.set(key, arr);
  }
  const duplicateCandidates = [];
  for (const [sizeKey, arr] of duplicatesBySize.entries()) {
    if (arr.length < 2) continue;
    const textGroup = arr.filter((f) => !f.isBinary);
    const otherGroup = arr.filter((f) => f.isBinary);
    const contentHashGroups = new Map();
    for (const tf of textGroup) {
      try {
        const src = textFiles.find((x) => x.absolutePath === tf.absolutePath);
        const content = src ? src.content : '';
        const h = crypto.createHash('sha1').update(content).digest('hex');
        const g = contentHashGroups.get(h) || [];
        g.push(tf);
        contentHashGroups.set(h, g);
      } catch {
        /* ignore hashing errors for duplicate detection */
      }
    }
    for (const [_h, g] of contentHashGroups.entries()) {
      if (g.length > 1)
        duplicateCandidates.push({
          reason: 'same-size+text-hash',
          size: Number(sizeKey),
          count: g.length,
          files: g.map((f) => f.path),
        });
    }
    if (otherGroup.length > 1) {
      duplicateCandidates.push({
        reason: 'same-size',
        size: Number(sizeKey),
        count: otherGroup.length,
        files: otherGroup.map((f) => f.path),
      });
    }
  }
  return duplicateCandidates;
}

function estimateCompressibility(textFiles) {
  let compSampleBytes = 0;
  let compCompressedBytes = 0;
  for (const tf of textFiles) {
    try {
      const sampleLen = Math.min(256 * 1024, tf.size || 0);
      if (sampleLen <= 0) continue;
      const sample = tf.content.slice(0, sampleLen);
      const gz = zlib.gzipSync(Buffer.from(sample, 'utf8'));
      compSampleBytes += sampleLen;
      compCompressedBytes += gz.length;
    } catch {
      /* ignore compression errors during sampling */
    }
  }
  return compSampleBytes > 0 ? compCompressedBytes / compSampleBytes : null;
}

function computeGitInfo(allFiles, rootDir, largeThreshold) {
  const info = {
    isRepo: false,
    trackedCount: 0,
    trackedBytes: 0,
    untrackedCount: 0,
    untrackedBytes: 0,
    lfsCandidates: [],
  };
  try {
    if (!rootDir) return info;
    const top = cp
      .execFileSync('git', ['rev-parse', '--show-toplevel'], {
        cwd: rootDir,
        stdio: ['ignore', 'pipe', 'ignore'],
      })
      .toString()
      .trim();
    if (!top) return info;
    info.isRepo = true;
    const out = cp.execFileSync('git', ['ls-files', '-z'], {
      cwd: rootDir,
      stdio: ['ignore', 'pipe', 'ignore'],
    });
    const tracked = new Set(out.toString().split('\0').filter(Boolean));
    let trackedBytes = 0,
      trackedCount = 0,
      untrackedBytes = 0,
      untrackedCount = 0;
    const lfsCandidates = [];
    for (const f of allFiles) {
      const isTracked = tracked.has(f.path);
      if (isTracked) {
        trackedCount++;
        trackedBytes += f.size;
        if (f.size >= largeThreshold) lfsCandidates.push({ path: f.path, size: f.size });
      } else {
        untrackedCount++;
        untrackedBytes += f.size;
      }
    }
    info.trackedCount = trackedCount;
    info.trackedBytes = trackedBytes;
    info.untrackedCount = untrackedCount;
    info.untrackedBytes = untrackedBytes;
    info.lfsCandidates = lfsCandidates.sort((a, b) => b.size - a.size).slice(0, 50);
  } catch {
    /* git not available or not a repo, ignore */
  }
  return info;
}

function computeLargestFiles(allFiles, totalBytes) {
  const toPct = (num, den) => (den === 0 ? 0 : (num / den) * 100);
  return [...allFiles]
    .sort((a, b) => b.size - a.size)
    .slice(0, 50)
    .map((f) => ({
      path: f.path,
      size: f.size,
      sizeFormatted: formatSize(f.size),
      percentOfTotal: toPct(f.size, totalBytes),
      ext: f.ext || '',
      isBinary: f.isBinary,
      mtime: f.mtimeMs ? new Date(f.mtimeMs).toISOString() : null,
    }));
}

function mdTable(rows, headers) {
  const header = `| ${headers.join(' | ')} |`;
  const sep = `| ${headers.map(() => '---').join(' | ')} |`;
  const body = rows.map((r) => `| ${r.join(' | ')} |`).join('\n');
  return `${header}\n${sep}\n${body}`;
}

function buildMarkdownReport(largestFiles, byExtensionArr, byDirectoryArr, totalBytes) {
  const toPct = (num, den) => (den === 0 ? 0 : (num / den) * 100);
  const md = [];
  md.push(
    '\n### Top Largest Files (Top 50)\n',
    mdTable(
      largestFiles.map((f) => [
        f.path,
        f.sizeFormatted,
        `${f.percentOfTotal.toFixed(2)}%`,
        f.ext || '',
        f.isBinary ? 'binary' : 'text',
      ]),
      ['Path', 'Size', '% of total', 'Ext', 'Type'],
    ),
    '\n\n### Top Extensions by Bytes (Top 20)\n',
  );
  const topExtRows = byExtensionArr
    .slice(0, 20)
    .map((e) => [
      e.ext,
      String(e.count),
      formatSize(e.bytes),
      `${toPct(e.bytes, totalBytes).toFixed(2)}%`,
    ]);
  md.push(
    mdTable(topExtRows, ['Ext', 'Count', 'Bytes', '% of total']),
    '\n\n### Top Directories by Bytes (Top 20)\n',
  );
  const topDirRows = byDirectoryArr
    .slice(0, 20)
    .map((d) => [
      d.dir,
      String(d.count),
      formatSize(d.bytes),
      `${toPct(d.bytes, totalBytes).toFixed(2)}%`,
    ]);
  md.push(mdTable(topDirRows, ['Directory', 'Files', 'Bytes', '% of total']));
  return md.join('\n');
}

module.exports = {
  KB,
  MB,
  formatSize,
  percentile,
  processWithLimit,
  enrichAllFiles,
  buildHistogram,
  aggregateByExtension,
  aggregateByDirectory,
  computeDepthAndLongest,
  computeTemporal,
  computeQuality,
  computeDuplicates,
  estimateCompressibility,
  computeGitInfo,
  computeLargestFiles,
  buildMarkdownReport,
};
@@ -1,29 +1,79 @@
function calculateStatistics(aggregatedContent, xmlFileSize) {
const H = require('./stats.helpers.js');

async function calculateStatistics(aggregatedContent, xmlFileSize, rootDir) {
  const { textFiles, binaryFiles, errors } = aggregatedContent;

  const totalTextSize = textFiles.reduce((sum, file) => sum + file.size, 0);
  const totalBinarySize = binaryFiles.reduce((sum, file) => sum + file.size, 0);
  const totalSize = totalTextSize + totalBinarySize;

  const totalLines = textFiles.reduce((sum, file) => sum + file.lines, 0);

  const totalLines = textFiles.reduce((sum, f) => sum + (f.lines || 0), 0);
  const estimatedTokens = Math.ceil(xmlFileSize / 4);

  const formatSize = (bytes) => {
    if (bytes < 1024) return `${bytes} B`;
    if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(1)} KB`;
    return `${(bytes / (1024 * 1024)).toFixed(1)} MB`;
  };
  // Build enriched file list
  const allFiles = await H.enrichAllFiles(textFiles, binaryFiles);
  const totalBytes = allFiles.reduce((s, f) => s + f.size, 0);
  const sizes = allFiles.map((f) => f.size).sort((a, b) => a - b);
  const avgSize = sizes.length > 0 ? totalBytes / sizes.length : 0;
  const medianSize = sizes.length > 0 ? H.percentile(sizes, 50) : 0;
  const p90 = H.percentile(sizes, 90);
  const p95 = H.percentile(sizes, 95);
  const p99 = H.percentile(sizes, 99);

  const histogram = H.buildHistogram(allFiles);
  const byExtensionArr = H.aggregateByExtension(allFiles);
  const byDirectoryArr = H.aggregateByDirectory(allFiles);
  const { depthDist, longestPaths } = H.computeDepthAndLongest(allFiles);
  const temporal = H.computeTemporal(allFiles, Date.now());
  const quality = H.computeQuality(allFiles, textFiles);
  const duplicateCandidates = H.computeDuplicates(allFiles, textFiles);
  const compressibilityRatio = H.estimateCompressibility(textFiles);
  const git = H.computeGitInfo(allFiles, rootDir, quality.largeThreshold);
  const largestFiles = H.computeLargestFiles(allFiles, totalBytes);
  const markdownReport = H.buildMarkdownReport(
    largestFiles,
    byExtensionArr,
    byDirectoryArr,
    totalBytes,
  );

  return {
    // Back-compat summary
    totalFiles: textFiles.length + binaryFiles.length,
    textFiles: textFiles.length,
    binaryFiles: binaryFiles.length,
    errorFiles: errors.length,
    totalSize: formatSize(totalSize),
    xmlSize: formatSize(xmlFileSize),
    totalSize: H.formatSize(totalBytes),
    totalBytes,
    xmlSize: H.formatSize(xmlFileSize),
    totalLines,
    estimatedTokens: estimatedTokens.toLocaleString(),

    // Distributions and percentiles
    avgFileSize: avgSize,
    medianFileSize: medianSize,
    p90,
    p95,
    p99,
    histogram,

    // Extensions and directories
    byExtension: byExtensionArr,
    byDirectory: byDirectoryArr,
    depthDistribution: depthDist,
    longestPaths,

    // Temporal
    temporal,

    // Quality signals
    quality,

    // Duplicates and compressibility
    duplicateCandidates,
    compressibilityRatio,

    // Git-aware
    git,

    largestFiles,
    markdownReport,
  };
}

413
tools/flattener/test-matrix.js
Normal file
@@ -0,0 +1,413 @@
/* deno-lint-ignore-file */
/*
  Automatic test matrix for project root detection.
  Creates temporary fixtures for various ecosystems and validates findProjectRoot().
  No external options or flags required. Safe to run multiple times.
*/
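// Assumed invocation (no npm script for this file appears in the diff):
//   node tools/flattener/test-matrix.js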
const os = require('node:os');
|
||||
const path = require('node:path');
|
||||
const fs = require('fs-extra');
|
||||
const { promisify } = require('node:util');
|
||||
const { execFile } = require('node:child_process');
|
||||
const process = require('node:process');
|
||||
const execFileAsync = promisify(execFile);
|
||||
|
||||
const { findProjectRoot } = require('./projectRoot.js');
|
||||
|
||||
async function cmdAvailable(cmd) {
|
||||
try {
|
||||
await execFileAsync(cmd, ['--version'], { timeout: 500, windowsHide: true });
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
|
||||
async function testSvnMarker() {
|
||||
const root = await mkTmpDir('svn');
|
||||
const nested = path.join(root, 'proj', 'code');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.ensureDir(path.join(root, '.svn'));
|
||||
const found = await findProjectRoot(nested);
|
||||
assertEqual(found, root, '.svn marker should be detected');
|
||||
return { name: 'svn-marker', ok: true };
|
||||
}
|
||||
|
||||
async function testSymlinkStart() {
|
||||
const root = await mkTmpDir('symlink-start');
|
||||
const nested = path.join(root, 'a', 'b');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.writeFile(path.join(root, '.project-root'), '\n');
|
||||
const tmp = await mkTmpDir('symlink-tmp');
|
||||
const link = path.join(tmp, 'link-to-b');
|
||||
try {
|
||||
await fs.symlink(nested, link);
|
||||
} catch {
|
||||
// symlink may not be permitted on some systems; skip
|
||||
return { name: 'symlink-start', ok: true, skipped: true };
|
||||
}
|
||||
const found = await findProjectRoot(link);
|
||||
assertEqual(found, root, 'should resolve symlinked start to real root');
|
||||
return { name: 'symlink-start', ok: true };
|
||||
}
|
||||
|
||||
async function testSubmoduleLikeInnerGitFile() {
|
||||
const root = await mkTmpDir('submodule-like');
|
||||
const mid = path.join(root, 'mid');
|
||||
const leaf = path.join(mid, 'leaf');
|
||||
await fs.ensureDir(leaf);
|
||||
// outer repo
|
||||
await fs.ensureDir(path.join(root, '.git'));
|
||||
// inner submodule-like .git file
|
||||
await fs.writeFile(path.join(mid, '.git'), 'gitdir: ../.git/modules/mid\n');
|
||||
const found = await findProjectRoot(leaf);
|
||||
assertEqual(found, root, 'outermost .git should win on tie weight');
|
||||
return { name: 'submodule-like-gitfile', ok: true };
|
||||
}
|
||||
}
|
||||
|
||||
async function mkTmpDir(name) {
|
||||
const base = await fs.realpath(os.tmpdir());
|
||||
const dir = await fs.mkdtemp(path.join(base, `flattener-${name}-`));
|
||||
return dir;
|
||||
}
|
||||
|
||||
function assertEqual(actual, expected, msg) {
|
||||
if (actual !== expected) {
|
||||
throw new Error(`${msg}: expected="${expected}" actual="${actual}"`);
|
||||
}
|
||||
}
|
||||
|
||||
async function testSentinel() {
|
||||
const root = await mkTmpDir('sentinel');
|
||||
const nested = path.join(root, 'a', 'b', 'c');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.writeFile(path.join(root, '.project-root'), '\n');
|
||||
const found = await findProjectRoot(nested);
|
||||
await assertEqual(found, root, 'sentinel .project-root should win');
|
||||
return { name: 'sentinel', ok: true };
|
||||
}
|
||||
|
||||
async function testOtherSentinels() {
|
||||
const root = await mkTmpDir('other-sentinels');
|
||||
const nested = path.join(root, 'x', 'y');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.writeFile(path.join(root, '.workspace-root'), '\n');
|
||||
const found1 = await findProjectRoot(nested);
|
||||
assertEqual(found1, root, 'sentinel .workspace-root should win');
|
||||
|
||||
await fs.remove(path.join(root, '.workspace-root'));
|
||||
await fs.writeFile(path.join(root, '.repo-root'), '\n');
|
||||
const found2 = await findProjectRoot(nested);
|
||||
assertEqual(found2, root, 'sentinel .repo-root should win');
|
||||
return { name: 'other-sentinels', ok: true };
|
||||
}
|
||||
|
||||
async function testGitCliAndMarker() {
|
||||
const hasGit = await cmdAvailable('git');
|
||||
if (!hasGit) return { name: 'git-cli', ok: true, skipped: true };
|
||||
|
||||
const root = await mkTmpDir('git');
|
||||
const nested = path.join(root, 'pkg', 'src');
|
||||
await fs.ensureDir(nested);
|
||||
await execFileAsync('git', ['init'], { cwd: root, timeout: 2000 });
|
||||
const found = await findProjectRoot(nested);
|
||||
await assertEqual(found, root, 'git toplevel should be detected');
|
||||
return { name: 'git-cli', ok: true };
|
||||
}
|
||||
|
||||
async function testHgMarkerOrCli() {
|
||||
// Prefer simple marker test to avoid requiring Mercurial install
|
||||
const root = await mkTmpDir('hg');
|
||||
const nested = path.join(root, 'lib');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.ensureDir(path.join(root, '.hg'));
|
||||
const found = await findProjectRoot(nested);
|
||||
await assertEqual(found, root, '.hg marker should be detected');
|
||||
return { name: 'hg-marker', ok: true };
|
||||
}
|
||||
|
||||
async function testWorkspacePnpm() {
|
||||
const root = await mkTmpDir('pnpm-workspace');
|
||||
const pkgA = path.join(root, 'packages', 'a');
|
||||
await fs.ensureDir(pkgA);
|
||||
await fs.writeFile(path.join(root, 'pnpm-workspace.yaml'), 'packages:\n - packages/*\n');
|
||||
const found = await findProjectRoot(pkgA);
|
||||
await assertEqual(found, root, 'pnpm-workspace.yaml should be detected');
|
||||
return { name: 'pnpm-workspace', ok: true };
|
||||
}
|
||||
|
||||
async function testPackageJsonWorkspaces() {
|
||||
const root = await mkTmpDir('package-workspaces');
|
||||
const pkgA = path.join(root, 'packages', 'a');
|
||||
await fs.ensureDir(pkgA);
|
||||
await fs.writeJson(
|
||||
path.join(root, 'package.json'),
|
||||
{ private: true, workspaces: ['packages/*'] },
|
||||
{ spaces: 2 },
|
||||
);
|
||||
const found = await findProjectRoot(pkgA);
|
||||
await assertEqual(found, root, 'package.json workspaces should be detected');
|
||||
return { name: 'package.json-workspaces', ok: true };
|
||||
}
|
||||
|
||||
async function testLockfiles() {
|
||||
const root = await mkTmpDir('lockfiles');
|
||||
const nested = path.join(root, 'src');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.writeFile(path.join(root, 'yarn.lock'), '\n');
|
||||
const found = await findProjectRoot(nested);
|
||||
await assertEqual(found, root, 'yarn.lock should be detected');
|
||||
return { name: 'lockfiles', ok: true };
|
||||
}
|
||||
|
||||
async function testLanguageConfigs() {
|
||||
const root = await mkTmpDir('lang-configs');
|
||||
const nested = path.join(root, 'x', 'y');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.writeFile(path.join(root, 'pyproject.toml'), "[tool.poetry]\nname='tmp'\n");
|
||||
const found = await findProjectRoot(nested);
|
||||
await assertEqual(found, root, 'pyproject.toml should be detected');
|
||||
return { name: 'language-configs', ok: true };
|
||||
}
|
||||
|
||||
async function testPreferOuterOnTie() {
|
||||
const root = await mkTmpDir('tie');
|
||||
const mid = path.join(root, 'mid');
|
||||
const leaf = path.join(mid, 'leaf');
|
||||
await fs.ensureDir(leaf);
|
||||
// same weight marker at two levels
|
||||
await fs.writeFile(path.join(root, 'requirements.txt'), '\n');
|
||||
await fs.writeFile(path.join(mid, 'requirements.txt'), '\n');
|
||||
const found = await findProjectRoot(leaf);
|
||||
await assertEqual(found, root, 'outermost directory should win on equal weight');
|
||||
return { name: 'prefer-outermost-tie', ok: true };
|
||||
}
|
||||
|
||||
// Additional coverage: Bazel, Nx/Turbo/Rush, Go workspaces, Deno, Java/Scala, PHP, Rust, Nix, Changesets, env markers,
|
||||
// and priority interaction between package.json and lockfiles.
|
||||
|
||||
async function testBazelWorkspace() {
|
||||
const root = await mkTmpDir('bazel');
|
||||
const nested = path.join(root, 'apps', 'svc');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.writeFile(path.join(root, 'WORKSPACE'), 'workspace(name="tmp")\n');
|
||||
const found = await findProjectRoot(nested);
|
||||
await assertEqual(found, root, 'Bazel WORKSPACE should be detected');
|
||||
return { name: 'bazel-workspace', ok: true };
|
||||
}
|
||||
|
||||
async function testNx() {
|
||||
const root = await mkTmpDir('nx');
|
||||
const nested = path.join(root, 'apps', 'web');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.writeJson(path.join(root, 'nx.json'), { npmScope: 'tmp' }, { spaces: 2 });
|
||||
const found = await findProjectRoot(nested);
|
||||
await assertEqual(found, root, 'nx.json should be detected');
|
||||
return { name: 'nx', ok: true };
|
||||
}
|
||||
|
||||
async function testTurbo() {
|
||||
const root = await mkTmpDir('turbo');
|
||||
const nested = path.join(root, 'packages', 'x');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.writeJson(path.join(root, 'turbo.json'), { pipeline: {} }, { spaces: 2 });
|
||||
const found = await findProjectRoot(nested);
|
||||
await assertEqual(found, root, 'turbo.json should be detected');
|
||||
return { name: 'turbo', ok: true };
|
||||
}
|
||||
|
||||
async function testRush() {
|
||||
const root = await mkTmpDir('rush');
|
||||
const nested = path.join(root, 'apps', 'a');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.writeJson(path.join(root, 'rush.json'), { projectFolderMinDepth: 1 }, { spaces: 2 });
|
||||
const found = await findProjectRoot(nested);
|
||||
await assertEqual(found, root, 'rush.json should be detected');
|
||||
return { name: 'rush', ok: true };
|
||||
}
|
||||
|
||||
async function testGoWorkAndMod() {
|
||||
const root = await mkTmpDir('gowork');
|
||||
const mod = path.join(root, 'modA');
|
||||
const nested = path.join(mod, 'pkg');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.writeFile(path.join(root, 'go.work'), 'go 1.22\nuse ./modA\n');
|
||||
await fs.writeFile(path.join(mod, 'go.mod'), 'module example.com/a\ngo 1.22\n');
|
||||
const found = await findProjectRoot(nested);
|
||||
await assertEqual(found, root, 'go.work should define the workspace root');
|
||||
return { name: 'go-work', ok: true };
|
||||
}
|
||||
|
||||
async function testDenoJson() {
|
||||
const root = await mkTmpDir('deno');
|
||||
const nested = path.join(root, 'src');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.writeJson(path.join(root, 'deno.json'), { tasks: {} }, { spaces: 2 });
|
||||
const found = await findProjectRoot(nested);
|
||||
await assertEqual(found, root, 'deno.json should be detected');
|
||||
return { name: 'deno-json', ok: true };
|
||||
}
|
||||
|
||||
async function testGradleSettings() {
|
||||
const root = await mkTmpDir('gradle');
|
||||
const nested = path.join(root, 'app');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.writeFile(path.join(root, 'settings.gradle'), "rootProject.name='tmp'\n");
|
||||
const found = await findProjectRoot(nested);
|
||||
await assertEqual(found, root, 'settings.gradle should be detected');
|
||||
return { name: 'gradle-settings', ok: true };
|
||||
}
|
||||
|
||||
async function testMavenPom() {
|
||||
const root = await mkTmpDir('maven');
|
||||
const nested = path.join(root, 'module');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.writeFile(path.join(root, 'pom.xml'), '<project></project>\n');
|
||||
const found = await findProjectRoot(nested);
|
||||
await assertEqual(found, root, 'pom.xml should be detected');
|
||||
return { name: 'maven-pom', ok: true };
|
||||
}
|
||||
|
||||
async function testSbtBuild() {
|
||||
const root = await mkTmpDir('sbt');
|
||||
const nested = path.join(root, 'sub');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.writeFile(path.join(root, 'build.sbt'), 'name := "tmp"\n');
|
||||
const found = await findProjectRoot(nested);
|
||||
await assertEqual(found, root, 'build.sbt should be detected');
|
||||
return { name: 'sbt-build', ok: true };
|
||||
}
|
||||
|
||||
async function testComposer() {
|
||||
const root = await mkTmpDir('composer');
|
||||
const nested = path.join(root, 'src');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.writeJson(path.join(root, 'composer.json'), { name: 'tmp/pkg' }, { spaces: 2 });
|
||||
await fs.writeFile(path.join(root, 'composer.lock'), '{}\n');
|
||||
const found = await findProjectRoot(nested);
|
||||
await assertEqual(found, root, 'composer.{json,lock} should be detected');
|
||||
return { name: 'composer', ok: true };
|
||||
}
|
||||
|
||||
async function testCargo() {
|
||||
const root = await mkTmpDir('cargo');
|
||||
const nested = path.join(root, 'src');
|
||||
await fs.ensureDir(nested);
|
||||
await fs.writeFile(path.join(root, 'Cargo.toml'), "[package]\nname='tmp'\nversion='0.0.0'\n");
|
||||
const found = await findProjectRoot(nested);
|
||||
await assertEqual(found, root, 'Cargo.toml should be detected');
|
||||
return { name: 'cargo', ok: true };
|
||||
}
|
||||
|
||||
async function testNixFlake() {
  const root = await mkTmpDir('nix');
  const nested = path.join(root, 'work');
  await fs.ensureDir(nested);
  await fs.writeFile(path.join(root, 'flake.nix'), '{ }\n');
  const found = await findProjectRoot(nested);
  await assertEqual(found, root, 'flake.nix should be detected');
  return { name: 'nix-flake', ok: true };
}

async function testChangesetConfig() {
  const root = await mkTmpDir('changeset');
  const nested = path.join(root, 'pkg');
  await fs.ensureDir(nested);
  await fs.ensureDir(path.join(root, '.changeset'));
  await fs.writeJson(
    path.join(root, '.changeset', 'config.json'),
    { $schema: 'https://unpkg.com/@changesets/config@2.3.1/schema.json' },
    { spaces: 2 },
  );
  const found = await findProjectRoot(nested);
  await assertEqual(found, root, '.changeset/config.json should be detected');
  return { name: 'changesets', ok: true };
}

async function testEnvCustomMarker() {
  const root = await mkTmpDir('env-marker');
  const nested = path.join(root, 'dir');
  await fs.ensureDir(nested);
  await fs.writeFile(path.join(root, 'MY_ROOT'), '\n');
  const prev = process.env.PROJECT_ROOT_MARKERS;
  process.env.PROJECT_ROOT_MARKERS = 'MY_ROOT';
  try {
    const found = await findProjectRoot(nested);
    await assertEqual(found, root, 'custom env marker should be honored');
  } finally {
    if (prev === undefined) delete process.env.PROJECT_ROOT_MARKERS;
    else process.env.PROJECT_ROOT_MARKERS = prev;
  }
  return { name: 'env-custom-marker', ok: true };
}

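The test above implies that findProjectRoot consults a PROJECT_ROOT_MARKERS environment variable ahead of its built-in marker list. A minimal sketch of how that value might be parsed, assuming a comma-separated list of file names (the helper name `parseEnvMarkers` is hypothetical, not part of this file):

```js
// Hypothetical helper: turn PROJECT_ROOT_MARKERS into a list of marker
// file names. Assumes a comma-separated value, e.g. 'MY_ROOT,.projectile'.
function parseEnvMarkers(env = process.env) {
  const raw = env.PROJECT_ROOT_MARKERS;
  if (!raw) return [];
  return raw
    .split(',')
    .map((name) => name.trim())
    .filter(Boolean);
}
```
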
async function testPackageLowPriorityVsLock() {
  const root = await mkTmpDir('pkg-vs-lock');
  const nested = path.join(root, 'nested');
  await fs.ensureDir(path.join(nested, 'deep'));
  await fs.writeJson(path.join(nested, 'package.json'), { name: 'nested' }, { spaces: 2 });
  await fs.writeFile(path.join(root, 'yarn.lock'), '\n');
  const found = await findProjectRoot(path.join(nested, 'deep'));
  await assertEqual(found, root, 'lockfile at root should outrank nested package.json');
  return { name: 'package-vs-lock-priority', ok: true };
}

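This last test pins down relative priority: a lockfile at the repository root outranks a package.json in an intermediate directory. A hedged sketch of the tiered ranking these tests collectively imply (the exact tiers and file lists here are assumptions inferred from the tests, not the resolver's actual table):

```js
// Hypothetical priority tiers, highest first, inferred from the tests above.
const MARKER_TIERS = [
  ['.git', '.hg', '.svn'], // VCS sentinels
  ['go.work', 'pnpm-workspace.yaml', '.changeset/config.json'], // workspace roots
  ['yarn.lock', 'package-lock.json', 'composer.lock'], // lockfiles
  ['package.json', 'go.mod', 'Cargo.toml', 'pom.xml'], // per-module manifests
];

// A directory wins when it holds a marker from a higher tier than any
// marker found in a deeper directory along the walk upward.
```
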
async function run() {
  const tests = [
    testSentinel,
    testOtherSentinels,
    testGitCliAndMarker,
    testHgMarkerOrCli,
    testWorkspacePnpm,
    testPackageJsonWorkspaces,
    testLockfiles,
    testLanguageConfigs,
    testPreferOuterOnTie,
    testBazelWorkspace,
    testNx,
    testTurbo,
    testRush,
    testGoWorkAndMod,
    testDenoJson,
    testGradleSettings,
    testMavenPom,
    testSbtBuild,
    testComposer,
    testCargo,
    testNixFlake,
    testChangesetConfig,
    testEnvCustomMarker,
    testPackageLowPriorityVsLock,
    testSvnMarker,
    testSymlinkStart,
    testSubmoduleLikeInnerGitFile,
  ];

  const results = [];
  for (const t of tests) {
    try {
      const r = await t();
      results.push({ ...r, ok: true });
      console.log(`✔ ${r.name}${r.skipped ? ' (skipped)' : ''}`);
    } catch (error) {
      console.error(`✖ ${t.name}:`, error && error.message ? error.message : error);
      results.push({ name: t.name, ok: false, error: String(error) });
    }
  }

  const failed = results.filter((r) => !r.ok);
  console.log('\nSummary:');
  for (const r of results) {
    console.log(`- ${r.name}: ${r.ok ? 'ok' : 'FAIL'}${r.skipped ? ' (skipped)' : ''}`);
  }

  if (failed.length > 0) {
    process.exitCode = 1;
  }
}

run().catch((error) => {
  console.error('Fatal error:', error);
  process.exit(1);
});

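The helpers these tests lean on (mkTmpDir, assertEqual, findProjectRoot) are defined earlier in the file. For orientation, mkTmpDir presumably wraps fs.mkdtemp, roughly like this sketch (an assumption, not the file's actual code):

```js
const os = require('node:os');
const path = require('node:path');
const fs = require('fs-extra');

// Hypothetical helper: create a unique temp directory per test case.
// fs-extra promisifies mkdtemp, so this returns a Promise<string>.
async function mkTmpDir(prefix) {
  return fs.mkdtemp(path.join(os.tmpdir(), `${prefix}-`));
}
```
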
@@ -403,6 +403,7 @@ async function promptInstallation() {
        { name: 'Cline', value: 'cline' },
        { name: 'Gemini CLI', value: 'gemini' },
        { name: 'Qwen Code', value: 'qwen-code' },
        { name: 'Crush', value: 'crush' },
        { name: 'GitHub Copilot', value: 'github-copilot' },
      ],
    },

@@ -28,6 +28,16 @@ ide-configurations:
      # To use BMad agents in Claude Code:
      # 1. Type /agent-name (e.g., "/dev", "/pm", "/architect")
      # 2. Claude will switch to that agent's persona
  crush:
    name: Crush
    rule-dir: .crush/commands/BMad/
    format: multi-file
    command-suffix: .md
    instructions: |
      # To use BMad agents in Crush:
      # 1. Press CTRL + P and press TAB
      # 2. Select agent or task
      # 3. Crush will switch to that agent's persona / task
  windsurf:
    name: Windsurf
    rule-dir: .windsurf/workflows/

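Note that rule-dir here points at .crush/commands/BMad/, while setupCrushForPackage below derives the output directory from each package's slash prefix; presumably BMad is the core package's prefix, so the two agree for the default install.
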
@@ -47,6 +47,9 @@ class IdeSetup extends BaseIdeSetup {
      case 'claude-code': {
        return this.setupClaudeCode(installDir, selectedAgent);
      }
      case 'crush': {
        return this.setupCrush(installDir, selectedAgent);
      }
      case 'windsurf': {
        return this.setupWindsurf(installDir, selectedAgent);
      }
@@ -99,6 +102,44 @@ class IdeSetup extends BaseIdeSetup {
    return true;
  }

  async setupCrush(installDir, selectedAgent) {
    // Setup bmad-core commands
    const coreSlashPrefix = await this.getCoreSlashPrefix(installDir);
    const coreAgents = selectedAgent ? [selectedAgent] : await this.getCoreAgentIds(installDir);
    const coreTasks = await this.getCoreTaskIds(installDir);
    await this.setupCrushForPackage(
      installDir,
      'core',
      coreSlashPrefix,
      coreAgents,
      coreTasks,
      '.bmad-core',
    );

    // Setup expansion pack commands
    const expansionPacks = await this.getInstalledExpansionPacks(installDir);
    for (const packInfo of expansionPacks) {
      const packSlashPrefix = await this.getExpansionPackSlashPrefix(packInfo.path);
      const packAgents = await this.getExpansionPackAgents(packInfo.path);
      const packTasks = await this.getExpansionPackTasks(packInfo.path);

      if (packAgents.length > 0 || packTasks.length > 0) {
        // Use the actual directory name where the expansion pack is installed
        const rootPath = path.relative(installDir, packInfo.path);
        await this.setupCrushForPackage(
          installDir,
          packInfo.name,
          packSlashPrefix,
          packAgents,
          packTasks,
          rootPath,
        );
      }
    }

    return true;
  }

  async setupClaudeCode(installDir, selectedAgent) {
    // Setup bmad-core commands
    const coreSlashPrefix = await this.getCoreSlashPrefix(installDir);
@@ -234,6 +275,94 @@ class IdeSetup extends BaseIdeSetup {
    console.log(chalk.dim(` - Tasks in: ${tasksDir}`));
  }

  async setupCrushForPackage(installDir, packageName, slashPrefix, agentIds, taskIds, rootPath) {
    const commandsBaseDir = path.join(installDir, '.crush', 'commands', slashPrefix);
    const agentsDir = path.join(commandsBaseDir, 'agents');
    const tasksDir = path.join(commandsBaseDir, 'tasks');

    // Ensure directories exist
    await fileManager.ensureDirectory(agentsDir);
    await fileManager.ensureDirectory(tasksDir);

    // Setup agents
    for (const agentId of agentIds) {
      // Find the agent file - for expansion packs, prefer the expansion pack version
      let agentPath;
      if (packageName === 'core') {
        // For core, use the normal search
        agentPath = await this.findAgentPath(agentId, installDir);
      } else {
        // For expansion packs, first try to find the agent in the expansion pack directory
        const expansionPackPath = path.join(installDir, rootPath, 'agents', `${agentId}.md`);
        if (await fileManager.pathExists(expansionPackPath)) {
          agentPath = expansionPackPath;
        } else {
          // Fall back to core if not found in expansion pack
          agentPath = await this.findAgentPath(agentId, installDir);
        }
      }

      const commandPath = path.join(agentsDir, `${agentId}.md`);

      if (agentPath) {
        // Create command file with agent content
        let agentContent = await fileManager.readFile(agentPath);

        // Replace {root} placeholder with the appropriate root path for this context
        agentContent = agentContent.replaceAll('{root}', rootPath);

        // Add command header
        let commandContent = `# /${agentId} Command\n\n`;
        commandContent += `When this command is used, adopt the following agent persona:\n\n`;
        commandContent += agentContent;

        await fileManager.writeFile(commandPath, commandContent);
        console.log(chalk.green(`✓ Created agent command: /${agentId}`));
      }
    }

    // Setup tasks
    for (const taskId of taskIds) {
      // Find the task file - for expansion packs, prefer the expansion pack version
      let taskPath;
      if (packageName === 'core') {
        // For core, use the normal search
        taskPath = await this.findTaskPath(taskId, installDir);
      } else {
        // For expansion packs, first try to find the task in the expansion pack directory
        const expansionPackPath = path.join(installDir, rootPath, 'tasks', `${taskId}.md`);
        if (await fileManager.pathExists(expansionPackPath)) {
          taskPath = expansionPackPath;
        } else {
          // Fall back to core if not found in expansion pack
          taskPath = await this.findTaskPath(taskId, installDir);
        }
      }

      const commandPath = path.join(tasksDir, `${taskId}.md`);

      if (taskPath) {
        // Create command file with task content
        let taskContent = await fileManager.readFile(taskPath);

        // Replace {root} placeholder with the appropriate root path for this context
        taskContent = taskContent.replaceAll('{root}', rootPath);

        // Add command header
        let commandContent = `# /${taskId} Task\n\n`;
        commandContent += `When this command is used, execute the following task:\n\n`;
        commandContent += taskContent;

        await fileManager.writeFile(commandPath, commandContent);
        console.log(chalk.green(`✓ Created task command: /${taskId}`));
      }
    }

    console.log(chalk.green(`\n✓ Created Crush commands for ${packageName} in ${commandsBaseDir}`));
    console.log(chalk.dim(` - Agents in: ${agentsDir}`));
    console.log(chalk.dim(` - Tasks in: ${tasksDir}`));
  }

  async setupWindsurf(installDir, selectedAgent) {
    const windsurfWorkflowDir = path.join(installDir, '.windsurf', 'workflows');
    const agents = selectedAgent ? [selectedAgent] : await this.getAllAgentIds(installDir);

@@ -1,6 +1,6 @@
{
  "name": "bmad-method",
  "version": "5.0.0",
  "version": "5.1.3",
  "description": "BMad Method installer - AI-powered Agile development framework",
  "keywords": [
    "bmad",

66
tools/preview-release-notes.js
Executable file
@@ -0,0 +1,66 @@
const { execSync } = require('node:child_process');
const fs = require('node:fs');

// Get the latest stable tag (exclude beta tags)
const allTags = execSync('git tag -l | sort -V', { encoding: 'utf8' }).split('\n').filter(Boolean);
const stableTags = allTags.filter((tag) => !tag.includes('beta'));
const latestTag = stableTags.at(-1) || 'v5.0.0';

// Get commits since last tag
const commits = execSync(`git log ${latestTag}..HEAD --pretty=format:"- %s" --reverse`, {
  encoding: 'utf8',
})
  .split('\n')
  .filter(Boolean);

// Categorize commits
const features = commits.filter((commit) => /^- (feat|Feature)/.test(commit));
const fixes = commits.filter((commit) => /^- (fix|Fix)/.test(commit));
const chores = commits.filter((commit) => /^- (chore|Chore)/.test(commit));
const others = commits.filter(
  (commit) => !/^- (feat|Feature|fix|Fix|chore|Chore|release:|Release:)/.test(commit),
);

// Get next version (you can modify this logic)
const currentVersion = require('../package.json').version;
const versionParts = currentVersion.split('.').map(Number);
const nextVersion = `${versionParts[0]}.${versionParts[1] + 1}.0`; // Default to minor bump

console.log(`## 🚀 What's New in v${nextVersion}\n`);

if (features.length > 0) {
  console.log('### ✨ New Features');
  for (const feature of features) console.log(feature);
  console.log('');
}

if (fixes.length > 0) {
  console.log('### 🐛 Bug Fixes');
  for (const fix of fixes) console.log(fix);
  console.log('');
}

if (others.length > 0) {
  console.log('### 📦 Other Changes');
  for (const other of others) console.log(other);
  console.log('');
}

if (chores.length > 0) {
  console.log('### 🔧 Maintenance');
  for (const chore of chores) console.log(chore);
  console.log('');
}

console.log('\n## 📦 Installation\n');
console.log('```bash');
console.log('npx bmad-method install');
console.log('```');

console.log(
  `\n**Full Changelog**: https://github.com/bmadcode/BMAD-METHOD/compare/${latestTag}...v${nextVersion}`,
);

console.log(`\n---\n📊 **Summary**: ${commits.length} commits since ${latestTag}`);
console.log(`🏷️ **Previous tag**: ${latestTag}`);
console.log(`🚀 **Next version**: v${nextVersion} (estimated)`);
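
The script takes no arguments; presumably it is run from the repository root as `node tools/preview-release-notes.js`. It only reads git history and prints Markdown to stdout, so it is safe to run at any time and the output can be pasted straight into a release draft.
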
@@ -1,30 +0,0 @@
/**
 * Semantic-release plugin to sync installer package.json version
 */

const fs = require('node:fs');
const path = require('node:path');

// This function runs during the "prepare" step of semantic-release
function prepare(_, { nextRelease, logger }) {
  // Define the path to the installer package.json file
  const file = path.join(process.cwd(), 'tools/installer/package.json');

  // If the file does not exist, skip syncing and log a message
  if (!fs.existsSync(file)) return logger.log('Installer package.json not found, skipping');

  // Read and parse the package.json file
  const package_ = JSON.parse(fs.readFileSync(file, 'utf8'));

  // Update the version field with the next release version
  package_.version = nextRelease.version;

  // Write the updated JSON back to the file
  fs.writeFileSync(file, JSON.stringify(package_, null, 2) + '\n');

  // Log success message
  logger.log(`Synced installer package.json to version ${nextRelease.version}`);
}

// Export the prepare function so semantic-release can use it
module.exports = { prepare };
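
For context, a local prepare plugin like the one removed above is wired in through the semantic-release plugin list. A hedged sketch of what that registration typically looks like in a release.config.js (the file name and the other plugins shown here are assumptions, not taken from this repository):

```js
// Hypothetical release.config.js: local plugins are referenced by path and
// run during the lifecycle step they export (here, `prepare`).
module.exports = {
  branches: ['main'],
  plugins: [
    '@semantic-release/commit-analyzer',
    '@semantic-release/release-notes-generator',
    './tools/sync-installer-version.js', // the kind of local plugin removed above
  ],
};
```
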
@@ -31,18 +31,35 @@ async function bumpVersion(type = 'patch') {
    process.exit(1);
  }

  console.log(chalk.yellow('⚠️ Manual version bumping is disabled.'));
  console.log(chalk.blue('🤖 This project uses semantic-release for automated versioning.'));
  console.log('');
  console.log(chalk.bold('To create a new release, use conventional commits:'));
  console.log(chalk.cyan('  feat: new feature (minor version bump)'));
  console.log(chalk.cyan('  fix: bug fix (patch version bump)'));
  console.log(chalk.cyan('  feat!: breaking change (major version bump)'));
  console.log('');
  console.log(chalk.dim('Example: git commit -m "feat: add new installer features"'));
  console.log(chalk.dim('Then push to main branch to trigger automatic release.'));
  const currentVersion = getCurrentVersion();
  const versionParts = currentVersion.split('.').map(Number);
  let newVersion;

  return null;
  switch (type) {
    case 'major': {
      newVersion = `${versionParts[0] + 1}.0.0`;
      break;
    }
    case 'minor': {
      newVersion = `${versionParts[0]}.${versionParts[1] + 1}.0`;
      break;
    }
    case 'patch': {
      newVersion = `${versionParts[0]}.${versionParts[1]}.${versionParts[2] + 1}`;
      break;
    }
  }

  console.log(chalk.blue(`Bumping version: ${currentVersion} → ${newVersion}`));

  // Update package.json
  const packageJson = JSON.parse(fs.readFileSync('package.json', 'utf8'));
  packageJson.version = newVersion;
  fs.writeFileSync('package.json', JSON.stringify(packageJson, null, 2) + '\n');

  console.log(chalk.green(`✓ Updated package.json to ${newVersion}`));

  return newVersion;
}

async function main() {