Compare commits
78 Commits
v0.0.19
...
add-apm-in
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f6cae496a9 | ||
|
|
f9dc5f63b9 | ||
|
|
229193e488 | ||
|
|
6a2f1950ae | ||
|
|
b5092a9dba | ||
|
|
fd77f82760 | ||
|
|
a9512e00fc | ||
|
|
794515d242 | ||
|
|
93bf878908 | ||
|
|
d501ed6939 | ||
|
|
52da4ce9d5 | ||
|
|
6e4f287913 | ||
|
|
9d449539bb | ||
|
|
63bc6b495d | ||
|
|
70b3db27db | ||
|
|
6e94588615 | ||
|
|
ad9c93c13b | ||
|
|
f979b64338 | ||
|
|
b1591282f6 | ||
|
|
60b015a094 | ||
|
|
0c2b367ba0 | ||
|
|
6b8b1a8b93 | ||
|
|
0e6f513c14 | ||
|
|
6f81f7d6a0 | ||
|
|
c875bd0f30 | ||
|
|
736e282562 | ||
|
|
542751fcd1 | ||
|
|
6c83e9ff66 | ||
|
|
a55448057b | ||
|
|
88cded5c4d | ||
|
|
0ad2f169d2 | ||
|
|
fa3171ca6e | ||
|
|
117ec67e47 | ||
|
|
5bd7027526 | ||
|
|
ec7d87f121 | ||
|
|
85e5eedef8 | ||
|
|
0a5b1ac538 | ||
|
|
eaf4caa231 | ||
|
|
c29e419b4f | ||
|
|
af3cf934e5 | ||
|
|
5787bb5537 | ||
|
|
d605d1e008 | ||
|
|
57024454bf | ||
|
|
1ae6b55c87 | ||
|
|
bfeb40cebc | ||
|
|
22b7098edb | ||
|
|
38ad8b0bac | ||
|
|
445902f2f0 | ||
|
|
20f6c9dede | ||
|
|
4cb63ed6f1 | ||
|
|
020fd27352 | ||
|
|
60ee3a75b5 | ||
|
|
b1858498d4 | ||
|
|
4b66f216e9 | ||
|
|
5c9d9a40ac | ||
|
|
ee6b83c1dd | ||
|
|
b31ca19962 | ||
|
|
15917c2094 | ||
|
|
f89361cd3d | ||
|
|
0f0e19da33 | ||
|
|
21b3dbf904 | ||
|
|
708e887022 | ||
|
|
78e6c9953c | ||
|
|
e21820fb92 | ||
|
|
51705217d4 | ||
|
|
e979ef0c7c | ||
|
|
5d1a174a95 | ||
|
|
f13eb86c0f | ||
|
|
6e2af26867 | ||
|
|
24ba30444e | ||
|
|
584175351a | ||
|
|
167038ca3c | ||
|
|
9140e9b009 | ||
|
|
fc8eb0434a | ||
|
|
fd61b8742d | ||
|
|
4591cf7df6 | ||
|
|
03ee3401e7 | ||
|
|
4b98c20f5d |
5
.github/CODEOWNERS
vendored
5
.github/CODEOWNERS
vendored
@@ -1,2 +1,7 @@
|
||||
# Global code owner
|
||||
* @localden
|
||||
|
||||
# APM CLI code owner
|
||||
src/apm_cli/ @danielmeppiel
|
||||
templates/apm/ @danielmeppiel
|
||||
docs/context-management.md @danielmeppiel
|
||||
|
||||
67
.github/workflows/docs.yml
vendored
Normal file
67
.github/workflows/docs.yml
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
# Build and deploy DocFX documentation to GitHub Pages
|
||||
name: Deploy Documentation to Pages
|
||||
|
||||
on:
|
||||
# Runs on pushes targeting the default branch
|
||||
push:
|
||||
branches: ["main"]
|
||||
paths:
|
||||
- 'docs/**'
|
||||
|
||||
# Allows you to run this workflow manually from the Actions tab
|
||||
workflow_dispatch:
|
||||
|
||||
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
|
||||
permissions:
|
||||
contents: read
|
||||
pages: write
|
||||
id-token: write
|
||||
|
||||
# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
|
||||
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
|
||||
concurrency:
|
||||
group: "pages"
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
# Build job
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0 # Fetch all history for git info
|
||||
|
||||
- name: Setup .NET
|
||||
uses: actions/setup-dotnet@v4
|
||||
with:
|
||||
dotnet-version: '8.x'
|
||||
|
||||
- name: Setup DocFX
|
||||
run: dotnet tool install -g docfx
|
||||
|
||||
- name: Build with DocFX
|
||||
run: |
|
||||
cd docs
|
||||
docfx docfx.json
|
||||
|
||||
- name: Setup Pages
|
||||
uses: actions/configure-pages@v5
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-pages-artifact@v3
|
||||
with:
|
||||
path: 'docs/_site'
|
||||
|
||||
# Deploy job
|
||||
deploy:
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
steps:
|
||||
- name: Deploy to GitHub Pages
|
||||
id: deployment
|
||||
uses: actions/deploy-pages@v4
|
||||
191
.github/workflows/manual-release.yml
vendored
191
.github/workflows/manual-release.yml
vendored
@@ -1,191 +0,0 @@
|
||||
name: Manual Release
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version_bump:
|
||||
description: 'Version bump type'
|
||||
required: true
|
||||
default: 'patch'
|
||||
type: choice
|
||||
options:
|
||||
- patch
|
||||
- minor
|
||||
- major
|
||||
|
||||
jobs:
|
||||
manual_release:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Calculate new version
|
||||
id: version
|
||||
run: |
|
||||
# Get the latest tag, or use v0.0.0 if no tags exist
|
||||
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
|
||||
echo "latest_tag=$LATEST_TAG" >> $GITHUB_OUTPUT
|
||||
|
||||
# Extract version number
|
||||
VERSION=$(echo $LATEST_TAG | sed 's/v//')
|
||||
IFS='.' read -ra VERSION_PARTS <<< "$VERSION"
|
||||
MAJOR=${VERSION_PARTS[0]:-0}
|
||||
MINOR=${VERSION_PARTS[1]:-0}
|
||||
PATCH=${VERSION_PARTS[2]:-0}
|
||||
|
||||
# Increment based on input
|
||||
case "${{ github.event.inputs.version_bump }}" in
|
||||
"major")
|
||||
MAJOR=$((MAJOR + 1))
|
||||
MINOR=0
|
||||
PATCH=0
|
||||
;;
|
||||
"minor")
|
||||
MINOR=$((MINOR + 1))
|
||||
PATCH=0
|
||||
;;
|
||||
"patch")
|
||||
PATCH=$((PATCH + 1))
|
||||
;;
|
||||
esac
|
||||
|
||||
NEW_VERSION="v$MAJOR.$MINOR.$PATCH"
|
||||
echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
|
||||
echo "New version will be: $NEW_VERSION (was $LATEST_TAG)"
|
||||
|
||||
- name: Create release package
|
||||
run: |
|
||||
# Create base package directory structure
|
||||
mkdir -p sdd-package-base
|
||||
|
||||
# Copy common folders to base
|
||||
echo "Packaging SDD common components..."
|
||||
|
||||
if [ -d "memory" ]; then
|
||||
cp -r memory sdd-package-base/
|
||||
echo "✓ Copied memory folder ($(find memory -type f | wc -l) files)"
|
||||
else
|
||||
echo "⚠️ memory folder not found"
|
||||
fi
|
||||
|
||||
if [ -d "scripts" ]; then
|
||||
cp -r scripts sdd-package-base/
|
||||
echo "✓ Copied scripts folder ($(find scripts -type f | wc -l) files)"
|
||||
else
|
||||
echo "⚠️ scripts folder not found"
|
||||
fi
|
||||
|
||||
# Create Claude Code package
|
||||
echo "Creating Claude Code package..."
|
||||
mkdir -p sdd-claude-package
|
||||
cp -r sdd-package-base/* sdd-claude-package/
|
||||
if [ -d "agent_templates/claude" ]; then
|
||||
cp -r agent_templates/claude sdd-claude-package/.claude
|
||||
echo "✓ Added Claude Code commands ($(find agent_templates/claude -type f | wc -l) files)"
|
||||
else
|
||||
echo "⚠️ agent_templates/claude folder not found"
|
||||
fi
|
||||
|
||||
# Create Gemini CLI package
|
||||
echo "Creating Gemini CLI package..."
|
||||
mkdir -p sdd-gemini-package
|
||||
cp -r sdd-package-base/* sdd-gemini-package/
|
||||
if [ -d "agent_templates/gemini" ]; then
|
||||
cp -r agent_templates/gemini sdd-gemini-package/.gemini
|
||||
# Move GEMINI.md to root for easier access
|
||||
if [ -f "sdd-gemini-package/.gemini/GEMINI.md" ]; then
|
||||
mv sdd-gemini-package/.gemini/GEMINI.md sdd-gemini-package/GEMINI.md
|
||||
echo "✓ Moved GEMINI.md to root of Gemini package"
|
||||
fi
|
||||
# Remove empty .gemini folder if it only contained GEMINI.md
|
||||
if [ -d "sdd-gemini-package/.gemini" ] && [ -z "$(find sdd-gemini-package/.gemini -type f)" ]; then
|
||||
rm -rf sdd-gemini-package/.gemini
|
||||
echo "✓ Removed empty .gemini folder"
|
||||
fi
|
||||
echo "✓ Added Gemini CLI commands ($(find agent_templates/gemini -type f | wc -l) files)"
|
||||
else
|
||||
echo "⚠️ agent_templates/gemini folder not found"
|
||||
fi
|
||||
|
||||
# Create GitHub Copilot package
|
||||
echo "Creating GitHub Copilot package..."
|
||||
mkdir -p sdd-copilot-package
|
||||
cp -r sdd-package-base/* sdd-copilot-package/
|
||||
if [ -d "agent_templates/copilot" ]; then
|
||||
mkdir -p sdd-copilot-package/.github
|
||||
cp -r agent_templates/copilot/* sdd-copilot-package/.github/
|
||||
echo "✓ Added Copilot instructions to .github ($(find agent_templates/copilot -type f | wc -l) files)"
|
||||
else
|
||||
echo "⚠️ agent_templates/copilot folder not found"
|
||||
fi
|
||||
|
||||
# Create archive files for each package
|
||||
echo "Creating archive files..."
|
||||
cd sdd-claude-package && zip -r ../spec-kit-template-claude-${{ steps.version.outputs.new_version }}.zip . && cd ..
|
||||
|
||||
cd sdd-gemini-package && zip -r ../spec-kit-template-gemini-${{ steps.version.outputs.new_version }}.zip . && cd ..
|
||||
|
||||
cd sdd-copilot-package && zip -r ../spec-kit-template-copilot-${{ steps.version.outputs.new_version }}.zip . && cd ..
|
||||
|
||||
echo ""
|
||||
echo "📦 Packages created:"
|
||||
echo "Claude: $(ls -lh spec-kit-template-claude-*.zip | awk '{print $5}')"
|
||||
echo "Gemini: $(ls -lh spec-kit-template-gemini-*.zip | awk '{print $5}')"
|
||||
echo "Copilot: $(ls -lh spec-kit-template-copilot-*.zip | awk '{print $5}')"
|
||||
echo "Copilot: $(ls -lh sdd-template-copilot-*.zip | awk '{print $5}')"
|
||||
|
||||
- name: Generate detailed release notes
|
||||
run: |
|
||||
LAST_TAG=${{ steps.version.outputs.latest_tag }}
|
||||
|
||||
# Get commit range
|
||||
if [ "$LAST_TAG" = "v0.0.0" ]; then
|
||||
COMMIT_RANGE="HEAD~10..HEAD"
|
||||
COMMITS=$(git log --oneline --pretty=format:"- %s" $COMMIT_RANGE 2>/dev/null || echo "- Initial release")
|
||||
else
|
||||
COMMIT_RANGE="$LAST_TAG..HEAD"
|
||||
COMMITS=$(git log --oneline --pretty=format:"- %s" $COMMIT_RANGE 2>/dev/null || echo "- No changes since last release")
|
||||
fi
|
||||
|
||||
# Count files in each directory
|
||||
CLAUDE_COUNT=$(find agent_templates/claude -type f 2>/dev/null | wc -l || echo "0")
|
||||
GEMINI_COUNT=$(find agent_templates/gemini -type f 2>/dev/null | wc -l || echo "0")
|
||||
COPILOT_COUNT=$(find agent_templates/copilot -type f 2>/dev/null | wc -l || echo "0")
|
||||
MEMORY_COUNT=$(find memory -type f 2>/dev/null | wc -l || echo "0")
|
||||
SCRIPTS_COUNT=$(find scripts -type f 2>/dev/null | wc -l || echo "0")
|
||||
|
||||
cat > release_notes.md << EOF
|
||||
Template release ${{ steps.version.outputs.new_version }}
|
||||
|
||||
Updated specification-driven development templates for GitHub Copilot, Claude Code, and Gemini CLI.
|
||||
|
||||
Download the template for your preferred AI assistant:
|
||||
- spec-kit-template-copilot-${{ steps.version.outputs.new_version }}.zip
|
||||
- spec-kit-template-claude-${{ steps.version.outputs.new_version }}.zip
|
||||
- spec-kit-template-gemini-${{ steps.version.outputs.new_version }}.zip
|
||||
|
||||
Changes since $LAST_TAG:
|
||||
$COMMITS
|
||||
EOF
|
||||
|
||||
- name: Create GitHub Release
|
||||
run: |
|
||||
# Remove 'v' prefix from version for release title
|
||||
VERSION_NO_V=${{ steps.version.outputs.new_version }}
|
||||
VERSION_NO_V=${VERSION_NO_V#v}
|
||||
|
||||
gh release create ${{ steps.version.outputs.new_version }} \
|
||||
spec-kit-template-copilot-${{ steps.version.outputs.new_version }}.zip \
|
||||
spec-kit-template-claude-${{ steps.version.outputs.new_version }}.zip \
|
||||
spec-kit-template-gemini-${{ steps.version.outputs.new_version }}.zip \
|
||||
--title "Spec Kit Templates - $VERSION_NO_V" \
|
||||
--notes-file release_notes.md
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
317
.github/workflows/release.yml
vendored
317
.github/workflows/release.yml
vendored
@@ -3,220 +3,129 @@ name: Create Release
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
paths:
|
||||
- 'memory/**'
|
||||
- 'scripts/**'
|
||||
- 'templates/**'
|
||||
- '.github/workflows/**'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Get latest tag
|
||||
id: get_tag
|
||||
run: |
|
||||
# Get the latest tag, or use v0.0.0 if no tags exist
|
||||
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
|
||||
echo "latest_tag=$LATEST_TAG" >> $GITHUB_OUTPUT
|
||||
|
||||
# Extract version number and increment
|
||||
VERSION=$(echo $LATEST_TAG | sed 's/v//')
|
||||
IFS='.' read -ra VERSION_PARTS <<< "$VERSION"
|
||||
MAJOR=${VERSION_PARTS[0]:-0}
|
||||
MINOR=${VERSION_PARTS[1]:-0}
|
||||
PATCH=${VERSION_PARTS[2]:-0}
|
||||
|
||||
# Increment patch version
|
||||
PATCH=$((PATCH + 1))
|
||||
NEW_VERSION="v$MAJOR.$MINOR.$PATCH"
|
||||
|
||||
echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
|
||||
echo "New version will be: $NEW_VERSION"
|
||||
|
||||
- name: Check if release already exists
|
||||
id: check_release
|
||||
run: |
|
||||
if gh release view ${{ steps.get_tag.outputs.new_version }} >/dev/null 2>&1; then
|
||||
echo "exists=true" >> $GITHUB_OUTPUT
|
||||
echo "Release ${{ steps.get_tag.outputs.new_version }} already exists, skipping..."
|
||||
else
|
||||
echo "exists=false" >> $GITHUB_OUTPUT
|
||||
echo "Release ${{ steps.get_tag.outputs.new_version }} does not exist, proceeding..."
|
||||
fi
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Create release package
|
||||
if: steps.check_release.outputs.exists == 'false'
|
||||
run: |
|
||||
# Create base package directory structure
|
||||
mkdir -p sdd-package-base
|
||||
|
||||
# Copy common folders to base
|
||||
if [ -d "memory" ]; then
|
||||
cp -r memory sdd-package-base/
|
||||
echo "Copied memory folder"
|
||||
fi
|
||||
|
||||
if [ -d "scripts" ]; then
|
||||
cp -r scripts sdd-package-base/
|
||||
echo "Copied scripts folder"
|
||||
fi
|
||||
|
||||
if [ -d "templates" ]; then
|
||||
mkdir -p sdd-package-base/templates
|
||||
# Copy templates folder but exclude the commands directory
|
||||
find templates -type f -not -path "templates/commands/*" -exec cp --parents {} sdd-package-base/ \;
|
||||
echo "Copied templates folder (excluding commands directory)"
|
||||
fi
|
||||
|
||||
# Generate command files for each agent from source templates
|
||||
generate_commands() {
|
||||
local agent=$1
|
||||
local ext=$2
|
||||
local arg_format=$3
|
||||
local output_dir=$4
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Get latest tag
|
||||
id: get_tag
|
||||
run: |
|
||||
# Get the latest tag, or use v0.0.0 if no tags exist
|
||||
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
|
||||
echo "latest_tag=$LATEST_TAG" >> $GITHUB_OUTPUT
|
||||
|
||||
mkdir -p "$output_dir"
|
||||
# Extract version number and increment
|
||||
VERSION=$(echo $LATEST_TAG | sed 's/v//')
|
||||
IFS='.' read -ra VERSION_PARTS <<< "$VERSION"
|
||||
MAJOR=${VERSION_PARTS[0]:-0}
|
||||
MINOR=${VERSION_PARTS[1]:-0}
|
||||
PATCH=${VERSION_PARTS[2]:-0}
|
||||
|
||||
for template in templates/commands/*.md; do
|
||||
if [[ -f "$template" ]]; then
|
||||
name=$(basename "$template" .md)
|
||||
description=$(awk '/^description:/ {gsub(/^description: *"?/, ""); gsub(/"$/, ""); print; exit}' "$template" | tr -d '\r')
|
||||
content=$(awk '/^---$/{if(++count==2) start=1; next} start' "$template" | sed "s/{ARGS}/$arg_format/g")
|
||||
|
||||
case $ext in
|
||||
"toml")
|
||||
{
|
||||
echo "description = \"$description\""
|
||||
echo ""
|
||||
echo "prompt = \"\"\""
|
||||
echo "$content"
|
||||
echo "\"\"\""
|
||||
} > "$output_dir/$name.$ext"
|
||||
;;
|
||||
"md")
|
||||
echo "$content" > "$output_dir/$name.$ext"
|
||||
;;
|
||||
"prompt.md")
|
||||
{
|
||||
echo "# $(echo "$description" | sed 's/\. .*//')"
|
||||
echo ""
|
||||
echo "$content"
|
||||
} > "$output_dir/$name.$ext"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Create Claude Code package
|
||||
mkdir -p sdd-claude-package
|
||||
cp -r sdd-package-base/* sdd-claude-package/
|
||||
mkdir -p sdd-claude-package/.claude/commands
|
||||
generate_commands "claude" "md" "\$ARGUMENTS" "sdd-claude-package/.claude/commands"
|
||||
echo "Created Claude Code package"
|
||||
|
||||
# Create Gemini CLI package
|
||||
mkdir -p sdd-gemini-package
|
||||
cp -r sdd-package-base/* sdd-gemini-package/
|
||||
mkdir -p sdd-gemini-package/.gemini/commands
|
||||
generate_commands "gemini" "toml" "{{args}}" "sdd-gemini-package/.gemini/commands"
|
||||
if [ -f "agent_templates/gemini/GEMINI.md" ]; then
|
||||
cp agent_templates/gemini/GEMINI.md sdd-gemini-package/GEMINI.md
|
||||
fi
|
||||
echo "Created Gemini CLI package"
|
||||
|
||||
# Create GitHub Copilot package
|
||||
mkdir -p sdd-copilot-package
|
||||
cp -r sdd-package-base/* sdd-copilot-package/
|
||||
mkdir -p sdd-copilot-package/.github/prompts
|
||||
generate_commands "copilot" "prompt.md" "\$ARGUMENTS" "sdd-copilot-package/.github/prompts"
|
||||
echo "Created GitHub Copilot package"
|
||||
|
||||
# Create archive files for each package
|
||||
cd sdd-claude-package && zip -r ../spec-kit-template-claude-${{ steps.get_tag.outputs.new_version }}.zip . && cd ..
|
||||
|
||||
cd sdd-gemini-package && zip -r ../spec-kit-template-gemini-${{ steps.get_tag.outputs.new_version }}.zip . && cd ..
|
||||
|
||||
cd sdd-copilot-package && zip -r ../spec-kit-template-copilot-${{ steps.get_tag.outputs.new_version }}.zip . && cd ..
|
||||
|
||||
# List contents for verification
|
||||
echo "Claude package contents:"
|
||||
unzip -l spec-kit-template-claude-${{ steps.get_tag.outputs.new_version }}.zip | head -10
|
||||
echo "Gemini package contents:"
|
||||
unzip -l spec-kit-template-gemini-${{ steps.get_tag.outputs.new_version }}.zip | head -10
|
||||
echo "Copilot package contents:"
|
||||
unzip -l spec-kit-template-copilot-${{ steps.get_tag.outputs.new_version }}.zip | head -10
|
||||
|
||||
- name: Generate release notes
|
||||
if: steps.check_release.outputs.exists == 'false'
|
||||
id: release_notes
|
||||
run: |
|
||||
# Get commits since last tag
|
||||
LAST_TAG=${{ steps.get_tag.outputs.latest_tag }}
|
||||
if [ "$LAST_TAG" = "v0.0.0" ]; then
|
||||
# Check how many commits we have and use that as the limit
|
||||
COMMIT_COUNT=$(git rev-list --count HEAD)
|
||||
if [ "$COMMIT_COUNT" -gt 10 ]; then
|
||||
COMMITS=$(git log --oneline --pretty=format:"- %s" HEAD~10..HEAD)
|
||||
# Increment patch version
|
||||
PATCH=$((PATCH + 1))
|
||||
NEW_VERSION="v$MAJOR.$MINOR.$PATCH"
|
||||
|
||||
echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
|
||||
echo "New version will be: $NEW_VERSION"
|
||||
- name: Check if release already exists
|
||||
id: check_release
|
||||
run: |
|
||||
if gh release view ${{ steps.get_tag.outputs.new_version }} >/dev/null 2>&1; then
|
||||
echo "exists=true" >> $GITHUB_OUTPUT
|
||||
echo "Release ${{ steps.get_tag.outputs.new_version }} already exists, skipping..."
|
||||
else
|
||||
COMMITS=$(git log --oneline --pretty=format:"- %s" HEAD~$COMMIT_COUNT..HEAD 2>/dev/null || git log --oneline --pretty=format:"- %s")
|
||||
echo "exists=false" >> $GITHUB_OUTPUT
|
||||
echo "Release ${{ steps.get_tag.outputs.new_version }} does not exist, proceeding..."
|
||||
fi
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Create release package variants
|
||||
if: steps.check_release.outputs.exists == 'false'
|
||||
run: |
|
||||
chmod +x .github/workflows/scripts/create-release-packages.sh
|
||||
.github/workflows/scripts/create-release-packages.sh ${{ steps.get_tag.outputs.new_version }}
|
||||
- name: Generate release notes
|
||||
if: steps.check_release.outputs.exists == 'false'
|
||||
id: release_notes
|
||||
run: |
|
||||
# Get commits since last tag
|
||||
LAST_TAG=${{ steps.get_tag.outputs.latest_tag }}
|
||||
if [ "$LAST_TAG" = "v0.0.0" ]; then
|
||||
# Check how many commits we have and use that as the limit
|
||||
COMMIT_COUNT=$(git rev-list --count HEAD)
|
||||
if [ "$COMMIT_COUNT" -gt 10 ]; then
|
||||
COMMITS=$(git log --oneline --pretty=format:"- %s" HEAD~10..HEAD)
|
||||
else
|
||||
COMMITS=$(git log --oneline --pretty=format:"- %s" HEAD~$COMMIT_COUNT..HEAD 2>/dev/null || git log --oneline --pretty=format:"- %s")
|
||||
fi
|
||||
else
|
||||
COMMITS=$(git log --oneline --pretty=format:"- %s" $LAST_TAG..HEAD)
|
||||
fi
|
||||
|
||||
# Create release notes
|
||||
cat > release_notes.md << EOF
|
||||
Template release ${{ steps.get_tag.outputs.new_version }}
|
||||
|
||||
Updated specification-driven development templates for GitHub Copilot, Claude Code, Gemini CLI, and Cursor.
|
||||
|
||||
Now includes per-script variants for POSIX shell (sh) and PowerShell (ps).
|
||||
|
||||
Download the template for your preferred AI assistant + script type:
|
||||
- spec-kit-template-copilot-sh-${{ steps.get_tag.outputs.new_version }}.zip
|
||||
- spec-kit-template-copilot-ps-${{ steps.get_tag.outputs.new_version }}.zip
|
||||
- spec-kit-template-claude-sh-${{ steps.get_tag.outputs.new_version }}.zip
|
||||
- spec-kit-template-claude-ps-${{ steps.get_tag.outputs.new_version }}.zip
|
||||
- spec-kit-template-gemini-sh-${{ steps.get_tag.outputs.new_version }}.zip
|
||||
- spec-kit-template-gemini-ps-${{ steps.get_tag.outputs.new_version }}.zip
|
||||
- spec-kit-template-cursor-sh-${{ steps.get_tag.outputs.new_version }}.zip
|
||||
- spec-kit-template-cursor-ps-${{ steps.get_tag.outputs.new_version }}.zip
|
||||
EOF
|
||||
|
||||
echo "Generated release notes:"
|
||||
cat release_notes.md
|
||||
- name: Create GitHub Release
|
||||
if: steps.check_release.outputs.exists == 'false'
|
||||
run: |
|
||||
# Remove 'v' prefix from version for release title
|
||||
VERSION_NO_V=${{ steps.get_tag.outputs.new_version }}
|
||||
VERSION_NO_V=${VERSION_NO_V#v}
|
||||
|
||||
gh release create ${{ steps.get_tag.outputs.new_version }} \
|
||||
spec-kit-template-copilot-sh-${{ steps.get_tag.outputs.new_version }}.zip \
|
||||
spec-kit-template-copilot-ps-${{ steps.get_tag.outputs.new_version }}.zip \
|
||||
spec-kit-template-claude-sh-${{ steps.get_tag.outputs.new_version }}.zip \
|
||||
spec-kit-template-claude-ps-${{ steps.get_tag.outputs.new_version }}.zip \
|
||||
spec-kit-template-gemini-sh-${{ steps.get_tag.outputs.new_version }}.zip \
|
||||
spec-kit-template-gemini-ps-${{ steps.get_tag.outputs.new_version }}.zip \
|
||||
spec-kit-template-cursor-sh-${{ steps.get_tag.outputs.new_version }}.zip \
|
||||
spec-kit-template-cursor-ps-${{ steps.get_tag.outputs.new_version }}.zip \
|
||||
--title "Spec Kit Templates - $VERSION_NO_V" \
|
||||
--notes-file release_notes.md
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Update version in pyproject.toml (for release artifacts only)
|
||||
if: steps.check_release.outputs.exists == 'false'
|
||||
run: |
|
||||
# Update version in pyproject.toml (remove 'v' prefix for Python versioning)
|
||||
VERSION=${{ steps.get_tag.outputs.new_version }}
|
||||
PYTHON_VERSION=${VERSION#v}
|
||||
|
||||
if [ -f "pyproject.toml" ]; then
|
||||
sed -i "s/version = \".*\"/version = \"$PYTHON_VERSION\"/" pyproject.toml
|
||||
echo "Updated pyproject.toml version to $PYTHON_VERSION (for release artifacts only)"
|
||||
fi
|
||||
else
|
||||
COMMITS=$(git log --oneline --pretty=format:"- %s" $LAST_TAG..HEAD)
|
||||
fi
|
||||
|
||||
# Create release notes
|
||||
cat > release_notes.md << EOF
|
||||
Template release ${{ steps.get_tag.outputs.new_version }}
|
||||
|
||||
Updated specification-driven development templates for GitHub Copilot, Claude Code, and Gemini CLI.
|
||||
|
||||
Download the template for your preferred AI assistant:
|
||||
- spec-kit-template-copilot-${{ steps.get_tag.outputs.new_version }}.zip
|
||||
- spec-kit-template-claude-${{ steps.get_tag.outputs.new_version }}.zip
|
||||
- spec-kit-template-gemini-${{ steps.get_tag.outputs.new_version }}.zip
|
||||
EOF
|
||||
|
||||
echo "Generated release notes:"
|
||||
cat release_notes.md
|
||||
|
||||
- name: Create GitHub Release
|
||||
if: steps.check_release.outputs.exists == 'false'
|
||||
run: |
|
||||
# Remove 'v' prefix from version for release title
|
||||
VERSION_NO_V=${{ steps.get_tag.outputs.new_version }}
|
||||
VERSION_NO_V=${VERSION_NO_V#v}
|
||||
|
||||
gh release create ${{ steps.get_tag.outputs.new_version }} \
|
||||
spec-kit-template-copilot-${{ steps.get_tag.outputs.new_version }}.zip \
|
||||
spec-kit-template-claude-${{ steps.get_tag.outputs.new_version }}.zip \
|
||||
spec-kit-template-gemini-${{ steps.get_tag.outputs.new_version }}.zip \
|
||||
--title "Spec Kit Templates - $VERSION_NO_V" \
|
||||
--notes-file release_notes.md
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Update version in pyproject.toml (for release artifacts only)
|
||||
if: steps.check_release.outputs.exists == 'false'
|
||||
run: |
|
||||
# Update version in pyproject.toml (remove 'v' prefix for Python versioning)
|
||||
VERSION=${{ steps.get_tag.outputs.new_version }}
|
||||
PYTHON_VERSION=${VERSION#v}
|
||||
|
||||
if [ -f "pyproject.toml" ]; then
|
||||
sed -i "s/version = \".*\"/version = \"$PYTHON_VERSION\"/" pyproject.toml
|
||||
echo "Updated pyproject.toml version to $PYTHON_VERSION (for release artifacts only)"
|
||||
fi
|
||||
|
||||
# Note: No longer committing version changes back to main branch
|
||||
# The version is only updated in the release artifacts
|
||||
|
||||
191
.github/workflows/scripts/create-release-packages.sh
vendored
Normal file
191
.github/workflows/scripts/create-release-packages.sh
vendored
Normal file
@@ -0,0 +1,191 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# create-release-packages.sh (workflow-local)
|
||||
# Build Spec Kit template release archives for each supported AI assistant and script type.
|
||||
# Usage: .github/workflows/scripts/create-release-packages.sh <version>
|
||||
# Version argument should include leading 'v'.
|
||||
# Optionally set AGENTS and/or SCRIPTS env vars to limit what gets built.
|
||||
# AGENTS : space or comma separated subset of: claude gemini copilot (default: all)
|
||||
# SCRIPTS : space or comma separated subset of: sh ps (default: both)
|
||||
# Examples:
|
||||
# AGENTS=claude SCRIPTS=sh $0 v0.2.0
|
||||
# AGENTS="copilot,gemini" $0 v0.2.0
|
||||
# SCRIPTS=ps $0 v0.2.0
|
||||
|
||||
if [[ $# -ne 1 ]]; then
|
||||
echo "Usage: $0 <version-with-v-prefix>" >&2
|
||||
exit 1
|
||||
fi
|
||||
NEW_VERSION="$1"
|
||||
if [[ ! $NEW_VERSION =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
echo "Version must look like v0.0.0" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Building release packages for $NEW_VERSION"
|
||||
|
||||
rm -rf sdd-package-base* sdd-*-package-* spec-kit-template-*-${NEW_VERSION}.zip || true
|
||||
|
||||
rewrite_paths() {
|
||||
sed -E \
|
||||
-e 's@(/?)memory/@.specify/memory/@g' \
|
||||
-e 's@(/?)scripts/@.specify/scripts/@g' \
|
||||
-e 's@(/?)templates/@.specify/templates/@g'
|
||||
}
|
||||
|
||||
generate_commands() {
|
||||
local agent=$1 ext=$2 arg_format=$3 output_dir=$4 script_variant=$5
|
||||
mkdir -p "$output_dir"
|
||||
for template in templates/commands/*.md; do
|
||||
[[ -f "$template" ]] || continue
|
||||
local name description script_command body
|
||||
name=$(basename "$template" .md)
|
||||
|
||||
# Normalize line endings
|
||||
file_content=$(tr -d '\r' < "$template")
|
||||
|
||||
# Extract description and script command from YAML frontmatter
|
||||
description=$(printf '%s\n' "$file_content" | awk '/^description:/ {sub(/^description:[[:space:]]*/, ""); print; exit}')
|
||||
script_command=$(printf '%s\n' "$file_content" | awk -v sv="$script_variant" '/^[[:space:]]*'"$script_variant"':[[:space:]]*/ {sub(/^[[:space:]]*'"$script_variant"':[[:space:]]*/, ""); print; exit}')
|
||||
|
||||
if [[ -z $script_command ]]; then
|
||||
echo "Warning: no script command found for $script_variant in $template" >&2
|
||||
script_command="(Missing script command for $script_variant)"
|
||||
fi
|
||||
|
||||
# Replace {SCRIPT} placeholder with the script command
|
||||
body=$(printf '%s\n' "$file_content" | sed "s|{SCRIPT}|${script_command}|g")
|
||||
|
||||
# Remove the scripts: section from frontmatter while preserving YAML structure
|
||||
body=$(printf '%s\n' "$body" | awk '
|
||||
/^---$/ { print; if (++dash_count == 1) in_frontmatter=1; else in_frontmatter=0; next }
|
||||
in_frontmatter && /^scripts:$/ { skip_scripts=1; next }
|
||||
in_frontmatter && /^[a-zA-Z].*:/ && skip_scripts { skip_scripts=0 }
|
||||
in_frontmatter && skip_scripts && /^[[:space:]]/ { next }
|
||||
{ print }
|
||||
')
|
||||
|
||||
# Apply other substitutions
|
||||
body=$(printf '%s\n' "$body" | sed "s/{ARGS}/$arg_format/g" | sed "s/__AGENT__/$agent/g" | rewrite_paths)
|
||||
|
||||
case $ext in
|
||||
toml)
|
||||
{ echo "description = \"$description\""; echo; echo "prompt = \"\"\""; echo "$body"; echo "\"\"\""; } > "$output_dir/$name.$ext" ;;
|
||||
md)
|
||||
echo "$body" > "$output_dir/$name.$ext" ;;
|
||||
prompt.md)
|
||||
echo "$body" > "$output_dir/$name.$ext" ;;
|
||||
esac
|
||||
done
|
||||
}
|
||||
|
||||
build_variant() {
|
||||
local agent=$1 script=$2
|
||||
local base_dir="sdd-${agent}-package-${script}"
|
||||
echo "Building $agent ($script) package..."
|
||||
mkdir -p "$base_dir"
|
||||
|
||||
# Copy base structure but filter scripts by variant
|
||||
SPEC_DIR="$base_dir/.specify"
|
||||
mkdir -p "$SPEC_DIR"
|
||||
|
||||
[[ -d memory ]] && { cp -r memory "$SPEC_DIR/"; echo "Copied memory -> .specify"; }
|
||||
|
||||
# Only copy the relevant script variant directory
|
||||
if [[ -d scripts ]]; then
|
||||
mkdir -p "$SPEC_DIR/scripts"
|
||||
case $script in
|
||||
sh)
|
||||
[[ -d scripts/bash ]] && { cp -r scripts/bash "$SPEC_DIR/scripts/"; echo "Copied scripts/bash -> .specify/scripts"; }
|
||||
# Copy any script files that aren't in variant-specific directories
|
||||
find scripts -maxdepth 1 -type f -exec cp {} "$SPEC_DIR/scripts/" \; 2>/dev/null || true
|
||||
;;
|
||||
ps)
|
||||
[[ -d scripts/powershell ]] && { cp -r scripts/powershell "$SPEC_DIR/scripts/"; echo "Copied scripts/powershell -> .specify/scripts"; }
|
||||
# Copy any script files that aren't in variant-specific directories
|
||||
find scripts -maxdepth 1 -type f -exec cp {} "$SPEC_DIR/scripts/" \; 2>/dev/null || true
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
[[ -d templates ]] && { mkdir -p "$SPEC_DIR/templates"; find templates -type f -not -path "templates/commands/*" -exec cp --parents {} "$SPEC_DIR"/ \; ; echo "Copied templates -> .specify/templates"; }
|
||||
# Inject variant into plan-template.md within .specify/templates if present
|
||||
local plan_tpl="$base_dir/.specify/templates/plan-template.md"
|
||||
if [[ -f "$plan_tpl" ]]; then
|
||||
plan_norm=$(tr -d '\r' < "$plan_tpl")
|
||||
variant_line=$(printf '%s\n' "$plan_norm" | grep -E "<!--[[:space:]]*VARIANT:$script" | head -1 | sed -E "s/.*VARIANT:$script[[:space:]]+//; s/-->.*//; s/^[[:space:]]+//; s/[[:space:]]+$//")
|
||||
if [[ -n $variant_line ]]; then
|
||||
tmp_file=$(mktemp)
|
||||
sed "s|VARIANT-INJECT|${variant_line}|" "$plan_tpl" | tr -d '\r' | sed "s|__AGENT__|${agent}|g" | sed '/<!--[[:space:]]*VARIANT:sh/d' | sed '/<!--[[:space:]]*VARIANT:ps/d' > "$tmp_file" && mv "$tmp_file" "$plan_tpl"
|
||||
else
|
||||
echo "Warning: no plan-template variant for $script (pattern not matched)" >&2
|
||||
fi
|
||||
fi
|
||||
case $agent in
|
||||
claude)
|
||||
mkdir -p "$base_dir/.claude/commands"
|
||||
generate_commands claude md "\$ARGUMENTS" "$base_dir/.claude/commands" "$script" ;;
|
||||
gemini)
|
||||
mkdir -p "$base_dir/.gemini/commands"
|
||||
generate_commands gemini toml "{{args}}" "$base_dir/.gemini/commands" "$script"
|
||||
[[ -f agent_templates/gemini/GEMINI.md ]] && cp agent_templates/gemini/GEMINI.md "$base_dir/GEMINI.md" ;;
|
||||
copilot)
|
||||
mkdir -p "$base_dir/.github/prompts"
|
||||
generate_commands copilot prompt.md "\$ARGUMENTS" "$base_dir/.github/prompts" "$script" ;;
|
||||
cursor)
|
||||
mkdir -p "$base_dir/.cursor/commands"
|
||||
generate_commands cursor md "\$ARGUMENTS" "$base_dir/.cursor/commands" "$script" ;;
|
||||
esac
|
||||
( cd "$base_dir" && zip -r "../spec-kit-template-${agent}-${script}-${NEW_VERSION}.zip" . )
|
||||
echo "Created spec-kit-template-${agent}-${script}-${NEW_VERSION}.zip"
|
||||
}
|
||||
|
||||
# Determine agent list
|
||||
ALL_AGENTS=(claude gemini copilot cursor)
|
||||
ALL_SCRIPTS=(sh ps)
|
||||
|
||||
norm_list() {
|
||||
# convert comma+space separated -> space separated unique while preserving order of first occurrence
|
||||
tr ',\n' ' ' | awk '{for(i=1;i<=NF;i++){if(!seen[$i]++){printf((out?" ":"") $i)}}}END{printf("\n")}'
|
||||
}
|
||||
|
||||
validate_subset() {
|
||||
local type=$1; shift; local -n allowed=$1; shift; local items=($@)
|
||||
local ok=1
|
||||
for it in "${items[@]}"; do
|
||||
local found=0
|
||||
for a in "${allowed[@]}"; do [[ $it == $a ]] && { found=1; break; }; done
|
||||
if [[ $found -eq 0 ]]; then
|
||||
echo "Error: unknown $type '$it' (allowed: ${allowed[*]})" >&2
|
||||
ok=0
|
||||
fi
|
||||
done
|
||||
return $ok
|
||||
}
|
||||
|
||||
if [[ -n ${AGENTS:-} ]]; then
|
||||
AGENT_LIST=($(printf '%s' "$AGENTS" | norm_list))
|
||||
validate_subset agent ALL_AGENTS "${AGENT_LIST[@]}" || exit 1
|
||||
else
|
||||
AGENT_LIST=(${ALL_AGENTS[@]})
|
||||
fi
|
||||
|
||||
if [[ -n ${SCRIPTS:-} ]]; then
|
||||
SCRIPT_LIST=($(printf '%s' "$SCRIPTS" | norm_list))
|
||||
validate_subset script ALL_SCRIPTS "${SCRIPT_LIST[@]}" || exit 1
|
||||
else
|
||||
SCRIPT_LIST=(${ALL_SCRIPTS[@]})
|
||||
fi
|
||||
|
||||
echo "Agents: ${AGENT_LIST[*]}"
|
||||
echo "Scripts: ${SCRIPT_LIST[*]}"
|
||||
|
||||
for agent in "${AGENT_LIST[@]}"; do
|
||||
for script in "${SCRIPT_LIST[@]}"; do
|
||||
build_variant "$agent" "$script"
|
||||
done
|
||||
done
|
||||
|
||||
echo "Archives:"
|
||||
ls -1 spec-kit-template-*-${NEW_VERSION}.zip
|
||||
22
CHANGELOG.md
Normal file
22
CHANGELOG.md
Normal file
@@ -0,0 +1,22 @@
|
||||
# Changelog
|
||||
|
||||
All notable changes to the Specify CLI will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [0.0.4] - 2025-09-14
|
||||
|
||||
### Added
|
||||
|
||||
- SOCKS proxy support for corporate environments via `httpx[socks]` dependency
|
||||
|
||||
### Fixed
|
||||
|
||||
N/A
|
||||
|
||||
### Changed
|
||||
|
||||
N/A
|
||||
@@ -11,10 +11,13 @@ These are one time installations required to be able to test your changes locall
|
||||
1. Install [Python 3.11+](https://www.python.org/downloads/)
|
||||
1. Install [uv](https://docs.astral.sh/uv/) for package management
|
||||
1. Install [Git](https://git-scm.com/downloads)
|
||||
1. Have an AI coding agent available: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), or [Gemini CLI](https://github.com/google-gemini/gemini-cli)
|
||||
1. Have an AI coding agent available: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), or [Gemini CLI](https://github.com/google-gemini/gemini-cli) are recommended, but we're working on adding support for other agents as well.
|
||||
|
||||
## Submitting a pull request
|
||||
|
||||
>[!NOTE]
|
||||
>If your pull request introduces a large change that materially impacts the work of the CLI or the rest of the repository (e.g., you're introducing new templates, arguments, or otherwise major changes), make sure that it was **discussed and agreed upon** by the project maintainers. Pull requests with large changes that did not have a prior conversation and agreement will be closed.
|
||||
|
||||
1. Fork and clone the repository
|
||||
1. Configure and install the dependencies: `uv sync`
|
||||
1. Make sure the CLI works on your machine: `uv run specify --help`
|
||||
|
||||
126
README.md
126
README.md
@@ -16,7 +16,10 @@
|
||||
|
||||
- [🤔 What is Spec-Driven Development?](#-what-is-spec-driven-development)
|
||||
- [⚡ Get started](#-get-started)
|
||||
- [📚 Core philosophy](#-core-philosophy)
|
||||
- [📽️ Video Overview](#️-video-overview)
|
||||
- [🔧 Specify CLI Reference](#-specify-cli-reference)
|
||||
- [<EFBFBD> APM Integration](#-apm-integration)
|
||||
- [<EFBFBD>📚 Core philosophy](#-core-philosophy)
|
||||
- [🌟 Development phases](#-development-phases)
|
||||
- [🎯 Experimental goals](#-experimental-goals)
|
||||
- [🔧 Prerequisites](#-prerequisites)
|
||||
@@ -44,7 +47,7 @@ uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME
|
||||
|
||||
### 2. Create the spec
|
||||
|
||||
Use the `/specify` command to describe what you want to build. Focus on the **what** and **why**, not the tech stack.
|
||||
Use the **`/specify`** command to describe what you want to build. Focus on the **what** and **why**, not the tech stack.
|
||||
|
||||
```bash
|
||||
/specify Build an application that can help me organize my photos in separate photo albums. Albums are grouped by date and can be re-organized by dragging and dropping on the main page. Albums are never in other nested albums. Within each album, photos are previewed in a tile-like interface.
|
||||
@@ -52,7 +55,7 @@ Use the `/specify` command to describe what you want to build. Focus on the **wh
|
||||
|
||||
### 3. Create a technical implementation plan
|
||||
|
||||
Use the `/plan` command to provide your tech stack and architecture choices.
|
||||
Use the **`/plan`** command to provide your tech stack and architecture choices.
|
||||
|
||||
```bash
|
||||
/plan The application uses Vite with minimal number of libraries. Use vanilla HTML, CSS, and JavaScript as much as possible. Images are not uploaded anywhere and metadata is stored in a local SQLite database.
|
||||
@@ -60,11 +63,116 @@ Use the `/plan` command to provide your tech stack and architecture choices.
|
||||
|
||||
### 4. Break down and implement
|
||||
|
||||
Use `/tasks` to create an actionable task list, then ask your agent to implement the feature.
|
||||
Use **`/tasks`** to create an actionable task list, then ask your agent to implement the feature.
|
||||
|
||||
For detailed step-by-step instructions, see our [comprehensive guide](./spec-driven.md).
|
||||
|
||||
## 📚 Core philosophy
|
||||
## 📽️ Video Overview
|
||||
|
||||
Want to see Spec Kit in action? Watch our [video overview](https://www.youtube.com/watch?v=a9eR1xsfvHg&pp=0gcJCckJAYcqIYzv)!
|
||||
|
||||
[](https://www.youtube.com/watch?v=a9eR1xsfvHg&pp=0gcJCckJAYcqIYzv)
|
||||
|
||||
## 🔧 Specify CLI Reference
|
||||
|
||||
The `specify` command supports the following options:
|
||||
|
||||
### Commands
|
||||
|
||||
| Command | Description |
|
||||
|-------------|----------------------------------------------------------------|
|
||||
| `init` | Initialize a new Specify project from the latest template |
|
||||
| `check` | Check for installed tools (`git`, `claude`, `gemini`, `code`/`code-insiders`, `cursor-agent`) |
|
||||
| `apm` | APM - Agent Package Manager commands for Context management |
|
||||
|
||||
### `specify init` Arguments & Options
|
||||
|
||||
| Argument/Option | Type | Description |
|
||||
|------------------------|----------|------------------------------------------------------------------------------|
|
||||
| `<project-name>` | Argument | Name for your new project directory (optional if using `--here`) |
|
||||
| `--ai` | Option | AI assistant to use: `claude`, `gemini`, `copilot`, or `cursor` |
|
||||
| `--script` | Option | Script variant to use: `sh` (bash/zsh) or `ps` (PowerShell) |
|
||||
| `--ignore-agent-tools` | Flag | Skip checks for AI agent tools like Claude Code |
|
||||
| `--no-git` | Flag | Skip git repository initialization |
|
||||
| `--here` | Flag | Initialize project in the current directory instead of creating a new one |
|
||||
| `--skip-tls` | Flag | Skip SSL/TLS verification (not recommended) |
|
||||
| `--debug` | Flag | Enable detailed debug output for troubleshooting |
|
||||
| `--use-apm` | Flag | Include APM (Agent Package Manager) structure for context management |
|
||||
|
||||
### Examples
|
||||
|
||||
```bash
|
||||
# Basic project initialization
|
||||
specify init my-project
|
||||
|
||||
# Initialize with specific AI assistant
|
||||
specify init my-project --ai claude
|
||||
|
||||
# Initialize with APM support
|
||||
specify init my-project --ai claude --use-apm
|
||||
|
||||
# Initialize with Cursor support
|
||||
specify init my-project --ai cursor
|
||||
|
||||
# Initialize with PowerShell scripts (Windows/cross-platform)
|
||||
specify init my-project --ai copilot --script ps
|
||||
|
||||
# Initialize in current directory with APM
|
||||
specify init --here --ai copilot --use-apm
|
||||
|
||||
# Skip git initialization
|
||||
specify init my-project --ai gemini --no-git
|
||||
|
||||
# Enable debug output for troubleshooting
|
||||
specify init my-project --ai claude --debug
|
||||
|
||||
# Check system requirements
|
||||
specify check
|
||||
```
|
||||
|
||||
## 📦 APM Integration - NPM for Agent Context
|
||||
|
||||
**Context as Code Packages**: Package and share agent intelligence like npm packages. With APM, your agents get:
|
||||
|
||||
- **Team knowledge** from reusable context packages
|
||||
- **Optimized context** through mathematical relevance scoring
|
||||
- **Universal compatibility** via dynamically generated Agents.md files
|
||||
|
||||
[Complete Context Management Guide →](docs/context-management.md)
|
||||
|
||||
Spec Kit includes full APM (Agent Package Manager) functionality for managing modular context packages and files:
|
||||
|
||||
### Unified Initialization
|
||||
```bash
|
||||
# The --use-apm flag creates both SDD and APM structures
|
||||
specify init my-project --ai claude --use-apm
|
||||
```
|
||||
|
||||
### APM Commands
|
||||
```bash
|
||||
# Core APM commands available under 'apm' subcommand
|
||||
|
||||
# Install APM packages from apm.yml
|
||||
specify apm install
|
||||
|
||||
# Add APM package to apm.yml and install
|
||||
specify apm install org/repo
|
||||
|
||||
# Remove package from apm.yml and apm_modules
|
||||
specify apm uninstall org/repo
|
||||
|
||||
# Remove orphaned packages not in apm.yml
|
||||
specify apm prune
|
||||
|
||||
# List installed APM packages
|
||||
specify apm deps list
|
||||
|
||||
# Generate nested optimal AGENTS.md tree
|
||||
# Uses installed APM packages and local context files
|
||||
specify apm compile
|
||||
```
|
||||
|
||||
## <20>📚 Core philosophy
|
||||
|
||||
Spec-Driven Development is a structured process that emphasizes:
|
||||
|
||||
@@ -105,12 +213,12 @@ Our research and experimentation focus on:
|
||||
|
||||
- Validate the concept of parallel implementation exploration
|
||||
- Provide robust iterative feature development workflows
|
||||
- Extend processes to handle upgrades and modernization tasks
|
||||
- Extend processes to handle upgrades and modernization tasks
|
||||
|
||||
## 🔧 Prerequisites
|
||||
|
||||
- **Linux/macOS** (or WSL2 on Windows)
|
||||
- AI coding agent: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), or [Gemini CLI](https://github.com/google-gemini/gemini-cli)
|
||||
- AI coding agent: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), [Gemini CLI](https://github.com/google-gemini/gemini-cli), or [Cursor](https://cursor.sh/)
|
||||
- [uv](https://docs.astral.sh/uv/) for package management
|
||||
- [Python 3.11+](https://www.python.org/downloads/)
|
||||
- [Git](https://git-scm.com/downloads)
|
||||
@@ -118,7 +226,7 @@ Our research and experimentation focus on:
|
||||
## 📖 Learn more
|
||||
|
||||
- **[Complete Spec-Driven Development Methodology](./spec-driven.md)** - Deep dive into the full process
|
||||
- **[Detailed Walkthrough](#detailed-process)** - Step-by-step implementation guide
|
||||
- **[Detailed Walkthrough](#-detailed-process)** - Step-by-step implementation guide
|
||||
|
||||
---
|
||||
|
||||
@@ -214,7 +322,6 @@ At this stage, your project folder contents should resemble the following:
|
||||
│ └── 001-create-taskify
|
||||
│ └── spec.md
|
||||
└── templates
|
||||
├── CLAUDE-template.md
|
||||
├── plan-template.md
|
||||
├── spec-template.md
|
||||
└── tasks-template.md
|
||||
@@ -368,6 +475,7 @@ rm gcm-linux_amd64.2.6.1.deb
|
||||
|
||||
- Den Delimarsky ([@localden](https://github.com/localden))
|
||||
- John Lam ([@jflam](https://github.com/jflam))
|
||||
- Daniel Meppiel [@danielmeppiel](https://github.com/danielmeppiel)
|
||||
|
||||
## 💬 Support
|
||||
|
||||
|
||||
8
docs/.gitignore
vendored
Normal file
8
docs/.gitignore
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
# DocFX build output
|
||||
_site/
|
||||
obj/
|
||||
.docfx/
|
||||
|
||||
# Temporary files
|
||||
*.tmp
|
||||
*.log
|
||||
33
docs/README.md
Normal file
33
docs/README.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# Documentation
|
||||
|
||||
This folder contains the documentation source files for Spec Kit, built using [DocFX](https://dotnet.github.io/docfx/).
|
||||
|
||||
## Building Locally
|
||||
|
||||
To build the documentation locally:
|
||||
|
||||
1. Install DocFX:
|
||||
```bash
|
||||
dotnet tool install -g docfx
|
||||
```
|
||||
|
||||
2. Build the documentation:
|
||||
```bash
|
||||
cd docs
|
||||
docfx docfx.json --serve
|
||||
```
|
||||
|
||||
3. Open your browser to `http://localhost:8080` to view the documentation.
|
||||
|
||||
## Structure
|
||||
|
||||
- `docfx.json` - DocFX configuration file
|
||||
- `index.md` - Main documentation homepage
|
||||
- `toc.yml` - Table of contents configuration
|
||||
- `installation.md` - Installation guide
|
||||
- `quickstart.md` - Quick start guide
|
||||
- `_site/` - Generated documentation output (ignored by git)
|
||||
|
||||
## Deployment
|
||||
|
||||
Documentation is automatically built and deployed to GitHub Pages when changes are pushed to the `main` branch. The workflow is defined in `.github/workflows/docs.yml`.
|
||||
59
docs/context-management.md
Normal file
59
docs/context-management.md
Normal file
@@ -0,0 +1,59 @@
|
||||
# Context Management with APM
|
||||
|
||||
## NPM for Agent Context
|
||||
|
||||
Just like npm revolutionized JavaScript by enabling package reuse, APM creates an ecosystem for sharing agent context.
|
||||
|
||||
## Package Composition & Reuse
|
||||
|
||||
```yaml
|
||||
# Your project inherits team knowledge via apm.yml file in the root
|
||||
dependencies:
|
||||
apm:
|
||||
- company/design-system # UI patterns, brand guidelines
|
||||
- company/security-standards # Auth patterns, data handling
|
||||
- community/best-practices # Industry standards
|
||||
```
|
||||
|
||||
**Result**: Your project gets all the instructions of above packages applied via dynamically generated Agents.md files using `specify apm compile`. These files are optimally generated to minimize contextual load for Agents compatible with the Agents.md standard.
|
||||
|
||||
**Enterprise Scenario**: Design team creates accessibility guidelines once → entire organization uses them → agents work consistently across all projects.
|
||||
|
||||
## Mathematical Context Optimization
|
||||
|
||||
**The Technical Foundation**: APM uses mathematical optimization to solve the context efficiency problem.
|
||||
|
||||
```
|
||||
Context_Efficiency = Relevant_Instructions / Total_Instructions_Loaded
|
||||
```
|
||||
|
||||
**Why This Matters**: When agents work in `/styles/` directory, they shouldn't load Python compliance rules. APM's Context Optimization Engine ensures agents get minimal, highly relevant context.
|
||||
|
||||
**The Algorithm**: Constraint satisfaction optimization that finds placement minimizing context pollution while maximizing relevance. Each instruction gets mathematically optimal placement across the project hierarchy.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
specify init my-project --use-apm --ai copilot
|
||||
specify apm install company/design-system
|
||||
specify apm compile # Mathematical optimization generates distributed AGENTS.md files
|
||||
```
|
||||
|
||||
## Universal Agent Compatibility
|
||||
|
||||
APM generates distributed `AGENTS.md` files compatible with the [agents.md standard](https://agents.md), working with any coding agent (GitHub Copilot, Cursor, Claude, Codex, Aider, etc.).
|
||||
|
||||
## Authentication Setup (Optional)
|
||||
|
||||
```bash
|
||||
export GITHUB_APM_PAT=your_fine_grained_token_here
|
||||
```
|
||||
|
||||
Only needed for private packages. Public community packages work without authentication.
|
||||
|
||||
## The Complete Value
|
||||
|
||||
1. **Package Ecosystem** - Share and compose agent intelligence like code dependencies
|
||||
2. **Mathematical Optimization** - Context Optimization Engine ensures relevance without pollution
|
||||
3. **Universal Standards** - Works with any agent via industry-standard agents.md format
|
||||
4. **Enterprise Ready** - Team knowledge scales across entire organizations
|
||||
70
docs/docfx.json
Normal file
70
docs/docfx.json
Normal file
@@ -0,0 +1,70 @@
|
||||
{
|
||||
"build": {
|
||||
"content": [
|
||||
{
|
||||
"files": [
|
||||
"*.md",
|
||||
"toc.yml"
|
||||
]
|
||||
},
|
||||
{
|
||||
"files": [
|
||||
"../README.md",
|
||||
"../CONTRIBUTING.md",
|
||||
"../CODE_OF_CONDUCT.md",
|
||||
"../SECURITY.md",
|
||||
"../SUPPORT.md"
|
||||
],
|
||||
"dest": "."
|
||||
}
|
||||
],
|
||||
"resource": [
|
||||
{
|
||||
"files": [
|
||||
"images/**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"files": [
|
||||
"../media/**"
|
||||
],
|
||||
"dest": "media"
|
||||
}
|
||||
],
|
||||
"overwrite": [
|
||||
{
|
||||
"files": [
|
||||
"apidoc/**.md"
|
||||
],
|
||||
"exclude": [
|
||||
"obj/**",
|
||||
"_site/**"
|
||||
]
|
||||
}
|
||||
],
|
||||
"dest": "_site",
|
||||
"globalMetadataFiles": [],
|
||||
"fileMetadataFiles": [],
|
||||
"template": [
|
||||
"default",
|
||||
"modern"
|
||||
],
|
||||
"postProcessors": [],
|
||||
"markdownEngineName": "markdig",
|
||||
"noLangKeyword": false,
|
||||
"keepFileLink": false,
|
||||
"cleanupCacheHistory": false,
|
||||
"disableGitFeatures": false,
|
||||
"globalMetadata": {
|
||||
"_appTitle": "Spec Kit Documentation",
|
||||
"_appName": "Spec Kit",
|
||||
"_appFooter": "Spec Kit - A specification-driven development toolkit",
|
||||
"_enableSearch": true,
|
||||
"_disableContribution": false,
|
||||
"_gitContribute": {
|
||||
"repo": "https://github.com/github/spec-kit",
|
||||
"branch": "main"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
62
docs/index.md
Normal file
62
docs/index.md
Normal file
@@ -0,0 +1,62 @@
|
||||
# Spec Kit
|
||||
|
||||
*Build high-quality software faster.*
|
||||
|
||||
**An effort to allow organizations to focus on product scenarios rather than writing undifferentiated code with the help of Spec-Driven Development.**
|
||||
|
||||
## What is Spec-Driven Development?
|
||||
|
||||
Spec-Driven Development **flips the script** on traditional software development. For decades, code has been king — specifications were just scaffolding we built and discarded once the "real work" of coding began. Spec-Driven Development changes this: **specifications become executable**, directly generating working implementations rather than just guiding them.
|
||||
|
||||
## Getting Started
|
||||
|
||||
- [Installation Guide](installation.md)
|
||||
- [Quick Start Guide](quickstart.md)
|
||||
- [Local Development](local-development.md)
|
||||
|
||||
## Core Philosophy
|
||||
|
||||
Spec-Driven Development is a structured process that emphasizes:
|
||||
|
||||
- **Intent-driven development** where specifications define the "_what_" before the "_how_"
|
||||
- **Rich specification creation** using guardrails and organizational principles
|
||||
- **Multi-step refinement** rather than one-shot code generation from prompts
|
||||
- **Heavy reliance** on advanced AI model capabilities for specification interpretation
|
||||
|
||||
## Development Phases
|
||||
|
||||
| Phase | Focus | Key Activities |
|
||||
|-------|-------|----------------|
|
||||
| **0-to-1 Development** ("Greenfield") | Generate from scratch | <ul><li>Start with high-level requirements</li><li>Generate specifications</li><li>Plan implementation steps</li><li>Build production-ready applications</li></ul> |
|
||||
| **Creative Exploration** | Parallel implementations | <ul><li>Explore diverse solutions</li><li>Support multiple technology stacks & architectures</li><li>Experiment with UX patterns</li></ul> |
|
||||
| **Iterative Enhancement** ("Brownfield") | Brownfield modernization | <ul><li>Add features iteratively</li><li>Modernize legacy systems</li><li>Adapt processes</li></ul> |
|
||||
|
||||
## Experimental Goals
|
||||
|
||||
Our research and experimentation focus on:
|
||||
|
||||
### Technology Independence
|
||||
- Create applications using diverse technology stacks
|
||||
- Validate the hypothesis that Spec-Driven Development is a process not tied to specific technologies, programming languages, or frameworks
|
||||
|
||||
### Enterprise Constraints
|
||||
- Demonstrate mission-critical application development
|
||||
- Incorporate organizational constraints (cloud providers, tech stacks, engineering practices)
|
||||
- Support enterprise design systems and compliance requirements
|
||||
|
||||
### User-Centric Development
|
||||
- Build applications for different user cohorts and preferences
|
||||
- Support various development approaches (from vibe-coding to AI-native development)
|
||||
|
||||
### Creative & Iterative Processes
|
||||
- Validate the concept of parallel implementation exploration
|
||||
- Provide robust iterative feature development workflows
|
||||
- Extend processes to handle upgrades and modernization tasks
|
||||
|
||||
## Contributing
|
||||
|
||||
Please see our [Contributing Guide](CONTRIBUTING.md) for information on how to contribute to this project.
|
||||
|
||||
## Support
|
||||
|
||||
For support, please check our [Support Guide](SUPPORT.md) or open an issue on GitHub.
|
||||
86
docs/installation.md
Normal file
86
docs/installation.md
Normal file
@@ -0,0 +1,86 @@
|
||||
# Installation Guide
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- **Linux/macOS** (or Windows; PowerShell scripts now supported without WSL)
|
||||
- AI coding agent: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), or [Gemini CLI](https://github.com/google-gemini/gemini-cli)
|
||||
- [uv](https://docs.astral.sh/uv/) for package management
|
||||
- [Python 3.11+](https://www.python.org/downloads/)
|
||||
- [Git](https://git-scm.com/downloads)
|
||||
|
||||
## Installation
|
||||
|
||||
### Initialize a New Project
|
||||
|
||||
The easiest way to get started is to initialize a new project:
|
||||
|
||||
```bash
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME>
|
||||
```
|
||||
|
||||
Or initialize in the current directory:
|
||||
|
||||
```bash
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init --here
|
||||
```
|
||||
|
||||
### Specify AI Agent
|
||||
|
||||
You can proactively specify your AI agent during initialization:
|
||||
|
||||
```bash
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai claude
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai gemini
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai copilot
|
||||
```
|
||||
|
||||
### Specify Script Type (Shell vs PowerShell)
|
||||
|
||||
All automation scripts now have both Bash (`.sh`) and PowerShell (`.ps1`) variants.
|
||||
|
||||
Auto behavior:
|
||||
- Windows default: `ps`
|
||||
- Other OS default: `sh`
|
||||
- Interactive mode: you'll be prompted unless you pass `--script`
|
||||
|
||||
Force a specific script type:
|
||||
```bash
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --script sh
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --script ps
|
||||
```
|
||||
|
||||
### Ignore Agent Tools Check
|
||||
|
||||
If you prefer to get the templates without checking for the right tools:
|
||||
|
||||
```bash
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai claude --ignore-agent-tools
|
||||
```
|
||||
|
||||
## Verification
|
||||
|
||||
After initialization, you should see the following commands available in your AI agent:
|
||||
- `/specify` - Create specifications
|
||||
- `/plan` - Generate implementation plans
|
||||
- `/tasks` - Break down into actionable tasks
|
||||
|
||||
The `.specify/scripts` directory will contain both `.sh` and `.ps1` scripts.
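
For example, a quick listing should show both variants (illustrative; exact filenames depend on the template release you pulled):

```bash
# List the generated automation scripts (names may differ between releases)
ls -R .specify/scripts
```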
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Git Credential Manager on Linux
|
||||
|
||||
If you're having issues with Git authentication on Linux, you can install Git Credential Manager:
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
echo "Downloading Git Credential Manager v2.6.1..."
|
||||
wget https://github.com/git-ecosystem/git-credential-manager/releases/download/v2.6.1/gcm-linux_amd64.2.6.1.deb
|
||||
echo "Installing Git Credential Manager..."
|
||||
sudo dpkg -i gcm-linux_amd64.2.6.1.deb
|
||||
echo "Configuring Git to use GCM..."
|
||||
git config --global credential.helper manager
|
||||
echo "Cleaning up..."
|
||||
rm gcm-linux_amd64.2.6.1.deb
|
||||
```
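
To confirm the configuration took effect, read the helper setting back; it should print `manager`:

```bash
git config --global credential.helper
```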
|
||||
168
docs/local-development.md
Normal file
@@ -0,0 +1,168 @@
|
||||
# Local Development Guide
|
||||
|
||||
This guide shows how to iterate on the `specify` CLI locally without publishing a release or committing to `main` first.
|
||||
|
||||
> Scripts now have both Bash (`.sh`) and PowerShell (`.ps1`) variants. The CLI auto-selects based on OS unless you pass `--script sh|ps`.
|
||||
|
||||
## 1. Clone and Switch Branches
|
||||
|
||||
```bash
|
||||
git clone https://github.com/github/spec-kit.git
|
||||
cd spec-kit
|
||||
# Work on a feature branch
|
||||
git checkout -b your-feature-branch
|
||||
```
|
||||
|
||||
## 2. Run the CLI Directly (Fastest Feedback)
|
||||
|
||||
You can execute the CLI via the module entrypoint without installing anything:
|
||||
|
||||
```bash
|
||||
# From repo root
|
||||
python -m src.specify_cli --help
|
||||
python -m src.specify_cli init demo-project --ai claude --ignore-agent-tools --script sh
|
||||
```
|
||||
|
||||
If you prefer to invoke the script file directly (relying on its shebang line):
|
||||
|
||||
```bash
|
||||
python src/specify_cli/__init__.py init demo-project --script ps
|
||||
```
|
||||
|
||||
## 3. Use Editable Install (Isolated Environment)
|
||||
|
||||
Create an isolated environment using `uv` so dependencies resolve exactly like end users get them:
|
||||
|
||||
```bash
|
||||
# Create & activate virtual env (uv auto-manages .venv)
|
||||
uv venv
|
||||
source .venv/bin/activate # or on Windows PowerShell: .venv\Scripts\Activate.ps1
|
||||
|
||||
# Install project in editable mode
|
||||
uv pip install -e .
|
||||
|
||||
# Now 'specify' entrypoint is available
|
||||
specify --help
|
||||
```
|
||||
|
||||
Re-running after code edits requires no reinstall because of editable mode.
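
For example, a typical edit loop looks like this (illustrative paths):

```bash
# Edit the CLI source, then run it again -- the change is picked up immediately
vim src/specify_cli/__init__.py   # use your editor of choice
specify --help
```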
|
||||
|
||||
## 4. Invoke with uvx Directly From Git (Current Branch)
|
||||
|
||||
`uvx` can run from a local path (or a Git ref) to simulate user flows:
|
||||
|
||||
```bash
|
||||
uvx --from . specify init demo-uvx --ai copilot --ignore-agent-tools --script sh
|
||||
```
|
||||
|
||||
You can also point uvx at a specific branch without merging:
|
||||
|
||||
```bash
|
||||
# Push your working branch first
|
||||
git push origin your-feature-branch
|
||||
uvx --from git+https://github.com/github/spec-kit.git@your-feature-branch specify init demo-branch-test --script ps
|
||||
```
|
||||
|
||||
### 4a. Absolute Path uvx (Run From Anywhere)
|
||||
|
||||
If you're in another directory, use an absolute path instead of `.`:
|
||||
|
||||
```bash
|
||||
uvx --from /mnt/c/GitHub/spec-kit specify --help
|
||||
uvx --from /mnt/c/GitHub/spec-kit specify init demo-anywhere --ai copilot --ignore-agent-tools --script sh
|
||||
```
|
||||
|
||||
Set an environment variable for convenience:
|
||||
```bash
|
||||
export SPEC_KIT_SRC=/mnt/c/GitHub/spec-kit
|
||||
uvx --from "$SPEC_KIT_SRC" specify init demo-env --ai copilot --ignore-agent-tools --script ps
|
||||
```
|
||||
|
||||
(Optional) Define a shell function:
|
||||
```bash
|
||||
specify-dev() { uvx --from /mnt/c/GitHub/spec-kit specify "$@"; }
|
||||
# Then
|
||||
specify-dev --help
|
||||
```
|
||||
|
||||
## 5. Testing Script Permission Logic
|
||||
|
||||
After running an `init`, check that shell scripts are executable on POSIX systems:
|
||||
|
||||
```bash
|
||||
ls -l scripts | grep '\.sh$'
|
||||
# Expect owner execute bit (e.g. -rwxr-xr-x)
|
||||
```
|
||||
On Windows you will instead use the `.ps1` scripts (no chmod needed).
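
If the execute bit is missing on a POSIX system, you can restore it manually (the path may differ depending on where the scripts were generated):

```bash
chmod +x scripts/*.sh
```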
|
||||
|
||||
## 6. Run Lint / Basic Checks (Add Your Own)
|
||||
|
||||
No lint configuration is enforced yet, but you can quickly sanity-check that the package imports:
|
||||
```bash
|
||||
python -c "import specify_cli; print('Import OK')"
|
||||
```
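
If you install the optional dev extras declared in `pyproject.toml`, you also get pytest, black, isort, and mypy for ad-hoc checks (illustrative invocations):

```bash
uv pip install -e ".[dev]"
pytest              # runs any tests present
black --check src/
```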
|
||||
|
||||
## 7. Build a Wheel Locally (Optional)
|
||||
|
||||
Validate packaging before publishing:
|
||||
|
||||
```bash
|
||||
uv build
|
||||
ls dist/
|
||||
```
|
||||
Install the built artifact into a fresh throwaway environment if needed.
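
For example (a sketch; the wheel filename depends on the version in `pyproject.toml`):

```bash
uv venv /tmp/specify-wheel-test
source /tmp/specify-wheel-test/bin/activate
uv pip install dist/*.whl
specify --help
```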
|
||||
|
||||
## 8. Using a Temporary Workspace
|
||||
|
||||
When testing `init --here` in a dirty directory, create a temp workspace:
|
||||
|
||||
```bash
|
||||
mkdir /tmp/spec-test && cd /tmp/spec-test
|
||||
python -m src.specify_cli init --here --ai claude --ignore-agent-tools --script sh # if repo copied here
|
||||
```
|
||||
Or copy only the modified CLI portion if you want a lighter sandbox.
|
||||
|
||||
## 9. Debug Network / TLS Skips
|
||||
|
||||
If you need to bypass TLS validation while experimenting:
|
||||
|
||||
```bash
|
||||
specify check --skip-tls
|
||||
specify init demo --skip-tls --ai gemini --ignore-agent-tools --script ps
|
||||
```
|
||||
(Use only for local experimentation.)
|
||||
|
||||
## 10. Rapid Edit Loop Summary
|
||||
|
||||
| Action | Command |
|
||||
|--------|---------|
|
||||
| Run CLI directly | `python -m src.specify_cli --help` |
|
||||
| Editable install | `uv pip install -e .` then `specify ...` |
|
||||
| Local uvx run (repo root) | `uvx --from . specify ...` |
|
||||
| Local uvx run (abs path) | `uvx --from /mnt/c/GitHub/spec-kit specify ...` |
|
||||
| Git branch uvx | `uvx --from git+URL@branch specify ...` |
|
||||
| Build wheel | `uv build` |
|
||||
|
||||
## 11. Cleaning Up
|
||||
|
||||
Remove build artifacts / virtual env quickly:
|
||||
```bash
|
||||
rm -rf .venv dist build *.egg-info
|
||||
```
|
||||
|
||||
## 12. Common Issues
|
||||
|
||||
| Symptom | Fix |
|
||||
|---------|-----|
|
||||
| `ModuleNotFoundError: typer` | Run `uv pip install -e .` |
|
||||
| Scripts not executable (Linux) | Re-run init or `chmod +x scripts/*.sh` |
|
||||
| Git step skipped | You passed `--no-git` or Git not installed |
|
||||
| Wrong script type downloaded | Pass `--script sh` or `--script ps` explicitly |
|
||||
| TLS errors on corporate network | Try `--skip-tls` (not for production) |
|
||||
|
||||
## 13. Next Steps
|
||||
|
||||
- Update docs and run through Quick Start using your modified CLI
|
||||
- Open a PR when satisfied
|
||||
- (Optional) Tag a release once changes land in `main`
|
||||
|
||||
122
docs/quickstart.md
Normal file
@@ -0,0 +1,122 @@
|
||||
# Quick Start Guide
|
||||
|
||||
This guide will help you get started with Spec-Driven Development using Spec Kit.
|
||||
|
||||
> NEW: All automation scripts now provide both Bash (`.sh`) and PowerShell (`.ps1`) variants. The `specify` CLI auto-selects based on OS unless you pass `--script sh|ps`.
|
||||
|
||||
## The 4-Step Process
|
||||
|
||||
### 1. Install Specify
|
||||
|
||||
Initialize your project depending on the coding agent you're using:
|
||||
|
||||
```bash
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME>
|
||||
```
|
||||
|
||||
Pick script type explicitly (optional):
|
||||
```bash
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME> --script ps # Force PowerShell
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME> --script sh # Force POSIX shell
|
||||
```
|
||||
|
||||
### 2. Create the Spec
|
||||
|
||||
Use the `/specify` command to describe what you want to build. Focus on the **what** and **why**, not the tech stack.
|
||||
|
||||
```bash
|
||||
/specify Build an application that can help me organize my photos in separate photo albums. Albums are grouped by date and can be re-organized by dragging and dropping on the main page. Albums are never in other nested albums. Within each album, photos are previewed in a tile-like interface.
|
||||
```
|
||||
|
||||
### 3. Create a Technical Implementation Plan
|
||||
|
||||
Use the `/plan` command to provide your tech stack and architecture choices.
|
||||
|
||||
```bash
|
||||
/plan The application uses Vite with minimal number of libraries. Use vanilla HTML, CSS, and JavaScript as much as possible. Images are not uploaded anywhere and metadata is stored in a local SQLite database.
|
||||
```
|
||||
|
||||
### 4. Break Down and Implement
|
||||
|
||||
Use `/tasks` to create an actionable task list, then ask your agent to implement the feature.
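
For example (an illustrative prompt; tailor it to your feature):

```bash
/tasks Break the plan into small, independently verifiable tasks and order them by dependency
```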
|
||||
|
||||
## Detailed Example: Building Taskify
|
||||
|
||||
Here's a complete example of building a team productivity platform:
|
||||
|
||||
### Step 1: Define Requirements with `/specify`
|
||||
|
||||
```text
|
||||
Develop Taskify, a team productivity platform. It should allow users to create projects, add team members,
|
||||
assign tasks, comment and move tasks between boards in Kanban style. In this initial phase for this feature,
|
||||
let's call it "Create Taskify," let's have multiple users but the users will be declared ahead of time, predefined.
|
||||
I want five users in two different categories, one product manager and four engineers. Let's create three
|
||||
different sample projects. Let's have the standard Kanban columns for the status of each task, such as "To Do,"
|
||||
"In Progress," "In Review," and "Done." There will be no login for this application as this is just the very
|
||||
first testing thing to ensure that our basic features are set up. For each task in the UI for a task card,
|
||||
you should be able to change the current status of the task between the different columns in the Kanban work board.
|
||||
You should be able to leave an unlimited number of comments for a particular card. You should be able to, from that task
|
||||
card, assign one of the valid users. When you first launch Taskify, it's going to give you a list of the five users to pick
|
||||
from. There will be no password required. When you click on a user, you go into the main view, which displays the list of
|
||||
projects. When you click on a project, you open the Kanban board for that project. You're going to see the columns.
|
||||
You'll be able to drag and drop cards back and forth between different columns. You will see any cards that are
|
||||
assigned to you, the currently logged in user, in a different color from all the other ones, so you can quickly
|
||||
see yours. You can edit any comments that you make, but you can't edit comments that other people made. You can
|
||||
delete any comments that you made, but you can't delete comments anybody else made.
|
||||
```
|
||||
|
||||
### Step 2: Refine the Specification
|
||||
|
||||
After the initial specification is created, clarify any missing requirements:
|
||||
|
||||
```text
|
||||
For each sample project or project that you create there should be a variable number of tasks between 5 and 15
|
||||
tasks for each one randomly distributed into different states of completion. Make sure that there's at least
|
||||
one task in each stage of completion.
|
||||
```
|
||||
|
||||
Also validate the specification checklist:
|
||||
|
||||
```text
|
||||
Read the review and acceptance checklist, and check off each item in the checklist if the feature spec meets the criteria. Leave it empty if it does not.
|
||||
```
|
||||
|
||||
### Step 3: Generate Technical Plan with `/plan`
|
||||
|
||||
Be specific about your tech stack and technical requirements:
|
||||
|
||||
```text
|
||||
We are going to generate this using .NET Aspire, using Postgres as the database. The frontend should use
|
||||
Blazor server with drag-and-drop task boards, real-time updates. There should be a REST API created with a projects API,
|
||||
tasks API, and a notifications API.
|
||||
```
|
||||
|
||||
### Step 4: Validate and Implement
|
||||
|
||||
Have your AI agent audit the implementation plan:
|
||||
|
||||
```text
|
||||
Now I want you to go and audit the implementation plan and the implementation detail files.
|
||||
Read through it with an eye on determining whether or not there is a sequence of tasks that you need
|
||||
to be doing that are obvious from reading this. Because I don't know if there's enough here.
|
||||
```
|
||||
|
||||
Finally, implement the solution:
|
||||
|
||||
```text
|
||||
implement specs/002-create-taskify/plan.md
|
||||
```
|
||||
|
||||
## Key Principles
|
||||
|
||||
- **Be explicit** about what you're building and why
|
||||
- **Don't focus on tech stack** during specification phase
|
||||
- **Iterate and refine** your specifications before implementation
|
||||
- **Validate** the plan before coding begins
|
||||
- **Let the AI agent handle** the implementation details
|
||||
|
||||
## Next Steps
|
||||
|
||||
- Read the complete methodology for in-depth guidance
|
||||
- Check out more examples in the repository
|
||||
- Explore the source code on GitHub
|
||||
17
docs/toc.yml
Normal file
@@ -0,0 +1,17 @@
|
||||
# Home page
|
||||
- name: Home
|
||||
href: index.md
|
||||
|
||||
# Getting started section
|
||||
- name: Getting Started
|
||||
items:
|
||||
- name: Installation
|
||||
href: installation.md
|
||||
- name: Quick Start
|
||||
href: quickstart.md
|
||||
|
||||
# Development workflows
|
||||
- name: Development
|
||||
items:
|
||||
- name: Local Development
|
||||
href: local-development.md
|
||||
BIN
media/spec-kit-video-header.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 102 KiB |
@@ -1,22 +1,44 @@
|
||||
[project]
|
||||
name = "specify-cli"
|
||||
version = "0.0.2"
|
||||
version = "0.0.4"
|
||||
description = "Setup tool for Specify spec-driven development projects"
|
||||
requires-python = ">=3.11"
|
||||
dependencies = [
|
||||
# Existing spec-kit dependencies
|
||||
"typer",
|
||||
"rich",
|
||||
"httpx",
|
||||
"rich>=13.0.0",
|
||||
"httpx[socks]",
|
||||
"platformdirs",
|
||||
"readchar",
|
||||
"truststore>=0.10.4",
|
||||
# APM dependencies (from awd-cli, excluding runtime/embargo items)
|
||||
"click>=8.0.0",
|
||||
"colorama>=0.4.6",
|
||||
"pyyaml>=6.0.0",
|
||||
"requests>=2.28.0",
|
||||
"python-frontmatter>=1.0.0",
|
||||
"tomli>=1.2.0; python_version<'3.11'",
|
||||
"toml>=0.10.2",
|
||||
"rich-click>=1.7.0",
|
||||
"watchdog>=3.0.0",
|
||||
"GitPython>=3.1.0",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
specify = "specify_cli:main"
|
||||
|
||||
[project.optional-dependencies]
|
||||
dev = [
|
||||
"pytest>=7.0.0",
|
||||
"pytest-cov>=4.0.0",
|
||||
"black>=23.0.0",
|
||||
"isort>=5.0.0",
|
||||
"mypy>=1.0.0",
|
||||
]
|
||||
|
||||
[build-system]
|
||||
requires = ["hatchling"]
|
||||
build-backend = "hatchling.build"
|
||||
|
||||
[tool.hatch.build.targets.wheel]
|
||||
packages = ["src/specify_cli"]
|
||||
packages = ["src/specify_cli", "src/apm_cli"]
|
||||
|
||||
15
scripts/bash/check-task-prerequisites.sh
Normal file
@@ -0,0 +1,15 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
JSON_MODE=false
|
||||
for arg in "$@"; do case "$arg" in --json) JSON_MODE=true ;; --help|-h) echo "Usage: $0 [--json]"; exit 0 ;; esac; done
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
source "$SCRIPT_DIR/common.sh"
|
||||
eval $(get_feature_paths)
|
||||
check_feature_branch "$CURRENT_BRANCH" || exit 1
|
||||
if [[ ! -d "$FEATURE_DIR" ]]; then echo "ERROR: Feature directory not found: $FEATURE_DIR"; echo "Run /specify first."; exit 1; fi
|
||||
if [[ ! -f "$IMPL_PLAN" ]]; then echo "ERROR: plan.md not found in $FEATURE_DIR"; echo "Run /plan first."; exit 1; fi
|
||||
if $JSON_MODE; then
|
||||
docs=(); [[ -f "$RESEARCH" ]] && docs+=("research.md"); [[ -f "$DATA_MODEL" ]] && docs+=("data-model.md"); ([[ -d "$CONTRACTS_DIR" ]] && [[ -n "$(ls -A "$CONTRACTS_DIR" 2>/dev/null)" ]]) && docs+=("contracts/"); [[ -f "$QUICKSTART" ]] && docs+=("quickstart.md");
|
||||
json_docs=$(printf '"%s",' "${docs[@]}"); json_docs="[${json_docs%,}]"; printf '{"FEATURE_DIR":"%s","AVAILABLE_DOCS":%s}\n' "$FEATURE_DIR" "$json_docs"
|
||||
else
|
||||
echo "FEATURE_DIR:$FEATURE_DIR"; echo "AVAILABLE_DOCS:"; check_file "$RESEARCH" "research.md"; check_file "$DATA_MODEL" "data-model.md"; check_dir "$CONTRACTS_DIR" "contracts/"; check_file "$QUICKSTART" "quickstart.md"; fi
|
||||
37
scripts/bash/common.sh
Normal file
@@ -0,0 +1,37 @@
|
||||
#!/usr/bin/env bash
|
||||
# (Moved to scripts/bash/) Common functions and variables for all scripts
|
||||
|
||||
get_repo_root() { git rev-parse --show-toplevel; }
|
||||
get_current_branch() { git rev-parse --abbrev-ref HEAD; }
|
||||
|
||||
check_feature_branch() {
|
||||
local branch="$1"
|
||||
if [[ ! "$branch" =~ ^[0-9]{3}- ]]; then
|
||||
echo "ERROR: Not on a feature branch. Current branch: $branch" >&2
|
||||
echo "Feature branches should be named like: 001-feature-name" >&2
|
||||
return 1
|
||||
fi; return 0
|
||||
}
|
||||
|
||||
get_feature_dir() { echo "$1/specs/$2"; }
|
||||
|
||||
get_feature_paths() {
|
||||
local repo_root=$(get_repo_root)
|
||||
local current_branch=$(get_current_branch)
|
||||
local feature_dir=$(get_feature_dir "$repo_root" "$current_branch")
|
||||
cat <<EOF
|
||||
REPO_ROOT='$repo_root'
|
||||
CURRENT_BRANCH='$current_branch'
|
||||
FEATURE_DIR='$feature_dir'
|
||||
FEATURE_SPEC='$feature_dir/spec.md'
|
||||
IMPL_PLAN='$feature_dir/plan.md'
|
||||
TASKS='$feature_dir/tasks.md'
|
||||
RESEARCH='$feature_dir/research.md'
|
||||
DATA_MODEL='$feature_dir/data-model.md'
|
||||
QUICKSTART='$feature_dir/quickstart.md'
|
||||
CONTRACTS_DIR='$feature_dir/contracts'
|
||||
EOF
|
||||
}
|
||||
|
||||
check_file() { [[ -f "$1" ]] && echo " ✓ $2" || echo " ✗ $2"; }
|
||||
check_dir() { [[ -d "$1" && -n $(ls -A "$1" 2>/dev/null) ]] && echo " ✓ $2" || echo " ✗ $2"; }
|
||||
58
scripts/bash/create-new-feature.sh
Normal file
@@ -0,0 +1,58 @@
|
||||
#!/usr/bin/env bash
|
||||
# (Moved to scripts/bash/) Create a new feature with branch, directory structure, and template
|
||||
set -e
|
||||
|
||||
JSON_MODE=false
|
||||
ARGS=()
|
||||
for arg in "$@"; do
|
||||
case "$arg" in
|
||||
--json) JSON_MODE=true ;;
|
||||
--help|-h) echo "Usage: $0 [--json] <feature_description>"; exit 0 ;;
|
||||
*) ARGS+=("$arg") ;;
|
||||
esac
|
||||
done
|
||||
|
||||
FEATURE_DESCRIPTION="${ARGS[*]}"
|
||||
if [ -z "$FEATURE_DESCRIPTION" ]; then
|
||||
echo "Usage: $0 [--json] <feature_description>" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
REPO_ROOT=$(git rev-parse --show-toplevel)
|
||||
SPECS_DIR="$REPO_ROOT/specs"
|
||||
mkdir -p "$SPECS_DIR"
|
||||
|
||||
HIGHEST=0
|
||||
if [ -d "$SPECS_DIR" ]; then
|
||||
for dir in "$SPECS_DIR"/*; do
|
||||
[ -d "$dir" ] || continue
|
||||
dirname=$(basename "$dir")
|
||||
number=$(echo "$dirname" | grep -o '^[0-9]\+' || echo "0")
|
||||
number=$((10#$number))
|
||||
if [ "$number" -gt "$HIGHEST" ]; then HIGHEST=$number; fi
|
||||
done
|
||||
fi
|
||||
|
||||
NEXT=$((HIGHEST + 1))
|
||||
FEATURE_NUM=$(printf "%03d" "$NEXT")
|
||||
|
||||
BRANCH_NAME=$(echo "$FEATURE_DESCRIPTION" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//')
|
||||
WORDS=$(echo "$BRANCH_NAME" | tr '-' '\n' | grep -v '^$' | head -3 | tr '\n' '-' | sed 's/-$//')
|
||||
BRANCH_NAME="${FEATURE_NUM}-${WORDS}"
|
||||
|
||||
git checkout -b "$BRANCH_NAME"
|
||||
|
||||
FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
|
||||
mkdir -p "$FEATURE_DIR"
|
||||
|
||||
TEMPLATE="$REPO_ROOT/templates/spec-template.md"
|
||||
SPEC_FILE="$FEATURE_DIR/spec.md"
|
||||
if [ -f "$TEMPLATE" ]; then cp "$TEMPLATE" "$SPEC_FILE"; else touch "$SPEC_FILE"; fi
|
||||
|
||||
if $JSON_MODE; then
|
||||
printf '{"BRANCH_NAME":"%s","SPEC_FILE":"%s","FEATURE_NUM":"%s"}\n' "$BRANCH_NAME" "$SPEC_FILE" "$FEATURE_NUM"
|
||||
else
|
||||
echo "BRANCH_NAME: $BRANCH_NAME"
|
||||
echo "SPEC_FILE: $SPEC_FILE"
|
||||
echo "FEATURE_NUM: $FEATURE_NUM"
|
||||
fi
|
||||
7
scripts/bash/get-feature-paths.sh
Normal file
@@ -0,0 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
source "$SCRIPT_DIR/common.sh"
|
||||
eval $(get_feature_paths)
|
||||
check_feature_branch "$CURRENT_BRANCH" || exit 1
|
||||
echo "REPO_ROOT: $REPO_ROOT"; echo "BRANCH: $CURRENT_BRANCH"; echo "FEATURE_DIR: $FEATURE_DIR"; echo "FEATURE_SPEC: $FEATURE_SPEC"; echo "IMPL_PLAN: $IMPL_PLAN"; echo "TASKS: $TASKS"
|
||||
17
scripts/bash/setup-plan.sh
Normal file
@@ -0,0 +1,17 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
JSON_MODE=false
|
||||
for arg in "$@"; do case "$arg" in --json) JSON_MODE=true ;; --help|-h) echo "Usage: $0 [--json]"; exit 0 ;; esac; done
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
source "$SCRIPT_DIR/common.sh"
|
||||
eval $(get_feature_paths)
|
||||
check_feature_branch "$CURRENT_BRANCH" || exit 1
|
||||
mkdir -p "$FEATURE_DIR"
|
||||
TEMPLATE="$REPO_ROOT/.specify/templates/plan-template.md"
|
||||
[[ -f "$TEMPLATE" ]] && cp "$TEMPLATE" "$IMPL_PLAN"
|
||||
if $JSON_MODE; then
|
||||
printf '{"FEATURE_SPEC":"%s","IMPL_PLAN":"%s","SPECS_DIR":"%s","BRANCH":"%s"}\n' \
|
||||
"$FEATURE_SPEC" "$IMPL_PLAN" "$FEATURE_DIR" "$CURRENT_BRANCH"
|
||||
else
|
||||
echo "FEATURE_SPEC: $FEATURE_SPEC"; echo "IMPL_PLAN: $IMPL_PLAN"; echo "SPECS_DIR: $FEATURE_DIR"; echo "BRANCH: $CURRENT_BRANCH"
|
||||
fi
|
||||
57
scripts/bash/update-agent-context.sh
Normal file
@@ -0,0 +1,57 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
REPO_ROOT=$(git rev-parse --show-toplevel)
|
||||
CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
|
||||
FEATURE_DIR="$REPO_ROOT/specs/$CURRENT_BRANCH"
|
||||
NEW_PLAN="$FEATURE_DIR/plan.md"
|
||||
CLAUDE_FILE="$REPO_ROOT/CLAUDE.md"; GEMINI_FILE="$REPO_ROOT/GEMINI.md"; COPILOT_FILE="$REPO_ROOT/.github/copilot-instructions.md"
|
||||
AGENT_TYPE="$1"
|
||||
[ -f "$NEW_PLAN" ] || { echo "ERROR: No plan.md found at $NEW_PLAN"; exit 1; }
|
||||
echo "=== Updating agent context files for feature $CURRENT_BRANCH ==="
|
||||
NEW_LANG=$(grep "^**Language/Version**: " "$NEW_PLAN" 2>/dev/null | head -1 | sed 's/^**Language\/Version**: //' | grep -v "NEEDS CLARIFICATION" || echo "")
|
||||
NEW_FRAMEWORK=$(grep "^**Primary Dependencies**: " "$NEW_PLAN" 2>/dev/null | head -1 | sed 's/^**Primary Dependencies**: //' | grep -v "NEEDS CLARIFICATION" || echo "")
|
||||
NEW_DB=$(grep "^**Storage**: " "$NEW_PLAN" 2>/dev/null | head -1 | sed 's/^**Storage**: //' | grep -v "N/A" | grep -v "NEEDS CLARIFICATION" || echo "")
|
||||
NEW_PROJECT_TYPE=$(grep "^**Project Type**: " "$NEW_PLAN" 2>/dev/null | head -1 | sed 's/^**Project Type**: //' || echo "")
|
||||
update_agent_file() { local target_file="$1" agent_name="$2"; echo "Updating $agent_name context file: $target_file"; local temp_file=$(mktemp); if [ ! -f "$target_file" ]; then
|
||||
echo "Creating new $agent_name context file..."; if [ -f "$REPO_ROOT/templates/agent-file-template.md" ]; then cp "$REPO_ROOT/templates/agent-file-template.md" "$temp_file"; else echo "ERROR: Template not found"; return 1; fi;
|
||||
sed -i.bak "s/\[PROJECT NAME\]/$(basename $REPO_ROOT)/" "$temp_file"; sed -i.bak "s/\[DATE\]/$(date +%Y-%m-%d)/" "$temp_file"; sed -i.bak "s/\[EXTRACTED FROM ALL PLAN.MD FILES\]/- $NEW_LANG + $NEW_FRAMEWORK ($CURRENT_BRANCH)/" "$temp_file";
|
||||
if [[ "$NEW_PROJECT_TYPE" == *"web"* ]]; then sed -i.bak "s|\[ACTUAL STRUCTURE FROM PLANS\]|backend/\nfrontend/\ntests/|" "$temp_file"; else sed -i.bak "s|\[ACTUAL STRUCTURE FROM PLANS\]|src/\ntests/|" "$temp_file"; fi;
|
||||
if [[ "$NEW_LANG" == *"Python"* ]]; then COMMANDS="cd src && pytest && ruff check ."; elif [[ "$NEW_LANG" == *"Rust"* ]]; then COMMANDS="cargo test && cargo clippy"; elif [[ "$NEW_LANG" == *"JavaScript"* ]] || [[ "$NEW_LANG" == *"TypeScript"* ]]; then COMMANDS="npm test && npm run lint"; else COMMANDS="# Add commands for $NEW_LANG"; fi; sed -i.bak "s|\[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES\]|$COMMANDS|" "$temp_file";
|
||||
sed -i.bak "s|\[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE\]|$NEW_LANG: Follow standard conventions|" "$temp_file"; sed -i.bak "s|\[LAST 3 FEATURES AND WHAT THEY ADDED\]|- $CURRENT_BRANCH: Added $NEW_LANG + $NEW_FRAMEWORK|" "$temp_file"; rm "$temp_file.bak";
|
||||
else
|
||||
echo "Updating existing $agent_name context file..."; manual_start=$(grep -n "<!-- MANUAL ADDITIONS START -->" "$target_file" | cut -d: -f1); manual_end=$(grep -n "<!-- MANUAL ADDITIONS END -->" "$target_file" | cut -d: -f1); if [ -n "$manual_start" ] && [ -n "$manual_end" ]; then sed -n "${manual_start},${manual_end}p" "$target_file" > /tmp/manual_additions.txt; fi;
|
||||
python3 - "$target_file" <<'EOF'
|
||||
import re,sys,datetime
|
||||
target=sys.argv[1]
|
||||
with open(target) as f: content=f.read()
|
||||
NEW_LANG="'$NEW_LANG'";NEW_FRAMEWORK="'$NEW_FRAMEWORK'";CURRENT_BRANCH="'$CURRENT_BRANCH'";NEW_DB="'$NEW_DB'";NEW_PROJECT_TYPE="'$NEW_PROJECT_TYPE'"
|
||||
# Tech section
|
||||
m=re.search(r'## Active Technologies\n(.*?)\n\n',content, re.DOTALL)
|
||||
if m:
|
||||
existing=m.group(1)
|
||||
additions=[]
|
||||
if '$NEW_LANG' and '$NEW_LANG' not in existing: additions.append(f"- $NEW_LANG + $NEW_FRAMEWORK ($CURRENT_BRANCH)")
|
||||
if '$NEW_DB' and '$NEW_DB' not in existing and '$NEW_DB'!='N/A': additions.append(f"- $NEW_DB ($CURRENT_BRANCH)")
|
||||
if additions:
|
||||
new_block=existing+"\n"+"\n".join(additions)
|
||||
content=content.replace(m.group(0),f"## Active Technologies\n{new_block}\n\n")
|
||||
# Recent changes
|
||||
m2=re.search(r'## Recent Changes\n(.*?)(\n\n|$)',content, re.DOTALL)
|
||||
if m2:
|
||||
lines=[l for l in m2.group(1).strip().split('\n') if l]
|
||||
lines.insert(0,f"- $CURRENT_BRANCH: Added $NEW_LANG + $NEW_FRAMEWORK")
|
||||
lines=lines[:3]
|
||||
content=re.sub(r'## Recent Changes\n.*?(\n\n|$)', '## Recent Changes\n'+"\n".join(lines)+'\n\n', content, flags=re.DOTALL)
|
||||
content=re.sub(r'Last updated: \d{4}-\d{2}-\d{2}', 'Last updated: '+datetime.datetime.now().strftime('%Y-%m-%d'), content)
|
||||
open(target+'.tmp','w').write(content)
|
||||
EOF
|
||||
mv "$target_file.tmp" "$target_file"; if [ -f /tmp/manual_additions.txt ]; then sed -i.bak '/<!-- MANUAL ADDITIONS START -->/,/<!-- MANUAL ADDITIONS END -->/d' "$target_file"; cat /tmp/manual_additions.txt >> "$target_file"; rm /tmp/manual_additions.txt "$target_file.bak"; fi;
|
||||
fi; mv "$temp_file" "$target_file" 2>/dev/null || true; echo "✅ $agent_name context file updated successfully"; }
|
||||
case "$AGENT_TYPE" in
|
||||
claude) update_agent_file "$CLAUDE_FILE" "Claude Code" ;;
|
||||
gemini) update_agent_file "$GEMINI_FILE" "Gemini CLI" ;;
|
||||
copilot) update_agent_file "$COPILOT_FILE" "GitHub Copilot" ;;
|
||||
"") [ -f "$CLAUDE_FILE" ] && update_agent_file "$CLAUDE_FILE" "Claude Code"; [ -f "$GEMINI_FILE" ] && update_agent_file "$GEMINI_FILE" "Gemini CLI"; [ -f "$COPILOT_FILE" ] && update_agent_file "$COPILOT_FILE" "GitHub Copilot"; if [ ! -f "$CLAUDE_FILE" ] && [ ! -f "$GEMINI_FILE" ] && [ ! -f "$COPILOT_FILE" ]; then update_agent_file "$CLAUDE_FILE" "Claude Code"; fi ;;
|
||||
*) echo "ERROR: Unknown agent type '$AGENT_TYPE'"; exit 1 ;;
|
||||
esac
|
||||
echo; echo "Summary of changes:"; [ -n "$NEW_LANG" ] && echo "- Added language: $NEW_LANG"; [ -n "$NEW_FRAMEWORK" ] && echo "- Added framework: $NEW_FRAMEWORK"; [ -n "$NEW_DB" ] && [ "$NEW_DB" != "N/A" ] && echo "- Added database: $NEW_DB"; echo; echo "Usage: $0 [claude|gemini|copilot]"
|
||||
@@ -1,62 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# Check that implementation plan exists and find optional design documents
|
||||
# Usage: ./check-task-prerequisites.sh [--json]
|
||||
|
||||
set -e
|
||||
|
||||
JSON_MODE=false
|
||||
for arg in "$@"; do
|
||||
case "$arg" in
|
||||
--json) JSON_MODE=true ;;
|
||||
--help|-h) echo "Usage: $0 [--json]"; exit 0 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Source common functions
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
source "$SCRIPT_DIR/common.sh"
|
||||
|
||||
# Get all paths
|
||||
eval $(get_feature_paths)
|
||||
|
||||
# Check if on feature branch
|
||||
check_feature_branch "$CURRENT_BRANCH" || exit 1
|
||||
|
||||
# Check if feature directory exists
|
||||
if [[ ! -d "$FEATURE_DIR" ]]; then
|
||||
echo "ERROR: Feature directory not found: $FEATURE_DIR"
|
||||
echo "Run /specify first to create the feature structure."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check for implementation plan (required)
|
||||
if [[ ! -f "$IMPL_PLAN" ]]; then
|
||||
echo "ERROR: plan.md not found in $FEATURE_DIR"
|
||||
echo "Run /plan first to create the plan."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if $JSON_MODE; then
|
||||
# Build JSON array of available docs that actually exist
|
||||
docs=()
|
||||
[[ -f "$RESEARCH" ]] && docs+=("research.md")
|
||||
[[ -f "$DATA_MODEL" ]] && docs+=("data-model.md")
|
||||
([[ -d "$CONTRACTS_DIR" ]] && [[ -n "$(ls -A "$CONTRACTS_DIR" 2>/dev/null)" ]]) && docs+=("contracts/")
|
||||
[[ -f "$QUICKSTART" ]] && docs+=("quickstart.md")
|
||||
# join array into JSON
|
||||
json_docs=$(printf '"%s",' "${docs[@]}")
|
||||
json_docs="[${json_docs%,}]"
|
||||
printf '{"FEATURE_DIR":"%s","AVAILABLE_DOCS":%s}\n' "$FEATURE_DIR" "$json_docs"
|
||||
else
|
||||
# List available design documents (optional)
|
||||
echo "FEATURE_DIR:$FEATURE_DIR"
|
||||
echo "AVAILABLE_DOCS:"
|
||||
|
||||
# Use common check functions
|
||||
check_file "$RESEARCH" "research.md"
|
||||
check_file "$DATA_MODEL" "data-model.md"
|
||||
check_dir "$CONTRACTS_DIR" "contracts/"
|
||||
check_file "$QUICKSTART" "quickstart.md"
|
||||
fi
|
||||
|
||||
# Always succeed - task generation should work with whatever docs are available
|
||||
@@ -1,77 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# Common functions and variables for all scripts
|
||||
|
||||
# Get repository root
|
||||
get_repo_root() {
|
||||
git rev-parse --show-toplevel
|
||||
}
|
||||
|
||||
# Get current branch
|
||||
get_current_branch() {
|
||||
git rev-parse --abbrev-ref HEAD
|
||||
}
|
||||
|
||||
# Check if current branch is a feature branch
|
||||
# Returns 0 if valid, 1 if not
|
||||
check_feature_branch() {
|
||||
local branch="$1"
|
||||
if [[ ! "$branch" =~ ^[0-9]{3}- ]]; then
|
||||
echo "ERROR: Not on a feature branch. Current branch: $branch"
|
||||
echo "Feature branches should be named like: 001-feature-name"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
# Get feature directory path
|
||||
get_feature_dir() {
|
||||
local repo_root="$1"
|
||||
local branch="$2"
|
||||
echo "$repo_root/specs/$branch"
|
||||
}
|
||||
|
||||
# Get all standard paths for a feature
|
||||
# Usage: eval $(get_feature_paths)
|
||||
# Sets: REPO_ROOT, CURRENT_BRANCH, FEATURE_DIR, FEATURE_SPEC, IMPL_PLAN, TASKS
|
||||
get_feature_paths() {
|
||||
local repo_root=$(get_repo_root)
|
||||
local current_branch=$(get_current_branch)
|
||||
local feature_dir=$(get_feature_dir "$repo_root" "$current_branch")
|
||||
|
||||
echo "REPO_ROOT='$repo_root'"
|
||||
echo "CURRENT_BRANCH='$current_branch'"
|
||||
echo "FEATURE_DIR='$feature_dir'"
|
||||
echo "FEATURE_SPEC='$feature_dir/spec.md'"
|
||||
echo "IMPL_PLAN='$feature_dir/plan.md'"
|
||||
echo "TASKS='$feature_dir/tasks.md'"
|
||||
echo "RESEARCH='$feature_dir/research.md'"
|
||||
echo "DATA_MODEL='$feature_dir/data-model.md'"
|
||||
echo "QUICKSTART='$feature_dir/quickstart.md'"
|
||||
echo "CONTRACTS_DIR='$feature_dir/contracts'"
|
||||
}
|
||||
|
||||
# Check if a file exists and report
|
||||
check_file() {
|
||||
local file="$1"
|
||||
local description="$2"
|
||||
if [[ -f "$file" ]]; then
|
||||
echo " ✓ $description"
|
||||
return 0
|
||||
else
|
||||
echo " ✗ $description"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Check if a directory exists and has files
|
||||
check_dir() {
|
||||
local dir="$1"
|
||||
local description="$2"
|
||||
if [[ -d "$dir" ]] && [[ -n "$(ls -A "$dir" 2>/dev/null)" ]]; then
|
||||
echo " ✓ $description"
|
||||
return 0
|
||||
else
|
||||
echo " ✗ $description"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
@@ -1,96 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# Create a new feature with branch, directory structure, and template
|
||||
# Usage: ./create-new-feature.sh "feature description"
|
||||
# ./create-new-feature.sh --json "feature description"
|
||||
|
||||
set -e
|
||||
|
||||
JSON_MODE=false
|
||||
|
||||
# Collect non-flag args
|
||||
ARGS=()
|
||||
for arg in "$@"; do
|
||||
case "$arg" in
|
||||
--json)
|
||||
JSON_MODE=true
|
||||
;;
|
||||
--help|-h)
|
||||
echo "Usage: $0 [--json] <feature_description>"; exit 0 ;;
|
||||
*)
|
||||
ARGS+=("$arg") ;;
|
||||
esac
|
||||
done
|
||||
|
||||
FEATURE_DESCRIPTION="${ARGS[*]}"
|
||||
if [ -z "$FEATURE_DESCRIPTION" ]; then
|
||||
echo "Usage: $0 [--json] <feature_description>" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Get repository root
|
||||
REPO_ROOT=$(git rev-parse --show-toplevel)
|
||||
SPECS_DIR="$REPO_ROOT/specs"
|
||||
|
||||
# Create specs directory if it doesn't exist
|
||||
mkdir -p "$SPECS_DIR"
|
||||
|
||||
# Find the highest numbered feature directory
|
||||
HIGHEST=0
|
||||
if [ -d "$SPECS_DIR" ]; then
|
||||
for dir in "$SPECS_DIR"/*; do
|
||||
if [ -d "$dir" ]; then
|
||||
dirname=$(basename "$dir")
|
||||
number=$(echo "$dirname" | grep -o '^[0-9]\+' || echo "0")
|
||||
number=$((10#$number))
|
||||
if [ "$number" -gt "$HIGHEST" ]; then
|
||||
HIGHEST=$number
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# Generate next feature number with zero padding
|
||||
NEXT=$((HIGHEST + 1))
|
||||
FEATURE_NUM=$(printf "%03d" "$NEXT")
|
||||
|
||||
# Create branch name from description
|
||||
BRANCH_NAME=$(echo "$FEATURE_DESCRIPTION" | \
|
||||
tr '[:upper:]' '[:lower:]' | \
|
||||
sed 's/[^a-z0-9]/-/g' | \
|
||||
sed 's/-\+/-/g' | \
|
||||
sed 's/^-//' | \
|
||||
sed 's/-$//')
|
||||
|
||||
# Extract 2-3 meaningful words
|
||||
WORDS=$(echo "$BRANCH_NAME" | tr '-' '\n' | grep -v '^$' | head -3 | tr '\n' '-' | sed 's/-$//')
|
||||
|
||||
# Final branch name
|
||||
BRANCH_NAME="${FEATURE_NUM}-${WORDS}"
|
||||
|
||||
# Create and switch to new branch
|
||||
git checkout -b "$BRANCH_NAME"
|
||||
|
||||
# Create feature directory
|
||||
FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
|
||||
mkdir -p "$FEATURE_DIR"
|
||||
|
||||
# Copy template if it exists
|
||||
TEMPLATE="$REPO_ROOT/templates/spec-template.md"
|
||||
SPEC_FILE="$FEATURE_DIR/spec.md"
|
||||
|
||||
if [ -f "$TEMPLATE" ]; then
|
||||
cp "$TEMPLATE" "$SPEC_FILE"
|
||||
else
|
||||
echo "Warning: Template not found at $TEMPLATE" >&2
|
||||
touch "$SPEC_FILE"
|
||||
fi
|
||||
|
||||
if $JSON_MODE; then
|
||||
printf '{"BRANCH_NAME":"%s","SPEC_FILE":"%s","FEATURE_NUM":"%s"}\n' \
|
||||
"$BRANCH_NAME" "$SPEC_FILE" "$FEATURE_NUM"
|
||||
else
|
||||
# Output results for the LLM to use (legacy key: value format)
|
||||
echo "BRANCH_NAME: $BRANCH_NAME"
|
||||
echo "SPEC_FILE: $SPEC_FILE"
|
||||
echo "FEATURE_NUM: $FEATURE_NUM"
|
||||
fi
|
||||
@@ -1,23 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# Get paths for current feature branch without creating anything
|
||||
# Used by commands that need to find existing feature files
|
||||
|
||||
set -e
|
||||
|
||||
# Source common functions
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
source "$SCRIPT_DIR/common.sh"
|
||||
|
||||
# Get all paths
|
||||
eval $(get_feature_paths)
|
||||
|
||||
# Check if on feature branch
|
||||
check_feature_branch "$CURRENT_BRANCH" || exit 1
|
||||
|
||||
# Output paths (don't create anything)
|
||||
echo "REPO_ROOT: $REPO_ROOT"
|
||||
echo "BRANCH: $CURRENT_BRANCH"
|
||||
echo "FEATURE_DIR: $FEATURE_DIR"
|
||||
echo "FEATURE_SPEC: $FEATURE_SPEC"
|
||||
echo "IMPL_PLAN: $IMPL_PLAN"
|
||||
echo "TASKS: $TASKS"
|
||||
35
scripts/powershell/check-task-prerequisites.ps1
Normal file
@@ -0,0 +1,35 @@
|
||||
#!/usr/bin/env pwsh
|
||||
[CmdletBinding()]
|
||||
param([switch]$Json)
|
||||
$ErrorActionPreference = 'Stop'
|
||||
. "$PSScriptRoot/common.ps1"
|
||||
|
||||
$paths = Get-FeaturePathsEnv
|
||||
if (-not (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH)) { exit 1 }
|
||||
|
||||
if (-not (Test-Path $paths.FEATURE_DIR -PathType Container)) {
|
||||
Write-Output "ERROR: Feature directory not found: $($paths.FEATURE_DIR)"
|
||||
Write-Output "Run /specify first to create the feature structure."
|
||||
exit 1
|
||||
}
|
||||
if (-not (Test-Path $paths.IMPL_PLAN -PathType Leaf)) {
|
||||
Write-Output "ERROR: plan.md not found in $($paths.FEATURE_DIR)"
|
||||
Write-Output "Run /plan first to create the plan."
|
||||
exit 1
|
||||
}
|
||||
|
||||
if ($Json) {
|
||||
$docs = @()
|
||||
if (Test-Path $paths.RESEARCH) { $docs += 'research.md' }
|
||||
if (Test-Path $paths.DATA_MODEL) { $docs += 'data-model.md' }
|
||||
if ((Test-Path $paths.CONTRACTS_DIR) -and (Get-ChildItem -Path $paths.CONTRACTS_DIR -ErrorAction SilentlyContinue | Select-Object -First 1)) { $docs += 'contracts/' }
|
||||
if (Test-Path $paths.QUICKSTART) { $docs += 'quickstart.md' }
|
||||
[PSCustomObject]@{ FEATURE_DIR=$paths.FEATURE_DIR; AVAILABLE_DOCS=$docs } | ConvertTo-Json -Compress
|
||||
} else {
|
||||
Write-Output "FEATURE_DIR:$($paths.FEATURE_DIR)"
|
||||
Write-Output "AVAILABLE_DOCS:"
|
||||
Test-FileExists -Path $paths.RESEARCH -Description 'research.md' | Out-Null
|
||||
Test-FileExists -Path $paths.DATA_MODEL -Description 'data-model.md' | Out-Null
|
||||
Test-DirHasFiles -Path $paths.CONTRACTS_DIR -Description 'contracts/' | Out-Null
|
||||
Test-FileExists -Path $paths.QUICKSTART -Description 'quickstart.md' | Out-Null
|
||||
}
|
||||
65
scripts/powershell/common.ps1
Normal file
@@ -0,0 +1,65 @@
|
||||
#!/usr/bin/env pwsh
|
||||
# Common PowerShell functions analogous to common.sh (moved to powershell/)
|
||||
|
||||
function Get-RepoRoot {
|
||||
git rev-parse --show-toplevel
|
||||
}
|
||||
|
||||
function Get-CurrentBranch {
|
||||
git rev-parse --abbrev-ref HEAD
|
||||
}
|
||||
|
||||
function Test-FeatureBranch {
|
||||
param([string]$Branch)
|
||||
if ($Branch -notmatch '^[0-9]{3}-') {
|
||||
Write-Output "ERROR: Not on a feature branch. Current branch: $Branch"
|
||||
Write-Output "Feature branches should be named like: 001-feature-name"
|
||||
return $false
|
||||
}
|
||||
return $true
|
||||
}
|
||||
|
||||
function Get-FeatureDir {
|
||||
param([string]$RepoRoot, [string]$Branch)
|
||||
Join-Path $RepoRoot "specs/$Branch"
|
||||
}
|
||||
|
||||
function Get-FeaturePathsEnv {
|
||||
$repoRoot = Get-RepoRoot
|
||||
$currentBranch = Get-CurrentBranch
|
||||
$featureDir = Get-FeatureDir -RepoRoot $repoRoot -Branch $currentBranch
|
||||
[PSCustomObject]@{
|
||||
REPO_ROOT = $repoRoot
|
||||
CURRENT_BRANCH = $currentBranch
|
||||
FEATURE_DIR = $featureDir
|
||||
FEATURE_SPEC = Join-Path $featureDir 'spec.md'
|
||||
IMPL_PLAN = Join-Path $featureDir 'plan.md'
|
||||
TASKS = Join-Path $featureDir 'tasks.md'
|
||||
RESEARCH = Join-Path $featureDir 'research.md'
|
||||
DATA_MODEL = Join-Path $featureDir 'data-model.md'
|
||||
QUICKSTART = Join-Path $featureDir 'quickstart.md'
|
||||
CONTRACTS_DIR = Join-Path $featureDir 'contracts'
|
||||
}
|
||||
}
|
||||
|
||||
function Test-FileExists {
|
||||
param([string]$Path, [string]$Description)
|
||||
if (Test-Path -Path $Path -PathType Leaf) {
|
||||
Write-Output " ✓ $Description"
|
||||
return $true
|
||||
} else {
|
||||
Write-Output " ✗ $Description"
|
||||
return $false
|
||||
}
|
||||
}
|
||||
|
||||
function Test-DirHasFiles {
|
||||
param([string]$Path, [string]$Description)
|
||||
if ((Test-Path -Path $Path -PathType Container) -and (Get-ChildItem -Path $Path -ErrorAction SilentlyContinue | Where-Object { -not $_.PSIsContainer } | Select-Object -First 1)) {
|
||||
Write-Output " ✓ $Description"
|
||||
return $true
|
||||
} else {
|
||||
Write-Output " ✗ $Description"
|
||||
return $false
|
||||
}
|
||||
}
|
||||
52
scripts/powershell/create-new-feature.ps1
Normal file
@@ -0,0 +1,52 @@
|
||||
#!/usr/bin/env pwsh
|
||||
# Create a new feature (moved to powershell/)
|
||||
[CmdletBinding()]
|
||||
param(
|
||||
[switch]$Json,
|
||||
[Parameter(ValueFromRemainingArguments = $true)]
|
||||
[string[]]$FeatureDescription
|
||||
)
|
||||
$ErrorActionPreference = 'Stop'
|
||||
|
||||
if (-not $FeatureDescription -or $FeatureDescription.Count -eq 0) {
|
||||
Write-Error "Usage: ./create-new-feature.ps1 [-Json] <feature description>"; exit 1
|
||||
}
|
||||
$featureDesc = ($FeatureDescription -join ' ').Trim()
|
||||
|
||||
$repoRoot = git rev-parse --show-toplevel
|
||||
$specsDir = Join-Path $repoRoot 'specs'
|
||||
New-Item -ItemType Directory -Path $specsDir -Force | Out-Null
|
||||
|
||||
$highest = 0
|
||||
if (Test-Path $specsDir) {
|
||||
Get-ChildItem -Path $specsDir -Directory | ForEach-Object {
|
||||
if ($_.Name -match '^(\d{3})') {
|
||||
$num = [int]$matches[1]
|
||||
if ($num -gt $highest) { $highest = $num }
|
||||
}
|
||||
}
|
||||
}
|
||||
$next = $highest + 1
|
||||
$featureNum = ('{0:000}' -f $next)
|
||||
|
||||
$branchName = $featureDesc.ToLower() -replace '[^a-z0-9]', '-' -replace '-{2,}', '-' -replace '^-', '' -replace '-$', ''
|
||||
$words = ($branchName -split '-') | Where-Object { $_ } | Select-Object -First 3
|
||||
$branchName = "$featureNum-$([string]::Join('-', $words))"
|
||||
|
||||
git checkout -b $branchName | Out-Null
|
||||
|
||||
$featureDir = Join-Path $specsDir $branchName
|
||||
New-Item -ItemType Directory -Path $featureDir -Force | Out-Null
|
||||
|
||||
$template = Join-Path $repoRoot 'templates/spec-template.md'
|
||||
$specFile = Join-Path $featureDir 'spec.md'
|
||||
if (Test-Path $template) { Copy-Item $template $specFile -Force } else { New-Item -ItemType File -Path $specFile | Out-Null }
|
||||
|
||||
if ($Json) {
|
||||
$obj = [PSCustomObject]@{ BRANCH_NAME = $branchName; SPEC_FILE = $specFile; FEATURE_NUM = $featureNum }
|
||||
$obj | ConvertTo-Json -Compress
|
||||
} else {
|
||||
Write-Output "BRANCH_NAME: $branchName"
|
||||
Write-Output "SPEC_FILE: $specFile"
|
||||
Write-Output "FEATURE_NUM: $featureNum"
|
||||
}
|
||||
15
scripts/powershell/get-feature-paths.ps1
Normal file
@@ -0,0 +1,15 @@
|
||||
#!/usr/bin/env pwsh
|
||||
param()
|
||||
$ErrorActionPreference = 'Stop'
|
||||
|
||||
. "$PSScriptRoot/common.ps1"
|
||||
|
||||
$paths = Get-FeaturePathsEnv
|
||||
if (-not (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH)) { exit 1 }
|
||||
|
||||
Write-Output "REPO_ROOT: $($paths.REPO_ROOT)"
|
||||
Write-Output "BRANCH: $($paths.CURRENT_BRANCH)"
|
||||
Write-Output "FEATURE_DIR: $($paths.FEATURE_DIR)"
|
||||
Write-Output "FEATURE_SPEC: $($paths.FEATURE_SPEC)"
|
||||
Write-Output "IMPL_PLAN: $($paths.IMPL_PLAN)"
|
||||
Write-Output "TASKS: $($paths.TASKS)"
|
||||
21
scripts/powershell/setup-plan.ps1
Normal file
@@ -0,0 +1,21 @@
|
||||
#!/usr/bin/env pwsh
|
||||
[CmdletBinding()]
|
||||
param([switch]$Json)
|
||||
$ErrorActionPreference = 'Stop'
|
||||
. "$PSScriptRoot/common.ps1"
|
||||
|
||||
$paths = Get-FeaturePathsEnv
|
||||
if (-not (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH)) { exit 1 }
|
||||
|
||||
New-Item -ItemType Directory -Path $paths.FEATURE_DIR -Force | Out-Null
|
||||
$template = Join-Path $paths.REPO_ROOT 'templates/plan-template.md'
|
||||
if (Test-Path $template) { Copy-Item $template $paths.IMPL_PLAN -Force }
|
||||
|
||||
if ($Json) {
|
||||
[PSCustomObject]@{ FEATURE_SPEC=$paths.FEATURE_SPEC; IMPL_PLAN=$paths.IMPL_PLAN; SPECS_DIR=$paths.FEATURE_DIR; BRANCH=$paths.CURRENT_BRANCH } | ConvertTo-Json -Compress
|
||||
} else {
|
||||
Write-Output "FEATURE_SPEC: $($paths.FEATURE_SPEC)"
|
||||
Write-Output "IMPL_PLAN: $($paths.IMPL_PLAN)"
|
||||
Write-Output "SPECS_DIR: $($paths.FEATURE_DIR)"
|
||||
Write-Output "BRANCH: $($paths.CURRENT_BRANCH)"
|
||||
}
|
||||
91
scripts/powershell/update-agent-context.ps1
Normal file
@@ -0,0 +1,91 @@
|
||||
#!/usr/bin/env pwsh
|
||||
[CmdletBinding()]
|
||||
param([string]$AgentType)
|
||||
$ErrorActionPreference = 'Stop'
|
||||
|
||||
$repoRoot = git rev-parse --show-toplevel
|
||||
$currentBranch = git rev-parse --abbrev-ref HEAD
|
||||
$featureDir = Join-Path $repoRoot "specs/$currentBranch"
|
||||
$newPlan = Join-Path $featureDir 'plan.md'
|
||||
if (-not (Test-Path $newPlan)) { Write-Error "ERROR: No plan.md found at $newPlan"; exit 1 }
|
||||
|
||||
$claudeFile = Join-Path $repoRoot 'CLAUDE.md'
|
||||
$geminiFile = Join-Path $repoRoot 'GEMINI.md'
|
||||
$copilotFile = Join-Path $repoRoot '.github/copilot-instructions.md'
|
||||
|
||||
Write-Output "=== Updating agent context files for feature $currentBranch ==="
|
||||
|
||||
function Get-PlanValue($pattern) {
|
||||
if (-not (Test-Path $newPlan)) { return '' }
|
||||
$line = Select-String -Path $newPlan -Pattern $pattern | Select-Object -First 1
|
||||
if ($line) { return ($line.Line -replace "^\*\*$pattern\*\*: ", '') }
|
||||
return ''
|
||||
}
|
||||
|
||||
$newLang = Get-PlanValue 'Language/Version'
|
||||
$newFramework = Get-PlanValue 'Primary Dependencies'
|
||||
$newTesting = Get-PlanValue 'Testing'
|
||||
$newDb = Get-PlanValue 'Storage'
|
||||
$newProjectType = Get-PlanValue 'Project Type'
|
||||
|
||||
function Initialize-AgentFile($targetFile, $agentName) {
|
||||
if (Test-Path $targetFile) { return }
|
||||
$template = Join-Path $repoRoot 'templates/agent-file-template.md'
|
||||
if (-not (Test-Path $template)) { Write-Error "Template not found: $template"; return }
|
||||
$content = Get-Content $template -Raw
|
||||
$content = $content.Replace('[PROJECT NAME]', (Split-Path $repoRoot -Leaf))
|
||||
$content = $content.Replace('[DATE]', (Get-Date -Format 'yyyy-MM-dd'))
|
||||
$content = $content.Replace('[EXTRACTED FROM ALL PLAN.MD FILES]', "- $newLang + $newFramework ($currentBranch)")
|
||||
if ($newProjectType -match 'web') { $structure = "backend/`nfrontend/`ntests/" } else { $structure = "src/`ntests/" }
|
||||
$content = $content.Replace('[ACTUAL STRUCTURE FROM PLANS]', $structure)
|
||||
if ($newLang -match 'Python') { $commands = 'cd src && pytest && ruff check .' }
|
||||
elseif ($newLang -match 'Rust') { $commands = 'cargo test && cargo clippy' }
|
||||
elseif ($newLang -match 'JavaScript|TypeScript') { $commands = 'npm test && npm run lint' }
|
||||
else { $commands = "# Add commands for $newLang" }
|
||||
$content = $content.Replace('[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES]', $commands)
|
||||
$content = $content.Replace('[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE]', "${newLang}: Follow standard conventions")
|
||||
$content = $content.Replace('[LAST 3 FEATURES AND WHAT THEY ADDED]', "- ${currentBranch}: Added ${newLang} + ${newFramework}")
|
||||
$content | Set-Content $targetFile -Encoding UTF8
|
||||
}
|
||||
|
||||
function Update-AgentFile($targetFile, $agentName) {
|
||||
if (-not (Test-Path $targetFile)) { Initialize-AgentFile $targetFile $agentName; return }
|
||||
$content = Get-Content $targetFile -Raw
|
||||
if ($newLang -and ($content -notmatch [regex]::Escape($newLang))) { $content = $content -replace '(## Active Technologies\n)', "`$1- $newLang + $newFramework ($currentBranch)`n" }
|
||||
if ($newDb -and $newDb -ne 'N/A' -and ($content -notmatch [regex]::Escape($newDb))) { $content = $content -replace '(## Active Technologies\n)', "`$1- $newDb ($currentBranch)`n" }
|
||||
if ($content -match '## Recent Changes\n([\s\S]*?)(\n\n|$)') {
|
||||
$changesBlock = $matches[1].Trim().Split("`n")
|
||||
$changesBlock = ,"- $currentBranch: Added $newLang + $newFramework" + $changesBlock
|
||||
$changesBlock = $changesBlock | Where-Object { $_ } | Select-Object -First 3
|
||||
$joined = ($changesBlock -join "`n")
|
||||
$content = [regex]::Replace($content, '## Recent Changes\n([\s\S]*?)(\n\n|$)', "## Recent Changes`n$joined`n`n")
|
||||
}
|
||||
$content = [regex]::Replace($content, 'Last updated: \d{4}-\d{2}-\d{2}', "Last updated: $(Get-Date -Format 'yyyy-MM-dd')")
|
||||
$content | Set-Content $targetFile -Encoding UTF8
|
||||
Write-Output "✅ $agentName context file updated successfully"
|
||||
}
|
||||
|
||||
switch ($AgentType) {
|
||||
'claude' { Update-AgentFile $claudeFile 'Claude Code' }
|
||||
'gemini' { Update-AgentFile $geminiFile 'Gemini CLI' }
|
||||
'copilot' { Update-AgentFile $copilotFile 'GitHub Copilot' }
|
||||
'' {
|
||||
foreach ($pair in @(@{file=$claudeFile; name='Claude Code'}, @{file=$geminiFile; name='Gemini CLI'}, @{file=$copilotFile; name='GitHub Copilot'})) {
|
||||
if (Test-Path $pair.file) { Update-AgentFile $pair.file $pair.name }
|
||||
}
|
||||
if (-not (Test-Path $claudeFile) -and -not (Test-Path $geminiFile) -and -not (Test-Path $copilotFile)) {
|
||||
Write-Output 'No agent context files found. Creating Claude Code context file by default.'
|
||||
Update-AgentFile $claudeFile 'Claude Code'
|
||||
}
|
||||
}
|
||||
Default { Write-Error "ERROR: Unknown agent type '$AgentType'. Use: claude, gemini, copilot, or leave empty for all."; exit 1 }
|
||||
}
|
||||
|
||||
Write-Output ''
|
||||
Write-Output 'Summary of changes:'
|
||||
if ($newLang) { Write-Output "- Added language: $newLang" }
|
||||
if ($newFramework) { Write-Output "- Added framework: $newFramework" }
|
||||
if ($newDb -and $newDb -ne 'N/A') { Write-Output "- Added database: $newDb" }
|
||||
|
||||
Write-Output ''
|
||||
Write-Output 'Usage: ./update-agent-context.ps1 [claude|gemini|copilot]'
|
||||
@@ -1,44 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# Setup implementation plan structure for current branch
|
||||
# Returns paths needed for implementation plan generation
|
||||
# Usage: ./setup-plan.sh [--json]
|
||||
|
||||
set -e
|
||||
|
||||
JSON_MODE=false
|
||||
for arg in "$@"; do
|
||||
case "$arg" in
|
||||
--json) JSON_MODE=true ;;
|
||||
--help|-h) echo "Usage: $0 [--json]"; exit 0 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Source common functions
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
source "$SCRIPT_DIR/common.sh"
|
||||
|
||||
# Get all paths
|
||||
eval $(get_feature_paths)
|
||||
|
||||
# Check if on feature branch
|
||||
check_feature_branch "$CURRENT_BRANCH" || exit 1
|
||||
|
||||
# Create specs directory if it doesn't exist
|
||||
mkdir -p "$FEATURE_DIR"
|
||||
|
||||
# Copy plan template if it exists
|
||||
TEMPLATE="$REPO_ROOT/templates/plan-template.md"
|
||||
if [ -f "$TEMPLATE" ]; then
|
||||
cp "$TEMPLATE" "$IMPL_PLAN"
|
||||
fi
|
||||
|
||||
if $JSON_MODE; then
|
||||
printf '{"FEATURE_SPEC":"%s","IMPL_PLAN":"%s","SPECS_DIR":"%s","BRANCH":"%s"}\n' \
|
||||
"$FEATURE_SPEC" "$IMPL_PLAN" "$FEATURE_DIR" "$CURRENT_BRANCH"
|
||||
else
|
||||
# Output all paths for LLM use
|
||||
echo "FEATURE_SPEC: $FEATURE_SPEC"
|
||||
echo "IMPL_PLAN: $IMPL_PLAN"
|
||||
echo "SPECS_DIR: $FEATURE_DIR"
|
||||
echo "BRANCH: $CURRENT_BRANCH"
|
||||
fi
|
||||
@@ -1,234 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# Incrementally update agent context files based on new feature plan
|
||||
# Supports: CLAUDE.md, GEMINI.md, and .github/copilot-instructions.md
|
||||
# O(1) operation - only reads current context file and new plan.md
|
||||
|
||||
set -e
|
||||
|
||||
REPO_ROOT=$(git rev-parse --show-toplevel)
|
||||
CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
|
||||
FEATURE_DIR="$REPO_ROOT/specs/$CURRENT_BRANCH"
|
||||
NEW_PLAN="$FEATURE_DIR/plan.md"
|
||||
|
||||
# Determine which agent context files to update
|
||||
CLAUDE_FILE="$REPO_ROOT/CLAUDE.md"
|
||||
GEMINI_FILE="$REPO_ROOT/GEMINI.md"
|
||||
COPILOT_FILE="$REPO_ROOT/.github/copilot-instructions.md"
|
||||
|
||||
# Allow override via argument
|
||||
AGENT_TYPE="$1"
|
||||
|
||||
if [ ! -f "$NEW_PLAN" ]; then
|
||||
echo "ERROR: No plan.md found at $NEW_PLAN"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "=== Updating agent context files for feature $CURRENT_BRANCH ==="
|
||||
|
||||
# Extract tech from new plan
|
||||
NEW_LANG=$(grep "^**Language/Version**: " "$NEW_PLAN" 2>/dev/null | head -1 | sed 's/^**Language\/Version**: //' | grep -v "NEEDS CLARIFICATION" || echo "")
|
||||
NEW_FRAMEWORK=$(grep "^**Primary Dependencies**: " "$NEW_PLAN" 2>/dev/null | head -1 | sed 's/^**Primary Dependencies**: //' | grep -v "NEEDS CLARIFICATION" || echo "")
|
||||
NEW_TESTING=$(grep "^**Testing**: " "$NEW_PLAN" 2>/dev/null | head -1 | sed 's/^**Testing**: //' | grep -v "NEEDS CLARIFICATION" || echo "")
|
||||
NEW_DB=$(grep "^**Storage**: " "$NEW_PLAN" 2>/dev/null | head -1 | sed 's/^**Storage**: //' | grep -v "N/A" | grep -v "NEEDS CLARIFICATION" || echo "")
|
||||
NEW_PROJECT_TYPE=$(grep "^**Project Type**: " "$NEW_PLAN" 2>/dev/null | head -1 | sed 's/^**Project Type**: //' || echo "")
|
||||
|
||||
# Function to update a single agent context file
|
||||
update_agent_file() {
|
||||
local target_file="$1"
|
||||
local agent_name="$2"
|
||||
|
||||
echo "Updating $agent_name context file: $target_file"
|
||||
|
||||
# Create temp file for new context
|
||||
local temp_file=$(mktemp)
|
||||
|
||||
# If file doesn't exist, create from template
|
||||
if [ ! -f "$target_file" ]; then
|
||||
echo "Creating new $agent_name context file..."
|
||||
|
||||
# Check if this is the SDD repo itself
|
||||
if [ -f "$REPO_ROOT/templates/agent-file-template.md" ]; then
|
||||
cp "$REPO_ROOT/templates/agent-file-template.md" "$temp_file"
|
||||
else
|
||||
echo "ERROR: Template not found at $REPO_ROOT/templates/agent-file-template.md"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Replace placeholders
|
||||
sed -i.bak "s/\[PROJECT NAME\]/$(basename $REPO_ROOT)/" "$temp_file"
|
||||
sed -i.bak "s/\[DATE\]/$(date +%Y-%m-%d)/" "$temp_file"
|
||||
sed -i.bak "s/\[EXTRACTED FROM ALL PLAN.MD FILES\]/- $NEW_LANG + $NEW_FRAMEWORK ($CURRENT_BRANCH)/" "$temp_file"
|
||||
|
||||
# Add project structure based on type
|
||||
if [[ "$NEW_PROJECT_TYPE" == *"web"* ]]; then
|
||||
sed -i.bak "s|\[ACTUAL STRUCTURE FROM PLANS\]|backend/\nfrontend/\ntests/|" "$temp_file"
|
||||
else
|
||||
sed -i.bak "s|\[ACTUAL STRUCTURE FROM PLANS\]|src/\ntests/|" "$temp_file"
|
||||
fi
|
||||
|
||||
# Add minimal commands
|
||||
if [[ "$NEW_LANG" == *"Python"* ]]; then
|
||||
COMMANDS="cd src && pytest && ruff check ."
|
||||
elif [[ "$NEW_LANG" == *"Rust"* ]]; then
|
||||
COMMANDS="cargo test && cargo clippy"
|
||||
elif [[ "$NEW_LANG" == *"JavaScript"* ]] || [[ "$NEW_LANG" == *"TypeScript"* ]]; then
|
||||
COMMANDS="npm test && npm run lint"
|
||||
else
|
||||
COMMANDS="# Add commands for $NEW_LANG"
|
||||
fi
|
||||
sed -i.bak "s|\[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES\]|$COMMANDS|" "$temp_file"
|
||||
|
||||
# Add code style
|
||||
sed -i.bak "s|\[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE\]|$NEW_LANG: Follow standard conventions|" "$temp_file"
|
||||
|
||||
# Add recent changes
|
||||
sed -i.bak "s|\[LAST 3 FEATURES AND WHAT THEY ADDED\]|- $CURRENT_BRANCH: Added $NEW_LANG + $NEW_FRAMEWORK|" "$temp_file"
|
||||
|
||||
rm "$temp_file.bak"
|
||||
else
|
||||
echo "Updating existing $agent_name context file..."
|
||||
|
||||
# Extract manual additions
|
||||
local manual_start=$(grep -n "<!-- MANUAL ADDITIONS START -->" "$target_file" | cut -d: -f1)
|
||||
local manual_end=$(grep -n "<!-- MANUAL ADDITIONS END -->" "$target_file" | cut -d: -f1)
|
||||
|
||||
if [ ! -z "$manual_start" ] && [ ! -z "$manual_end" ]; then
|
||||
sed -n "${manual_start},${manual_end}p" "$target_file" > /tmp/manual_additions.txt
|
||||
fi
|
||||
|
||||
# Parse existing file and create updated version
|
||||
python3 - << EOF
|
||||
import re
|
||||
import sys
|
||||
from datetime import datetime
|
||||
|
||||
# Read existing file
|
||||
with open("$target_file", 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
# Check if new tech already exists
|
||||
tech_section = re.search(r'## Active Technologies\n(.*?)\n\n', content, re.DOTALL)
|
||||
if tech_section:
|
||||
existing_tech = tech_section.group(1)
|
||||
|
||||
# Add new tech if not already present
|
||||
new_additions = []
|
||||
if "$NEW_LANG" and "$NEW_LANG" not in existing_tech:
|
||||
new_additions.append(f"- $NEW_LANG + $NEW_FRAMEWORK ($CURRENT_BRANCH)")
|
||||
if "$NEW_DB" and "$NEW_DB" not in existing_tech and "$NEW_DB" != "N/A":
|
||||
new_additions.append(f"- $NEW_DB ($CURRENT_BRANCH)")
|
||||
|
||||
if new_additions:
|
||||
updated_tech = existing_tech + "\n" + "\n".join(new_additions)
|
||||
content = content.replace(tech_section.group(0), f"## Active Technologies\n{updated_tech}\n\n")
|
||||
|
||||
# Update project structure if needed
|
||||
if "$NEW_PROJECT_TYPE" == "web" and "frontend/" not in content:
|
||||
struct_section = re.search(r'## Project Structure\n\`\`\`\n(.*?)\n\`\`\`', content, re.DOTALL)
|
||||
if struct_section:
|
||||
updated_struct = struct_section.group(1) + "\nfrontend/src/ # Web UI"
|
||||
content = re.sub(r'(## Project Structure\n\`\`\`\n).*?(\n\`\`\`)',
|
||||
f'\\1{updated_struct}\\2', content, flags=re.DOTALL)
|
||||
|
||||
# Add new commands if language is new
|
||||
if "$NEW_LANG" and f"# {NEW_LANG}" not in content:
|
||||
commands_section = re.search(r'## Commands\n\`\`\`bash\n(.*?)\n\`\`\`', content, re.DOTALL)
|
||||
if not commands_section:
|
||||
commands_section = re.search(r'## Commands\n(.*?)\n\n', content, re.DOTALL)
|
||||
|
||||
if commands_section:
|
||||
new_commands = commands_section.group(1)
|
||||
if "Python" in "$NEW_LANG":
|
||||
new_commands += "\ncd src && pytest && ruff check ."
|
||||
elif "Rust" in "$NEW_LANG":
|
||||
new_commands += "\ncargo test && cargo clippy"
|
||||
elif "JavaScript" in "$NEW_LANG" or "TypeScript" in "$NEW_LANG":
|
||||
new_commands += "\nnpm test && npm run lint"
|
||||
|
||||
if "```bash" in content:
|
||||
content = re.sub(r'(## Commands\n\`\`\`bash\n).*?(\n\`\`\`)',
|
||||
f'\\1{new_commands}\\2', content, flags=re.DOTALL)
|
||||
else:
|
||||
content = re.sub(r'(## Commands\n).*?(\n\n)',
|
||||
f'\\1{new_commands}\\2', content, flags=re.DOTALL)
|
||||
|
||||
# Update recent changes (keep only last 3)
|
||||
changes_section = re.search(r'## Recent Changes\n(.*?)(\n\n|$)', content, re.DOTALL)
|
||||
if changes_section:
|
||||
changes = changes_section.group(1).strip().split('\n')
|
||||
changes.insert(0, f"- $CURRENT_BRANCH: Added $NEW_LANG + $NEW_FRAMEWORK")
|
||||
# Keep only last 3
|
||||
changes = changes[:3]
|
||||
content = re.sub(r'(## Recent Changes\n).*?(\n\n|$)',
|
||||
f'\\1{chr(10).join(changes)}\\2', content, flags=re.DOTALL)
|
||||
|
||||
# Update date
|
||||
content = re.sub(r'Last updated: \d{4}-\d{2}-\d{2}',
|
||||
f'Last updated: {datetime.now().strftime("%Y-%m-%d")}', content)
|
||||
|
||||
# Write to temp file
|
||||
with open("$temp_file", 'w') as f:
|
||||
f.write(content)
|
||||
EOF
|
||||
|
||||
# Restore manual additions if they exist
|
||||
if [ -f /tmp/manual_additions.txt ]; then
|
||||
# Remove old manual section from temp file
|
||||
sed -i.bak '/<!-- MANUAL ADDITIONS START -->/,/<!-- MANUAL ADDITIONS END -->/d' "$temp_file"
|
||||
# Append manual additions
|
||||
cat /tmp/manual_additions.txt >> "$temp_file"
|
||||
rm /tmp/manual_additions.txt "$temp_file.bak"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Move temp file to final location
|
||||
mv "$temp_file" "$target_file"
|
||||
echo "✅ $agent_name context file updated successfully"
|
||||
}
|
||||
|
||||
# Update files based on argument or detect existing files
|
||||
case "$AGENT_TYPE" in
|
||||
"claude")
|
||||
update_agent_file "$CLAUDE_FILE" "Claude Code"
|
||||
;;
|
||||
"gemini")
|
||||
update_agent_file "$GEMINI_FILE" "Gemini CLI"
|
||||
;;
|
||||
"copilot")
|
||||
update_agent_file "$COPILOT_FILE" "GitHub Copilot"
|
||||
;;
|
||||
"")
|
||||
# Update all existing files
|
||||
[ -f "$CLAUDE_FILE" ] && update_agent_file "$CLAUDE_FILE" "Claude Code"
|
||||
[ -f "$GEMINI_FILE" ] && update_agent_file "$GEMINI_FILE" "Gemini CLI"
|
||||
[ -f "$COPILOT_FILE" ] && update_agent_file "$COPILOT_FILE" "GitHub Copilot"
|
||||
|
||||
# If no files exist, create based on current directory or ask user
|
||||
if [ ! -f "$CLAUDE_FILE" ] && [ ! -f "$GEMINI_FILE" ] && [ ! -f "$COPILOT_FILE" ]; then
|
||||
echo "No agent context files found. Creating Claude Code context file by default."
|
||||
update_agent_file "$CLAUDE_FILE" "Claude Code"
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
echo "ERROR: Unknown agent type '$AGENT_TYPE'. Use: claude, gemini, copilot, or leave empty for all."
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
echo ""
|
||||
echo "Summary of changes:"
|
||||
if [ ! -z "$NEW_LANG" ]; then
|
||||
echo "- Added language: $NEW_LANG"
|
||||
fi
|
||||
if [ ! -z "$NEW_FRAMEWORK" ]; then
|
||||
echo "- Added framework: $NEW_FRAMEWORK"
|
||||
fi
|
||||
if [ ! -z "$NEW_DB" ] && [ "$NEW_DB" != "N/A" ]; then
|
||||
echo "- Added database: $NEW_DB"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Usage: $0 [claude|gemini|copilot]"
|
||||
echo " - No argument: Update all existing agent context files"
|
||||
echo " - claude: Update only CLAUDE.md"
|
||||
echo " - gemini: Update only GEMINI.md"
|
||||
echo " - copilot: Update only .github/copilot-instructions.md"
|
||||
110
spec-driven.md
@@ -70,11 +70,11 @@ Today, practicing SDD requires assembling existing tools and maintaining discipl
|
||||
|
||||
The key is treating specifications as the source of truth, with code as the generated output that serves the specification rather than the other way around.
|
||||
|
||||
## Streamlining SDD with Claude Commands
|
||||
## Streamlining SDD with Commands
|
||||
|
||||
The SDD methodology is significantly enhanced through two powerful Claude commands that automate the specification and planning workflow:
|
||||
The SDD methodology is significantly enhanced through three powerful commands that automate the specification → planning → tasking workflow:
|
||||
|
||||
### The `new_feature` Command
|
||||
### The `/specify` Command
|
||||
|
||||
This command transforms a simple feature description (the user-prompt) into a complete, structured specification with automatic repository management:
|
||||
|
||||
@@ -83,7 +83,7 @@ This command transforms a simple feature description (the user-prompt) into a co
|
||||
3. **Template-Based Generation**: Copies and customizes the feature specification template with your requirements
|
||||
4. **Directory Structure**: Creates the proper `specs/[branch-name]/` structure for all related documents
|
||||
|
||||
### The `generate_plan` Command
|
||||
### The `/plan` Command
|
||||
|
||||
Once a feature specification exists, this command creates a comprehensive implementation plan:
|
||||
|
||||
@@ -91,14 +91,24 @@ Once a feature specification exists, this command creates a comprehensive implem
|
||||
2. **Constitutional Compliance**: Ensures alignment with project constitution and architectural principles
|
||||
3. **Technical Translation**: Converts business requirements into technical architecture and implementation details
|
||||
4. **Detailed Documentation**: Generates supporting documents for data models, API contracts, and test scenarios
|
||||
5. **Manual Testing Plans**: Creates step-by-step validation procedures for each user story
|
||||
5. **Quickstart Validation**: Produces a quickstart guide capturing key validation scenarios
|
||||
|
||||
### The `/tasks` Command
|
||||
|
||||
After a plan is created, this command analyzes the plan and related design documents to generate an executable task list:
|
||||
|
||||
1. **Inputs**: Reads `plan.md` (required) and, if present, `data-model.md`, `contracts/`, and `research.md`
|
||||
2. **Task Derivation**: Converts contracts, entities, and scenarios into specific tasks
|
||||
3. **Parallelization**: Marks independent tasks `[P]` and outlines safe parallel groups
|
||||
4. **Output**: Writes `tasks.md` in the feature directory, ready for execution by a Task agent
|
||||
|
||||
### Example: Building a Chat Feature
|
||||
|
||||
Here's how these commands transform the traditional development workflow:
|
||||
|
||||
**Traditional Approach:**
|
||||
```
|
||||
|
||||
```text
|
||||
1. Write a PRD in a document (2-3 hours)
|
||||
2. Create design documents (2-3 hours)
|
||||
3. Set up project structure manually (30 minutes)
|
||||
@@ -108,30 +118,33 @@ Total: ~12 hours of documentation work
|
||||
```
|
||||
|
||||
**SDD with Commands Approach:**
|
||||
|
||||
```bash
|
||||
# Step 1: Create the feature specification (5 minutes)
|
||||
/new_feature Real-time chat system with message history and user presence
|
||||
/specify Real-time chat system with message history and user presence
|
||||
|
||||
# This automatically:
|
||||
# - Creates branch "003-chat-system"
|
||||
# - Generates specs/003-chat-system/feature-spec.md
|
||||
# - Generates specs/003-chat-system/spec.md
|
||||
# - Populates it with structured requirements
|
||||
|
||||
# Step 2: Generate implementation plan (10 minutes)
|
||||
/generate_plan WebSocket for real-time messaging, PostgreSQL for history, Redis for presence
|
||||
# Step 2: Generate implementation plan (5 minutes)
|
||||
/plan WebSocket for real-time messaging, PostgreSQL for history, Redis for presence
|
||||
|
||||
# Step 3: Generate executable tasks (5 minutes)
|
||||
/tasks
|
||||
|
||||
# This automatically creates:
|
||||
# - specs/003-chat-system/implementation-plan.md
|
||||
# - specs/003-chat-system/implementation-details/
|
||||
# - 00-research.md (WebSocket library comparisons)
|
||||
# - 02-data-model.md (Message and User schemas)
|
||||
# - 03-api-contracts.md (WebSocket events, REST endpoints)
|
||||
# - 06-contract-tests.md (Message flow scenarios)
|
||||
# - 08-inter-library-tests.md (Database-WebSocket integration)
|
||||
# - specs/003-chat-system/manual-testing.md
|
||||
# - specs/003-chat-system/plan.md
|
||||
# - specs/003-chat-system/research.md (WebSocket library comparisons)
|
||||
# - specs/003-chat-system/data-model.md (Message and User schemas)
|
||||
# - specs/003-chat-system/contracts/ (WebSocket events, REST endpoints)
|
||||
# - specs/003-chat-system/quickstart.md (Key validation scenarios)
|
||||
# - specs/003-chat-system/tasks.md (Task list derived from the plan)
|
||||
```
|
||||
|
||||
In 15 minutes, you have:
|
||||
|
||||
- A complete feature specification with user stories and acceptance criteria
|
||||
- A detailed implementation plan with technology choices and rationale
|
||||
- API contracts and data models ready for code generation
|
||||
@@ -156,7 +169,8 @@ The true power of these commands lies not just in automation, but in how the tem
|
||||
#### 1. **Preventing Premature Implementation Details**
|
||||
|
||||
The feature specification template explicitly instructs:
|
||||
```
|
||||
|
||||
```text
|
||||
- ✅ Focus on WHAT users need and WHY
|
||||
- ❌ Avoid HOW to implement (no tech stack, APIs, code structure)
|
||||
```
|
||||
@@ -166,9 +180,10 @@ This constraint forces the LLM to maintain proper abstraction levels. When an LL
|
||||
#### 2. **Forcing Explicit Uncertainty Markers**
|
||||
|
||||
Both templates mandate the use of `[NEEDS CLARIFICATION]` markers:
|
||||
```
|
||||
|
||||
```text
|
||||
When creating this spec from a user prompt:
|
||||
1. **Mark all ambiguities**: Use [NEEDS CLARIFICATION: specific question]
|
||||
1. **Mark all ambiguities**: Use [NEEDS CLARIFICATION: specific question]
|
||||
2. **Don't guess**: If the prompt doesn't specify something, mark it
|
||||
```
|
||||
|
||||
@@ -177,10 +192,11 @@ This prevents the common LLM behavior of making plausible but potentially incorr
|
||||
#### 3. **Structured Thinking Through Checklists**
|
||||
|
||||
The templates include comprehensive checklists that act as "unit tests" for the specification:
|
||||
```
|
||||
|
||||
```markdown
|
||||
### Requirement Completeness
|
||||
- [ ] No [NEEDS CLARIFICATION] markers remain
|
||||
- [ ] Requirements are testable and unambiguous
|
||||
- [ ] Requirements are testable and unambiguous
|
||||
- [ ] Success criteria are measurable
|
||||
```
|
||||
|
||||
@@ -189,7 +205,8 @@ These checklists force the LLM to self-review its output systematically, catchin
|
||||
#### 4. **Constitutional Compliance Through Gates**
|
||||
|
||||
The implementation plan template enforces architectural principles through phase gates:
|
||||
```
|
||||
|
||||
```markdown
|
||||
### Phase -1: Pre-Implementation Gates
|
||||
#### Simplicity Gate (Article VII)
|
||||
- [ ] Using ≤3 projects?
|
||||
@@ -204,9 +221,10 @@ These gates prevent over-engineering by making the LLM explicitly justify any co
|
||||
#### 5. **Hierarchical Detail Management**
|
||||
|
||||
The templates enforce proper information architecture:
|
||||
```
|
||||
**IMPORTANT**: This implementation plan should remain high-level and readable.
|
||||
Any code samples, detailed algorithms, or extensive technical specifications
|
||||
|
||||
```text
|
||||
**IMPORTANT**: This implementation plan should remain high-level and readable.
|
||||
Any code samples, detailed algorithms, or extensive technical specifications
|
||||
must be placed in the appropriate `implementation-details/` file
|
||||
```
|
||||
|
||||
@@ -215,7 +233,8 @@ This prevents the common problem of specifications becoming unreadable code dump
|
||||
#### 6. **Test-First Thinking**
|
||||
|
||||
The implementation template enforces test-first development:
|
||||
```
|
||||
|
||||
```text
|
||||
### File Creation Order
|
||||
1. Create `contracts/` with API specifications
|
||||
2. Create test files in order: contract → integration → e2e → unit
|
||||
@@ -227,7 +246,8 @@ This ordering constraint ensures the LLM thinks about testability and contracts
|
||||
#### 7. **Preventing Speculative Features**
|
||||
|
||||
Templates explicitly discourage speculation:
|
||||
```
|
||||
|
||||
```text
|
||||
- [ ] No speculative or "might need" features
|
||||
- [ ] All phases have clear prerequisites and deliverables
|
||||
```
|
||||
@@ -237,6 +257,7 @@ This stops the LLM from adding "nice to have" features that complicate implement
|
||||
### The Compound Effect
|
||||
|
||||
These constraints work together to produce specifications that are:
|
||||
|
||||
- **Complete**: Checklists ensure nothing is forgotten
|
||||
- **Unambiguous**: Forced clarification markers highlight uncertainties
|
||||
- **Testable**: Test-first thinking baked into the process
|
||||
@@ -247,25 +268,29 @@ The templates transform the LLM from a creative writer into a disciplined specif
|
||||
|
||||
## The Constitutional Foundation: Enforcing Architectural Discipline
|
||||
|
||||
At the heart of SDD lies a constitution—a set of immutable principles that govern how specifications become code. The constitution (`base/memory/constitution.md`) acts as the architectural DNA of the system, ensuring that every generated implementation maintains consistency, simplicity, and quality.
|
||||
At the heart of SDD lies a constitution—a set of immutable principles that govern how specifications become code. The constitution (`memory/constitution.md`) acts as the architectural DNA of the system, ensuring that every generated implementation maintains consistency, simplicity, and quality.
|
||||
|
||||
### The Nine Articles of Development
|
||||
|
||||
The constitution defines nine articles that shape every aspect of the development process:
|
||||
|
||||
#### Article I: Library-First Principle
|
||||
|
||||
Every feature must begin as a standalone library—no exceptions. This forces modular design from the start:
|
||||
```
|
||||
Every feature in Specify MUST begin its existence as a standalone library.
|
||||
No feature shall be implemented directly within application code without
|
||||
|
||||
```text
|
||||
Every feature in Specify MUST begin its existence as a standalone library.
|
||||
No feature shall be implemented directly within application code without
|
||||
first being abstracted into a reusable library component.
|
||||
```
|
||||
|
||||
This principle ensures that specifications generate modular, reusable code rather than monolithic applications. When the LLM generates an implementation plan, it must structure features as libraries with clear boundaries and minimal dependencies.
|
||||
|
||||
#### Article II: CLI Interface Mandate
|
||||
|
||||
Every library must expose its functionality through a command-line interface:
|
||||
```
|
||||
|
||||
```text
|
||||
All CLI interfaces MUST:
|
||||
- Accept text as input (via stdin, arguments, or files)
|
||||
- Produce text as output (via stdout)
|
||||
@@ -275,8 +300,10 @@ All CLI interfaces MUST:
|
||||
This enforces observability and testability. The LLM cannot hide functionality inside opaque classes—everything must be accessible and verifiable through text-based interfaces.
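
As a rough illustration (the names are invented, not taken from the Specify codebase), a library that satisfies this mandate might wrap its core function in a thin text-in/text-out CLI:

```python
# Hypothetical sketch of a library exposing its functionality through a text CLI.
# "summarize" and the flag names are illustrative only.
import argparse
import json
import sys


def summarize(text: str, max_words: int = 50) -> str:
    """Core library function: pure text in, text out."""
    words = text.split()
    return " ".join(words[:max_words])


def main() -> None:
    parser = argparse.ArgumentParser(description="summarizer library CLI")
    parser.add_argument("--max-words", type=int, default=50)
    parser.add_argument("--json", action="store_true", help="emit structured output")
    args = parser.parse_args()

    result = summarize(sys.stdin.read(), args.max_words)  # text input via stdin
    # Text output via stdout, optionally structured for machine consumption.
    print(json.dumps({"summary": result}) if args.json else result)


if __name__ == "__main__":
    main()
```

Because every capability is reachable this way, behavior can be exercised and verified from the shell without touching internal classes.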
|
||||
|
||||
#### Article III: Test-First Imperative
|
||||
|
||||
The most transformative article—no code before tests:
|
||||
```
|
||||
|
||||
```text
|
||||
This is NON-NEGOTIABLE: All implementation MUST follow strict Test-Driven Development.
|
||||
No implementation code shall be written before:
|
||||
1. Unit tests are written
|
||||
@@ -287,8 +314,10 @@ No implementation code shall be written before:
|
||||
This completely inverts traditional AI code generation. Instead of generating code and hoping it works, the LLM must first generate comprehensive tests that define behavior, get them approved, and only then generate implementation.
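
As a rough illustration of the ordering (module and class names are invented for the chat example, not generated by the tooling), the approved tests exist and fail before any implementation module is written:

```python
# Hypothetical contract tests written and approved before the implementation exists.
# At this point `chat.history` has not been implemented, so both tests fail by design.
import pytest


def test_history_returns_messages_in_chronological_order():
    from chat.history import MessageStore  # implementation does not exist yet

    store = MessageStore()
    store.add("alice", "hello")
    store.add("bob", "hi")
    assert [m.author for m in store.list()] == ["alice", "bob"]


def test_history_rejects_empty_messages():
    from chat.history import MessageStore

    store = MessageStore()
    with pytest.raises(ValueError):
        store.add("alice", "")
```

Only after these red tests are reviewed does implementation generation begin.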
|
||||
|
||||
#### Articles VII & VIII: Simplicity and Anti-Abstraction
|
||||
|
||||
These paired articles combat over-engineering:
|
||||
```
|
||||
|
||||
```text
|
||||
Section 7.3: Minimal Project Structure
|
||||
- Maximum 3 projects for initial implementation
|
||||
- Additional projects require documented justification
|
||||
@@ -300,8 +329,10 @@ Section 8.1: Framework Trust
|
||||
When an LLM might naturally create elaborate abstractions, these articles force it to justify every layer of complexity. The implementation plan template's "Phase -1 Gates" directly enforce these principles.
|
||||
|
||||
#### Article IX: Integration-First Testing
|
||||
|
||||
Prioritizes real-world testing over isolated unit tests:
|
||||
```
|
||||
|
||||
```text
|
||||
Tests MUST use realistic environments:
|
||||
- Prefer real databases over mocks
|
||||
- Use actual service instances over stubs
|
||||
@@ -343,7 +374,8 @@ The constitution's power lies in its immutability. While implementation details
|
||||
### Constitutional Evolution
|
||||
|
||||
While principles are immutable, their application can evolve:
|
||||
```
|
||||
|
||||
```text
|
||||
Section 4.2: Amendment Process
|
||||
Modifications to this constitution require:
|
||||
- Explicit documentation of the rationale for change
|
||||
@@ -368,4 +400,4 @@ By embedding these principles into the specification and planning process, SDD e
|
||||
|
||||
This isn't about replacing developers or automating creativity. It's about amplifying human capability by automating mechanical translation. It's about creating a tight feedback loop where specifications, research, and code evolve together, each iteration bringing deeper understanding and better alignment between intent and implementation.
|
||||
|
||||
Software development needs better tools for maintaining alignment between intent and implementation. SDD provides the methodology for achieving this alignment through executable specifications that generate code rather than merely guiding it.
|
||||
Software development needs better tools for maintaining alignment between intent and implementation. SDD provides the methodology for achieving this alignment through executable specifications that generate code rather than merely guiding it.
|
||||
|
||||
5
src/apm_cli/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
"""APM-CLI package."""
|
||||
|
||||
from .version import get_version
|
||||
|
||||
__version__ = get_version()
|
||||
1
src/apm_cli/adapters/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Adapters package."""
|
||||
1
src/apm_cli/adapters/client/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Client adapters package."""
|
||||
39
src/apm_cli/adapters/client/base.py
Normal file
@@ -0,0 +1,39 @@
|
||||
"""Base adapter interface for MCP clients."""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
|
||||
class MCPClientAdapter(ABC):
|
||||
"""Base adapter for MCP clients."""
|
||||
|
||||
@abstractmethod
|
||||
def get_config_path(self):
|
||||
"""Get the path to the MCP configuration file."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def update_config(self, config_updates):
|
||||
"""Update the MCP configuration."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_current_config(self):
|
||||
"""Get the current MCP configuration."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def configure_mcp_server(self, server_url, server_name=None, enabled=True, env_overrides=None, server_info_cache=None, runtime_vars=None):
|
||||
"""Configure an MCP server in the client configuration.
|
||||
|
||||
Args:
|
||||
server_url (str): URL of the MCP server.
|
||||
server_name (str, optional): Name of the server. Defaults to None.
|
||||
enabled (bool, optional): Whether to enable the server. Defaults to True.
|
||||
env_overrides (dict, optional): Environment variable overrides. Defaults to None.
|
||||
server_info_cache (dict, optional): Pre-fetched server info to avoid duplicate registry calls.
|
||||
runtime_vars (dict, optional): Runtime variable values. Defaults to None.
|
||||
|
||||
Returns:
|
||||
bool: True if successful, False otherwise.
|
||||
"""
|
||||
pass
|
||||
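For orientation, a new client integration subclasses this interface and implements the four abstract methods. A minimal hypothetical adapter (assuming the package installs as `apm_cli`; it keeps configuration in memory rather than in a real client config file) might look like:

```python
# Minimal hypothetical implementation of the MCPClientAdapter interface above.
from apm_cli.adapters.client.base import MCPClientAdapter


class InMemoryClientAdapter(MCPClientAdapter):
    def __init__(self):
        self._config = {"servers": {}}

    def get_config_path(self):
        return "<in-memory>"  # no file on disk in this sketch

    def get_current_config(self):
        return self._config

    def update_config(self, config_updates):
        self._config["servers"].update(config_updates)
        return True

    def configure_mcp_server(self, server_url, server_name=None, enabled=True,
                             env_overrides=None, server_info_cache=None, runtime_vars=None):
        name = server_name or server_url.split("/")[-1]
        self.update_config({name: {"url": server_url, "enabled": enabled}})
        return True
```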
528
src/apm_cli/adapters/client/codex.py
Normal file
@@ -0,0 +1,528 @@
|
||||
"""OpenAI Codex CLI implementation of MCP client adapter.
|
||||
|
||||
This adapter implements the Codex CLI-specific handling of MCP server configuration,
|
||||
targeting the global ~/.codex/config.toml file as specified in the MCP installation
|
||||
architecture specification.
|
||||
"""
|
||||
|
||||
import os
|
||||
import toml
|
||||
from pathlib import Path
|
||||
from .base import MCPClientAdapter
|
||||
from ...registry.client import SimpleRegistryClient
|
||||
from ...registry.integration import RegistryIntegration
|
||||
|
||||
|
||||
class CodexClientAdapter(MCPClientAdapter):
|
||||
"""Codex CLI implementation of MCP client adapter.
|
||||
|
||||
This adapter handles Codex CLI-specific configuration for MCP servers using
|
||||
a global ~/.codex/config.toml file, following the TOML format for
|
||||
MCP server configuration.
|
||||
"""
|
||||
|
||||
def __init__(self, registry_url=None):
|
||||
"""Initialize the Codex CLI client adapter.
|
||||
|
||||
Args:
|
||||
registry_url (str, optional): URL of the MCP registry.
|
||||
If not provided, uses the MCP_REGISTRY_URL environment variable
|
||||
or falls back to the default GitHub registry.
|
||||
"""
|
||||
self.registry_client = SimpleRegistryClient(registry_url)
|
||||
self.registry_integration = RegistryIntegration(registry_url)
|
||||
|
||||
def get_config_path(self):
|
||||
"""Get the path to the Codex CLI MCP configuration file.
|
||||
|
||||
Returns:
|
||||
str: Path to ~/.codex/config.toml
|
||||
"""
|
||||
codex_dir = Path.home() / ".codex"
|
||||
return str(codex_dir / "config.toml")
|
||||
|
||||
def update_config(self, config_updates):
|
||||
"""Update the Codex CLI MCP configuration.
|
||||
|
||||
Args:
|
||||
config_updates (dict): Configuration updates to apply.
|
||||
"""
|
||||
current_config = self.get_current_config()
|
||||
|
||||
# Ensure mcp_servers section exists
|
||||
if "mcp_servers" not in current_config:
|
||||
current_config["mcp_servers"] = {}
|
||||
|
||||
# Apply updates to mcp_servers section
|
||||
current_config["mcp_servers"].update(config_updates)
|
||||
|
||||
# Write back to file
|
||||
config_path = Path(self.get_config_path())
|
||||
|
||||
# Ensure directory exists
|
||||
config_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
with open(config_path, 'w') as f:
|
||||
toml.dump(current_config, f)
|
||||
|
||||
def get_current_config(self):
|
||||
"""Get the current Codex CLI MCP configuration.
|
||||
|
||||
Returns:
|
||||
dict: Current configuration, or empty dict if file doesn't exist.
|
||||
"""
|
||||
config_path = self.get_config_path()
|
||||
|
||||
if not os.path.exists(config_path):
|
||||
return {}
|
||||
|
||||
try:
|
||||
with open(config_path, 'r') as f:
|
||||
return toml.load(f)
|
||||
except (toml.TomlDecodeError, IOError):
|
||||
return {}
|
||||
|
||||
def configure_mcp_server(self, server_url, server_name=None, enabled=True, env_overrides=None, server_info_cache=None, runtime_vars=None):
|
||||
"""Configure an MCP server in Codex CLI configuration.
|
||||
|
||||
This method follows the Codex CLI MCP configuration format with
|
||||
mcp_servers sections in the TOML configuration.
|
||||
|
||||
Args:
|
||||
server_url (str): URL or identifier of the MCP server.
|
||||
server_name (str, optional): Name of the server. Defaults to None.
|
||||
enabled (bool, optional): Ignored parameter, kept for API compatibility.
|
||||
env_overrides (dict, optional): Pre-collected environment variable overrides.
|
||||
server_info_cache (dict, optional): Pre-fetched server info to avoid duplicate registry calls.
|
||||
runtime_vars (dict, optional): Runtime variable values. Defaults to None.
|
||||
|
||||
Returns:
|
||||
bool: True if successful, False otherwise.
|
||||
"""
|
||||
if not server_url:
|
||||
print("Error: server_url cannot be empty")
|
||||
return False
|
||||
|
||||
try:
|
||||
# Use cached server info if available, otherwise fetch from registry
|
||||
if server_info_cache and server_url in server_info_cache:
|
||||
server_info = server_info_cache[server_url]
|
||||
else:
|
||||
# Fallback to registry lookup if not cached
|
||||
server_info = self.registry_client.find_server_by_reference(server_url)
|
||||
|
||||
# Fail if server is not found in registry - security requirement
|
||||
if not server_info:
|
||||
print(f"Error: MCP server '{server_url}' not found in registry")
|
||||
return False
|
||||
|
||||
# Check for remote servers early - Codex doesn't support remote/SSE servers
|
||||
remotes = server_info.get("remotes", [])
|
||||
packages = server_info.get("packages", [])
|
||||
|
||||
# If server has only remote endpoints and no packages, it's a remote-only server
|
||||
if remotes and not packages:
|
||||
print(f"⚠️ Warning: MCP server '{server_url}' is a remote server (SSE type)")
|
||||
print(" Codex CLI only supports local servers with command/args configuration")
|
||||
print(" Remote servers are not supported by Codex CLI")
|
||||
print(" Skipping installation for Codex CLI")
|
||||
return False
|
||||
|
||||
# Determine the server name for configuration key
|
||||
if server_name:
|
||||
# Use explicitly provided server name
|
||||
config_key = server_name
|
||||
else:
|
||||
# Extract name from server_url (part after last slash)
|
||||
# For URLs like "microsoft/azure-devops-mcp" -> "azure-devops-mcp"
|
||||
# For URLs like "github/github-mcp-server" -> "github-mcp-server"
|
||||
if '/' in server_url:
|
||||
config_key = server_url.split('/')[-1]
|
||||
else:
|
||||
# Fallback to full server_url if no slash
|
||||
config_key = server_url
|
||||
|
||||
# Generate server configuration with environment variable resolution
|
||||
server_config = self._format_server_config(server_info, env_overrides, runtime_vars)
|
||||
|
||||
# Update configuration using the chosen key
|
||||
self.update_config({config_key: server_config})
|
||||
|
||||
print(f"Successfully configured MCP server '{config_key}' for Codex CLI")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error configuring MCP server: {e}")
|
||||
return False
|
||||
|
||||
def _format_server_config(self, server_info, env_overrides=None, runtime_vars=None):
|
||||
"""Format server information into Codex CLI MCP configuration format.
|
||||
|
||||
Args:
|
||||
server_info (dict): Server information from registry.
|
||||
env_overrides (dict, optional): Pre-collected environment variable overrides.
|
||||
runtime_vars (dict, optional): Runtime variable values.
|
||||
|
||||
Returns:
|
||||
dict: Formatted server configuration for Codex CLI.
|
||||
"""
|
||||
# Default configuration structure with registry ID for conflict detection
|
||||
config = {
|
||||
"command": "unknown",
|
||||
"args": [],
|
||||
"env": {},
|
||||
"id": server_info.get("id", "") # Add registry UUID for conflict detection
|
||||
}
|
||||
|
||||
# Note: Remote servers (SSE type) are handled in configure_mcp_server and rejected early
|
||||
# This method only handles local servers with packages
|
||||
|
||||
# Get packages from server info
|
||||
packages = server_info.get("packages", [])
|
||||
|
||||
if not packages:
|
||||
# If no packages are available, this indicates incomplete server configuration
|
||||
# This should fail installation with a clear error message
|
||||
raise ValueError(f"MCP server has no package information available in registry. "
|
||||
f"This appears to be a temporary registry issue or the server is remote-only. "
|
||||
f"Server: {server_info.get('name', 'unknown')}")
|
||||
|
||||
if packages:
|
||||
# Use the first package for configuration (prioritize npm, then docker, then others)
|
||||
package = self._select_best_package(packages)
|
||||
|
||||
if package:
|
||||
registry_name = package.get("registry_name", "")
|
||||
package_name = package.get("name", "")
|
||||
runtime_hint = package.get("runtime_hint", "")
|
||||
runtime_arguments = package.get("runtime_arguments", [])
|
||||
package_arguments = package.get("package_arguments", [])
|
||||
env_vars = package.get("environment_variables", [])
|
||||
|
||||
# Resolve environment variables first
|
||||
resolved_env = self._process_environment_variables(env_vars, env_overrides)
|
||||
|
||||
# Process arguments to extract simple string values
|
||||
processed_runtime_args = self._process_arguments(runtime_arguments, resolved_env, runtime_vars)
|
||||
processed_package_args = self._process_arguments(package_arguments, resolved_env, runtime_vars)
|
||||
|
||||
# Generate command and args based on package type
|
||||
if registry_name == "npm":
|
||||
config["command"] = runtime_hint or "npx"
|
||||
# For npm packages, use runtime_arguments directly as they contain the complete npx command
|
||||
config["args"] = processed_runtime_args + processed_package_args
|
||||
# For NPM packages, also use env block for environment variables
|
||||
if resolved_env:
|
||||
config["env"] = resolved_env
|
||||
elif registry_name == "docker":
|
||||
config["command"] = "docker"
|
||||
|
||||
# For Docker packages in Codex TOML format:
|
||||
# - Ensure all environment variables from resolved_env are represented as -e flags in args
|
||||
# - Put actual environment variable values in separate [env] section
|
||||
config["args"] = self._ensure_docker_env_flags(processed_runtime_args + processed_package_args, resolved_env)
|
||||
|
||||
# Environment variables go in separate env section for Codex TOML format
|
||||
if resolved_env:
|
||||
config["env"] = resolved_env
|
||||
elif registry_name == "pypi":
|
||||
config["command"] = runtime_hint or "uvx"
|
||||
config["args"] = [package_name] + processed_runtime_args + processed_package_args
|
||||
# For PyPI packages, use env block for environment variables
|
||||
if resolved_env:
|
||||
config["env"] = resolved_env
|
||||
elif registry_name == "homebrew":
|
||||
# For homebrew packages, assume the binary name is the command
|
||||
config["command"] = package_name.split('/')[-1] if '/' in package_name else package_name
|
||||
config["args"] = processed_runtime_args + processed_package_args
|
||||
# For Homebrew packages, use env block for environment variables
|
||||
if resolved_env:
|
||||
config["env"] = resolved_env
|
||||
else:
|
||||
# Generic package handling
|
||||
config["command"] = runtime_hint or package_name
|
||||
config["args"] = processed_runtime_args + processed_package_args
|
||||
# For generic packages, use env block for environment variables
|
||||
if resolved_env:
|
||||
config["env"] = resolved_env
|
||||
|
||||
return config
|
||||
|
||||
def _process_arguments(self, arguments, resolved_env=None, runtime_vars=None):
|
||||
"""Process argument objects to extract simple string values with environment resolution.
|
||||
|
||||
Args:
|
||||
arguments (list): List of argument objects from registry.
|
||||
resolved_env (dict): Resolved environment variables.
|
||||
runtime_vars (dict): Runtime variable values.
|
||||
|
||||
Returns:
|
||||
list: List of processed argument strings.
|
||||
"""
|
||||
if resolved_env is None:
|
||||
resolved_env = {}
|
||||
if runtime_vars is None:
|
||||
runtime_vars = {}
|
||||
|
||||
processed = []
|
||||
|
||||
for arg in arguments:
|
||||
if isinstance(arg, dict):
|
||||
# Extract value from argument object
|
||||
arg_type = arg.get("type", "")
|
||||
if arg_type == "positional":
|
||||
value = arg.get("value", arg.get("default", ""))
|
||||
if value:
|
||||
# Resolve both environment and runtime variable placeholders with actual values
|
||||
processed_value = self._resolve_variable_placeholders(str(value), resolved_env, runtime_vars)
|
||||
processed.append(processed_value)
|
||||
elif arg_type == "named":
|
||||
# For named arguments, the flag name is in the "value" field
|
||||
flag_name = arg.get("value", "")
|
||||
if flag_name:
|
||||
processed.append(flag_name)
|
||||
# Some named arguments might have additional values (rare)
|
||||
additional_value = arg.get("name", "")
|
||||
if additional_value and additional_value != flag_name and not additional_value.startswith("-"):
|
||||
processed_value = self._resolve_variable_placeholders(str(additional_value), resolved_env, runtime_vars)
|
||||
processed.append(processed_value)
|
||||
elif isinstance(arg, str):
|
||||
# Already a string, use as-is but resolve variable placeholders
|
||||
processed_value = self._resolve_variable_placeholders(arg, resolved_env, runtime_vars)
|
||||
processed.append(processed_value)
|
||||
|
||||
return processed
|
||||
|
||||
def _process_environment_variables(self, env_vars, env_overrides=None):
|
||||
"""Process environment variable definitions and resolve actual values.
|
||||
|
||||
Args:
|
||||
env_vars (list): List of environment variable definitions.
|
||||
env_overrides (dict, optional): Pre-collected environment variable overrides.
|
||||
|
||||
Returns:
|
||||
dict: Dictionary of resolved environment variable values.
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
from rich.prompt import Prompt
|
||||
|
||||
resolved = {}
|
||||
env_overrides = env_overrides or {}
|
||||
|
||||
# If env_overrides is provided, it means the CLI has already handled environment variable collection
|
||||
# In this case, we should NEVER prompt for additional variables
|
||||
skip_prompting = bool(env_overrides)
|
||||
|
||||
# Check for CI/automated environment via APM_E2E_TESTS flag (more reliable than TTY detection)
|
||||
if os.getenv('APM_E2E_TESTS') == '1':
|
||||
skip_prompting = True
|
||||
print(f"💡 APM_E2E_TESTS detected, will skip environment variable prompts")
|
||||
|
||||
# Also skip prompting if we're in a non-interactive environment (fallback)
|
||||
is_interactive = sys.stdin.isatty() and sys.stdout.isatty()
|
||||
if not is_interactive:
|
||||
skip_prompting = True
|
||||
|
||||
# Add default GitHub MCP server environment variables for essential functionality first
|
||||
# This ensures variables have defaults when user provides empty values or they're optional
|
||||
default_github_env = {
|
||||
"GITHUB_TOOLSETS": "context",
|
||||
"GITHUB_DYNAMIC_TOOLSETS": "1"
|
||||
}
|
||||
|
||||
# Track which variables were explicitly provided with empty values (user wants defaults)
|
||||
empty_value_vars = set()
|
||||
if env_overrides:
|
||||
for key, value in env_overrides.items():
|
||||
if not value or not value.strip():
|
||||
empty_value_vars.add(key)
|
||||
|
||||
for env_var in env_vars:
|
||||
if isinstance(env_var, dict):
|
||||
name = env_var.get("name", "")
|
||||
description = env_var.get("description", "")
|
||||
required = env_var.get("required", True)
|
||||
|
||||
if name:
|
||||
# First check overrides, then environment
|
||||
value = env_overrides.get(name) or os.getenv(name)
|
||||
|
||||
# Only prompt if not provided in overrides or environment AND it's required AND we're not in managed override mode
|
||||
if not value and required and not skip_prompting:
|
||||
# Only prompt if not provided in overrides
|
||||
prompt_text = f"Enter value for {name}"
|
||||
if description:
|
||||
prompt_text += f" ({description})"
|
||||
value = Prompt.ask(prompt_text, password=True if "token" in name.lower() or "key" in name.lower() else False)
|
||||
|
||||
# Add variable if it has a value OR if user explicitly provided empty and we have a default
|
||||
if value and value.strip():
|
||||
resolved[name] = value
|
||||
elif name in empty_value_vars and name in default_github_env:
|
||||
# User provided empty value and we have a default - use default
|
||||
resolved[name] = default_github_env[name]
|
||||
elif not required and name in default_github_env:
|
||||
# Variable is optional and we have a default - use default
|
||||
resolved[name] = default_github_env[name]
|
||||
elif skip_prompting and name in default_github_env:
|
||||
# Non-interactive environment and we have a default - use default
|
||||
resolved[name] = default_github_env[name]
|
||||
|
||||
return resolved
|
||||
|
||||
def _resolve_variable_placeholders(self, value, resolved_env, runtime_vars):
|
||||
"""Resolve both environment and runtime variable placeholders in values.
|
||||
|
||||
Args:
|
||||
value (str): Value that may contain placeholders like <TOKEN_NAME> or {runtime_var}
|
||||
resolved_env (dict): Dictionary of resolved environment variables.
|
||||
runtime_vars (dict): Dictionary of resolved runtime variables.
|
||||
|
||||
Returns:
|
||||
str: Processed value with actual variable values.
|
||||
"""
|
||||
import re
|
||||
|
||||
if not value:
|
||||
return value
|
||||
|
||||
processed = str(value)
|
||||
|
||||
# Replace <TOKEN_NAME> with actual values from resolved_env (for Docker env vars)
|
||||
env_pattern = r'<([A-Z_][A-Z0-9_]*)>'
|
||||
|
||||
def replace_env_var(match):
|
||||
env_name = match.group(1)
|
||||
return resolved_env.get(env_name, match.group(0)) # Return original if not found
|
||||
|
||||
processed = re.sub(env_pattern, replace_env_var, processed)
|
||||
|
||||
# Replace {runtime_var} with actual values from runtime_vars
|
||||
runtime_pattern = r'\{([a-zA-Z_][a-zA-Z0-9_]*)\}'
|
||||
|
||||
def replace_runtime_var(match):
|
||||
var_name = match.group(1)
|
||||
return runtime_vars.get(var_name, match.group(0)) # Return original if not found
|
||||
|
||||
processed = re.sub(runtime_pattern, replace_runtime_var, processed)
|
||||
|
||||
return processed
|
||||
|
||||
def _resolve_env_placeholders(self, value, resolved_env):
|
||||
"""Legacy method for backward compatibility. Use _resolve_variable_placeholders instead."""
|
||||
return self._resolve_variable_placeholders(value, resolved_env, {})
|
||||
|
||||
def _ensure_docker_env_flags(self, base_args, env_vars):
|
||||
"""Ensure all environment variables are represented as -e flags in Docker args.
|
||||
|
||||
For Codex TOML format, Docker args should contain -e flags for ALL environment variables
|
||||
that will be available to the container, while actual values go in the [env] section.
|
||||
|
||||
Args:
|
||||
base_args (list): Base Docker arguments from registry.
|
||||
env_vars (dict): All environment variables that should be available.
|
||||
|
||||
Returns:
|
||||
list: Docker arguments with -e flags for all environment variables.
|
||||
"""
|
||||
if not env_vars:
|
||||
return base_args
|
||||
|
||||
result = []
|
||||
existing_env_vars = set()
|
||||
|
||||
# First pass: collect existing -e flags and build result with existing args
|
||||
i = 0
|
||||
while i < len(base_args):
|
||||
arg = base_args[i]
|
||||
result.append(arg)
|
||||
|
||||
# Track existing -e flags
|
||||
if arg == "-e" and i + 1 < len(base_args):
|
||||
env_var_name = base_args[i + 1]
|
||||
existing_env_vars.add(env_var_name)
|
||||
result.append(env_var_name)
|
||||
i += 2
|
||||
else:
|
||||
i += 1
|
||||
|
||||
# Second pass: add -e flags for any environment variables not already present
|
||||
# Insert them after "run" but before the image name (last argument)
|
||||
image_name = result[-1] if result else ""
|
||||
if image_name and not image_name.startswith("-"):
|
||||
# Remove image name temporarily
|
||||
result.pop()
|
||||
|
||||
# Add missing environment variable flags
|
||||
for env_name in sorted(env_vars.keys()):
|
||||
if env_name not in existing_env_vars:
|
||||
result.extend(["-e", env_name])
|
||||
|
||||
# Add image name back
|
||||
result.append(image_name)
|
||||
else:
|
||||
# If we can't identify image name, just append at the end
|
||||
for env_name in sorted(env_vars.keys()):
|
||||
if env_name not in existing_env_vars:
|
||||
result.extend(["-e", env_name])
|
||||
|
||||
return result
|
||||
|
||||
def _inject_docker_env_vars(self, args, env_vars):
|
||||
"""Inject environment variables into Docker arguments as -e flags.
|
||||
|
||||
Args:
|
||||
args (list): Original Docker arguments.
|
||||
env_vars (dict): Environment variables to inject.
|
||||
|
||||
Returns:
|
||||
list: Updated arguments with environment variables injected as -e flags.
|
||||
"""
|
||||
if not env_vars:
|
||||
return args
|
||||
|
||||
result = []
|
||||
existing_env_vars = set()
|
||||
|
||||
# First pass: collect existing -e flags to avoid duplicates
|
||||
i = 0
|
||||
while i < len(args):
|
||||
if args[i] == "-e" and i + 1 < len(args):
|
||||
existing_env_vars.add(args[i + 1])
|
||||
i += 2
|
||||
else:
|
||||
i += 1
|
||||
|
||||
# Second pass: build the result with new env vars injected after "run"
|
||||
for i, arg in enumerate(args):
|
||||
result.append(arg)
|
||||
# If this is a docker run command, inject new environment variables after "run"
|
||||
if arg == "run":
|
||||
for env_name in env_vars.keys():
|
||||
if env_name not in existing_env_vars:
|
||||
result.extend(["-e", env_name])
|
||||
|
||||
return result
|
||||
|
||||
def _select_best_package(self, packages):
|
||||
"""Select the best package for installation from available packages.
|
||||
|
||||
Prioritizes packages in order: npm, docker, pypi, homebrew, others.
|
||||
|
||||
Args:
|
||||
packages (list): List of package dictionaries.
|
||||
|
||||
Returns:
|
||||
dict: Best package to use, or None if no suitable package found.
|
||||
"""
|
||||
priority_order = ["npm", "docker", "pypi", "homebrew"]
|
||||
|
||||
# Sort packages by priority
|
||||
for registry_name in priority_order:
|
||||
for package in packages:
|
||||
if package.get("registry_name") == registry_name:
|
||||
return package
|
||||
|
||||
# If no priority package found, return the first one
|
||||
return packages[0] if packages else None
|
||||
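A minimal usage sketch of the adapter above (assuming the package installs as `apm_cli` and that the server reference resolves in the configured registry):

```python
# Hypothetical usage of CodexClientAdapter; the server reference is illustrative.
from apm_cli.adapters.client.codex import CodexClientAdapter

adapter = CodexClientAdapter()          # uses MCP_REGISTRY_URL or the default registry
print(adapter.get_config_path())        # ~/.codex/config.toml

# Looks the reference up in the registry and, for local (package-based) servers,
# writes an [mcp_servers.<name>] TOML entry with command/args/env; remote-only
# servers are rejected with a warning, mirroring configure_mcp_server above.
ok = adapter.configure_mcp_server("github/github-mcp-server", server_name="github")
print("configured" if ok else "skipped or failed")
```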
311
src/apm_cli/adapters/client/vscode.py
Normal file
@@ -0,0 +1,311 @@
|
||||
"""VSCode implementation of MCP client adapter.
|
||||
|
||||
This adapter implements the VSCode-specific handling of MCP server configuration,
|
||||
following the official documentation at:
|
||||
https://code.visualstudio.com/docs/copilot/chat/mcp-servers
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
from .base import MCPClientAdapter
|
||||
from ...registry.client import SimpleRegistryClient
|
||||
from ...registry.integration import RegistryIntegration
|
||||
|
||||
|
||||
class VSCodeClientAdapter(MCPClientAdapter):
|
||||
"""VSCode implementation of MCP client adapter.
|
||||
|
||||
This adapter handles VSCode-specific configuration for MCP servers using
|
||||
a repository-level .vscode/mcp.json file, following the format specified
|
||||
in the VSCode documentation.
|
||||
"""
|
||||
|
||||
def __init__(self, registry_url=None):
|
||||
"""Initialize the VSCode client adapter.
|
||||
|
||||
Args:
|
||||
registry_url (str, optional): URL of the MCP registry.
|
||||
If not provided, uses the MCP_REGISTRY_URL environment variable
|
||||
or falls back to the default demo registry.
|
||||
"""
|
||||
self.registry_client = SimpleRegistryClient(registry_url)
|
||||
self.registry_integration = RegistryIntegration(registry_url)
|
||||
|
||||
def get_config_path(self):
|
||||
"""Get the path to the VSCode MCP configuration file in the repository.
|
||||
|
||||
Returns:
|
||||
str: Path to the .vscode/mcp.json file.
|
||||
"""
|
||||
# Use the current working directory as the repository root
|
||||
repo_root = Path(os.getcwd())
|
||||
|
||||
# Path to .vscode/mcp.json in the repository
|
||||
vscode_dir = repo_root / ".vscode"
|
||||
mcp_config_path = vscode_dir / "mcp.json"
|
||||
|
||||
# Create the .vscode directory if it doesn't exist
|
||||
try:
|
||||
if not vscode_dir.exists():
|
||||
vscode_dir.mkdir(parents=True, exist_ok=True)
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not create .vscode directory: {e}")
|
||||
|
||||
return str(mcp_config_path)
|
||||
|
||||
def update_config(self, new_config):
|
||||
"""Update the VSCode MCP configuration with new values.
|
||||
|
||||
Args:
|
||||
new_config (dict): Complete configuration object to write.
|
||||
|
||||
Returns:
|
||||
bool: True if successful, False otherwise.
|
||||
"""
|
||||
config_path = self.get_config_path()
|
||||
|
||||
try:
|
||||
# Write the updated config
|
||||
with open(config_path, "w", encoding="utf-8") as f:
|
||||
json.dump(new_config, f, indent=2)
|
||||
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"Error updating VSCode MCP configuration: {e}")
|
||||
return False
|
||||
|
||||
def get_current_config(self):
|
||||
"""Get the current VSCode MCP configuration.
|
||||
|
||||
Returns:
|
||||
dict: Current VSCode MCP configuration from the local .vscode/mcp.json file.
|
||||
"""
|
||||
config_path = self.get_config_path()
|
||||
|
||||
try:
|
||||
try:
|
||||
with open(config_path, "r", encoding="utf-8") as f:
|
||||
return json.load(f)
|
||||
except (FileNotFoundError, json.JSONDecodeError):
|
||||
return {}
|
||||
except Exception as e:
|
||||
print(f"Error reading VSCode MCP configuration: {e}")
|
||||
return {}
|
||||
|
||||
def configure_mcp_server(self, server_url, server_name=None, enabled=True, env_overrides=None, server_info_cache=None, runtime_vars=None):
|
||||
"""Configure an MCP server in VS Code mcp.json file.
|
||||
|
||||
This method updates the .vscode/mcp.json file to add or update
|
||||
an MCP server configuration.
|
||||
|
||||
Args:
|
||||
server_url (str): URL or identifier of the MCP server.
|
||||
server_name (str, optional): Name of the server. Defaults to None.
|
||||
enabled (bool, optional): Whether to enable the server. Defaults to True.
|
||||
env_overrides (dict, optional): Environment variable overrides. Defaults to None.
|
||||
server_info_cache (dict, optional): Pre-fetched server info to avoid duplicate registry calls.
|
||||
|
||||
Returns:
|
||||
bool: True if successful, False otherwise.
|
||||
|
||||
Raises:
|
||||
ValueError: If server is not found in registry.
|
||||
"""
|
||||
if not server_url:
|
||||
print("Error: server_url cannot be empty")
|
||||
return False
|
||||
|
||||
try:
|
||||
# Use cached server info if available, otherwise fetch from registry
|
||||
if server_info_cache and server_url in server_info_cache:
|
||||
server_info = server_info_cache[server_url]
|
||||
else:
|
||||
# Fallback to registry lookup if not cached
|
||||
server_info = self.registry_client.find_server_by_reference(server_url)
|
||||
|
||||
# Fail if server is not found in registry - security requirement
|
||||
# This raises ValueError as expected by tests
|
||||
if not server_info:
|
||||
raise ValueError(f"Failed to retrieve server details for '{server_url}'. Server not found in registry.")
|
||||
|
||||
# Generate server configuration
|
||||
server_config, input_vars = self._format_server_config(server_info)
|
||||
|
||||
if not server_config:
|
||||
print(f"Unable to configure server: {server_url}")
|
||||
return False
|
||||
|
||||
# Use provided server name or fallback to server_url
|
||||
config_key = server_name or server_url
|
||||
|
||||
# Get current config
|
||||
current_config = self.get_current_config()
|
||||
|
||||
# Ensure servers and inputs sections exist
|
||||
if "servers" not in current_config:
|
||||
current_config["servers"] = {}
|
||||
if "inputs" not in current_config:
|
||||
current_config["inputs"] = []
|
||||
|
||||
# Add the server configuration
|
||||
current_config["servers"][config_key] = server_config
|
||||
|
||||
# Add input variables (avoiding duplicates)
|
||||
existing_input_ids = {var.get("id") for var in current_config["inputs"] if isinstance(var, dict)}
|
||||
for var in input_vars:
|
||||
if var.get("id") not in existing_input_ids:
|
||||
current_config["inputs"].append(var)
|
||||
existing_input_ids.add(var.get("id"))
|
||||
|
||||
# Update the configuration
|
||||
result = self.update_config(current_config)
|
||||
|
||||
if result:
|
||||
print(f"Successfully configured MCP server '{config_key}' for VS Code")
|
||||
return result
|
||||
|
||||
except ValueError:
|
||||
# Re-raise ValueError for registry errors
|
||||
raise
|
||||
except Exception as e:
|
||||
print(f"Error configuring MCP server: {e}")
|
||||
return False
|
||||
|
||||
def _format_server_config(self, server_info):
|
||||
"""Format server details into VSCode mcp.json compatible format.
|
||||
|
||||
Args:
|
||||
server_info (dict): Server information from registry.
|
||||
|
||||
Returns:
|
||||
tuple: (server_config, input_vars) where:
|
||||
- server_config is the formatted server configuration for mcp.json
|
||||
- input_vars is a list of input variable definitions
|
||||
"""
|
||||
# Initialize the base config structure
|
||||
server_config = {}
|
||||
input_vars = []
|
||||
|
||||
# Check for packages information
|
||||
if "packages" in server_info and server_info["packages"]:
|
||||
package = server_info["packages"][0]
|
||||
runtime_hint = package.get("runtime_hint", "")
|
||||
|
||||
# Handle npm packages
|
||||
if runtime_hint == "npx" or "npm" in package.get("registry_name", "").lower():
|
||||
# Get args directly from runtime_arguments
|
||||
args = []
|
||||
if "runtime_arguments" in package and package["runtime_arguments"]:
|
||||
for arg in package["runtime_arguments"]:
|
||||
if arg.get("is_required", False) and arg.get("value_hint"):
|
||||
args.append(arg.get("value_hint"))
|
||||
|
||||
# Fallback if no runtime_arguments are provided
|
||||
if not args and package.get("name"):
|
||||
args = [package.get("name")]
|
||||
|
||||
server_config = {
|
||||
"type": "stdio",
|
||||
"command": "npx",
|
||||
"args": args
|
||||
}
|
||||
|
||||
# Handle docker packages
|
||||
elif runtime_hint == "docker":
|
||||
# Get args directly from runtime_arguments
|
||||
args = []
|
||||
if "runtime_arguments" in package and package["runtime_arguments"]:
|
||||
for arg in package["runtime_arguments"]:
|
||||
if arg.get("is_required", False) and arg.get("value_hint"):
|
||||
args.append(arg.get("value_hint"))
|
||||
|
||||
# Fallback if no runtime_arguments are provided - use standard docker run command
|
||||
if not args:
|
||||
args = ["run", "-i", "--rm", package.get("name")]
|
||||
|
||||
server_config = {
|
||||
"type": "stdio",
|
||||
"command": "docker",
|
||||
"args": args
|
||||
}
|
||||
|
||||
# Handle Python packages
|
||||
elif runtime_hint in ["uvx", "pip", "python"] or "python" in runtime_hint or package.get("registry_name", "").lower() == "pypi":
|
||||
# Determine the command based on runtime_hint
|
||||
if runtime_hint == "uvx":
|
||||
command = "uvx"
|
||||
elif "python" in runtime_hint:
|
||||
# Use the specified Python path if it's a full path, otherwise default to python3
|
||||
command = "python3" if runtime_hint in ["python", "pip"] else runtime_hint
|
||||
else:
|
||||
command = "python3"
|
||||
|
||||
# Get args directly from runtime_arguments
|
||||
args = []
|
||||
if "runtime_arguments" in package and package["runtime_arguments"]:
|
||||
for arg in package["runtime_arguments"]:
|
||||
if arg.get("is_required", False) and arg.get("value_hint"):
|
||||
args.append(arg.get("value_hint"))
|
||||
|
||||
# Fallback if no runtime_arguments are provided
|
||||
if not args:
|
||||
if runtime_hint == "uvx":
|
||||
module_name = package.get("name", "").replace("mcp-server-", "")
|
||||
args = [f"mcp-server-{module_name}"]
|
||||
else:
|
||||
module_name = package.get("name", "").replace("mcp-server-", "").replace("-", "_")
|
||||
args = ["-m", f"mcp_server_{module_name}"]
|
||||
|
||||
server_config = {
|
||||
"type": "stdio",
|
||||
"command": command,
|
||||
"args": args
|
||||
}
|
||||
|
||||
# Add environment variables if present
|
||||
if "environment_variables" in package and package["environment_variables"]:
|
||||
server_config["env"] = {}
|
||||
for env_var in package["environment_variables"]:
|
||||
if "name" in env_var:
|
||||
# Convert variable name to lowercase and replace underscores with hyphens for VS Code convention
|
||||
input_var_name = env_var["name"].lower().replace("_", "-")
|
||||
|
||||
# Create the input variable reference
|
||||
server_config["env"][env_var["name"]] = f"${{input:{input_var_name}}}"
|
||||
|
||||
# Create the input variable definition
|
||||
input_var_def = {
|
||||
"type": "promptString",
|
||||
"id": input_var_name,
|
||||
"description": env_var.get("description", f"{env_var['name']} for MCP server"),
|
||||
"password": True # Default to True for security
|
||||
}
|
||||
input_vars.append(input_var_def)
|
||||
|
||||
# If no server config was created from packages, check for other server types
|
||||
if not server_config:
|
||||
# Check for SSE endpoints
|
||||
if "sse_endpoint" in server_info:
|
||||
server_config = {
|
||||
"type": "sse",
|
||||
"url": server_info["sse_endpoint"],
|
||||
"headers": server_info.get("sse_headers", {})
|
||||
}
|
||||
# Check for remotes (similar to Copilot adapter)
|
||||
elif "remotes" in server_info and server_info["remotes"]:
|
||||
remotes = server_info["remotes"]
|
||||
remote = remotes[0] # Take the first remote
|
||||
if remote.get("transport_type") == "sse":
|
||||
server_config = {
|
||||
"type": "sse",
|
||||
"url": remote.get("url", ""),
|
||||
"headers": remote.get("headers", {})
|
||||
}
|
||||
# If no packages AND no endpoints/remotes, fail with clear error
|
||||
else:
|
||||
raise ValueError(f"MCP server has incomplete configuration in registry - no package information or remote endpoints available. "
|
||||
f"This appears to be a temporary registry issue. "
|
||||
f"Server: {server_info.get('name', 'unknown')}")
|
||||
|
||||
return server_config, input_vars
|
||||
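For orientation, the shape of the `.vscode/mcp.json` file written by the adapter above can be sketched as a plain dict; the server name, package name, and token are invented for this sketch, not taken from the registry:

```python
# Illustrative shape of .vscode/mcp.json as produced by VSCodeClientAdapter.
import json

example_config = {
    "servers": {
        "example-server": {
            "type": "stdio",
            "command": "npx",
            "args": ["example-mcp-server"],                     # from runtime_arguments or package name
            "env": {"EXAMPLE_TOKEN": "${input:example-token}"}  # input-variable reference
        }
    },
    "inputs": [
        {
            "type": "promptString",
            "id": "example-token",
            "description": "EXAMPLE_TOKEN for MCP server",
            "password": True
        }
    ]
}
print(json.dumps(example_config, indent=2))
```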
1
src/apm_cli/adapters/package_manager/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Package manager adapters package."""
|
||||
27
src/apm_cli/adapters/package_manager/base.py
Normal file
@@ -0,0 +1,27 @@
|
||||
"""Base adapter interface for MCP package managers."""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
|
||||
class MCPPackageManagerAdapter(ABC):
|
||||
"""Base adapter for MCP package managers."""
|
||||
|
||||
@abstractmethod
|
||||
def install(self, package_name, version=None):
|
||||
"""Install an MCP package."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def uninstall(self, package_name):
|
||||
"""Uninstall an MCP package."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def list_installed(self):
|
||||
"""List all installed MCP packages."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def search(self, query):
|
||||
"""Search for MCP packages."""
|
||||
pass
|
||||
123
src/apm_cli/adapters/package_manager/default_manager.py
Normal file
@@ -0,0 +1,123 @@
|
||||
"""Implementation of the default MCP package manager."""
|
||||
|
||||
from .base import MCPPackageManagerAdapter
|
||||
from ...config import get_default_client
|
||||
from ...registry.integration import RegistryIntegration
|
||||
|
||||
|
||||
class DefaultMCPPackageManager(MCPPackageManagerAdapter):
|
||||
"""Implementation of the default MCP package manager."""
|
||||
|
||||
def install(self, package_name, version=None):
|
||||
"""Install an MCP package.
|
||||
|
||||
Args:
|
||||
package_name (str): Name of the package to install.
|
||||
version (str, optional): Version of the package to install.
|
||||
|
||||
Returns:
|
||||
bool: True if successful, False otherwise.
|
||||
"""
|
||||
|
||||
try:
|
||||
# Import here to avoid circular import
|
||||
from ...factory import ClientFactory
|
||||
|
||||
client_type = get_default_client()
|
||||
client_adapter = ClientFactory.create_client(client_type)
|
||||
|
||||
# For VSCode, configure MCP server in mcp.json
|
||||
result = client_adapter.configure_mcp_server(package_name, package_name, True)
|
||||
|
||||
if result:
|
||||
print(f"Successfully installed {package_name}")
|
||||
return result
|
||||
except Exception as e:
|
||||
print(f"Error installing package {package_name}: {e}")
|
||||
return False
|
||||
|
||||
def uninstall(self, package_name):
|
||||
"""Uninstall an MCP package.
|
||||
|
||||
Args:
|
||||
package_name (str): Name of the package to uninstall.
|
||||
|
||||
Returns:
|
||||
bool: True if successful, False otherwise.
|
||||
"""
|
||||
|
||||
try:
|
||||
# Import here to avoid circular import
|
||||
from ...factory import ClientFactory
|
||||
|
||||
client_type = get_default_client()
|
||||
client_adapter = ClientFactory.create_client(client_type)
|
||||
config = client_adapter.get_current_config()
|
||||
|
||||
# For VSCode, remove the server from mcp.json
|
||||
if "servers" in config and package_name in config["servers"]:
|
||||
servers = config["servers"]
|
||||
servers.pop(package_name, None)
|
||||
result = client_adapter.update_config({"servers": servers})
|
||||
|
||||
if result:
|
||||
print(f"Successfully uninstalled {package_name}")
|
||||
return result
|
||||
else:
|
||||
print(f"Package {package_name} not found in configuration")
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error uninstalling package {package_name}: {e}")
|
||||
return False
|
||||
|
||||
def list_installed(self):
|
||||
"""List all installed MCP packages.
|
||||
|
||||
Returns:
|
||||
list: List of installed packages.
|
||||
"""
|
||||
|
||||
try:
|
||||
# Import here to avoid circular import
|
||||
from ...factory import ClientFactory
|
||||
|
||||
# Get client type from configuration (default is vscode)
|
||||
client_type = get_default_client()
|
||||
|
||||
# Create client adapter
|
||||
client_adapter = ClientFactory.create_client(client_type)
|
||||
|
||||
# Get config from local .vscode/mcp.json file
|
||||
config = client_adapter.get_current_config()
|
||||
|
||||
# Extract server names from the config
|
||||
servers = config.get("servers", {})
|
||||
|
||||
# Return the list of server names
|
||||
return list(servers.keys())
|
||||
except Exception as e:
|
||||
print(f"Error retrieving installed MCP servers: {e}")
|
||||
return []
|
||||
|
||||
def search(self, query):
|
||||
"""Search for MCP packages.
|
||||
|
||||
Args:
|
||||
query (str): Search query.
|
||||
|
||||
Returns:
|
||||
list: List of packages matching the query.
|
||||
"""
|
||||
|
||||
try:
|
||||
# Use the registry integration to search for packages
|
||||
registry = RegistryIntegration()
|
||||
packages = registry.search_packages(query)
|
||||
|
||||
# Return the list of package IDs/names
|
||||
return [pkg.get("id", pkg.get("name", "Unknown")) for pkg in packages] if packages else []
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error searching for packages: {e}")
|
||||
return []
|
||||
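A minimal usage sketch of the adapter above, not taken from the diff: it assumes VS Code is the configured default client, that .vscode/mcp.json is writable in the current project, and the package name is hypothetical.

# Sketch only; "github-mcp-server" is a made-up package name.
from apm_cli.adapters.package_manager.default_manager import DefaultMCPPackageManager

manager = DefaultMCPPackageManager()
if manager.install("github-mcp-server"):
    print(manager.list_installed())  # e.g. ['github-mcp-server']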
2555  src/apm_cli/cli.py  Normal file
File diff suppressed because it is too large
5  src/apm_cli/commands/__init__.py  Normal file
@@ -0,0 +1,5 @@
"""Commands package for APM CLI."""

from .deps import deps

__all__ = ['deps']
656  src/apm_cli/commands/deps.py  Normal file
@@ -0,0 +1,656 @@
"""APM dependency management commands."""
|
||||
|
||||
import sys
|
||||
import shutil
|
||||
import click
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Dict, Any
|
||||
|
||||
# Import existing APM components
|
||||
from ..models.apm_package import APMPackage, ValidationResult, validate_apm_package
|
||||
from ..utils.console import _rich_success, _rich_error, _rich_info, _rich_warning
|
||||
|
||||
# Import APM dependency system components (with fallback)
|
||||
from ..deps.github_downloader import GitHubPackageDownloader
|
||||
from ..deps.apm_resolver import APMDependencyResolver
|
||||
|
||||
|
||||
|
||||
@click.group(help="🔗 Manage APM package dependencies")
|
||||
def deps():
|
||||
"""APM dependency management commands."""
|
||||
pass
|
||||
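The deps group above can be exercised in-process with Click's test runner; a sketch follows, assuming the module is importable as apm_cli.commands.deps.

# Sketch only: invoke "deps list" without a real shell.
from click.testing import CliRunner
from apm_cli.commands.deps import deps

runner = CliRunner()
result = runner.invoke(deps, ["list"])
print(result.output)  # dependency table, or a hint to run the install command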
|
||||
|
||||
@deps.command(name="list", help="📋 List installed APM dependencies")
|
||||
def list_packages():
|
||||
"""Show all installed APM dependencies with context files and agent workflows."""
|
||||
try:
|
||||
# Import Rich components with fallback
|
||||
from rich.table import Table
|
||||
from rich.console import Console
|
||||
console = Console()
|
||||
has_rich = True
|
||||
except ImportError:
|
||||
has_rich = False
|
||||
console = None
|
||||
|
||||
try:
|
||||
project_root = Path(".")
|
||||
apm_modules_path = project_root / "apm_modules"
|
||||
|
||||
# Check if apm_modules exists
|
||||
if not apm_modules_path.exists():
|
||||
if has_rich:
|
||||
console.print("💡 No APM dependencies installed yet", style="cyan")
|
||||
console.print("Run 'specify apm install' to install dependencies from apm.yml", style="dim")
|
||||
else:
|
||||
click.echo("💡 No APM dependencies installed yet")
|
||||
click.echo("Run 'specify apm install' to install dependencies from apm.yml")
|
||||
return
|
||||
|
||||
# Load project dependencies to check for orphaned packages
|
||||
declared_deps = set()
|
||||
try:
|
||||
apm_yml_path = project_root / "apm.yml"
|
||||
if apm_yml_path.exists():
|
||||
project_package = APMPackage.from_apm_yml(apm_yml_path)
|
||||
for dep in project_package.get_apm_dependencies():
|
||||
declared_deps.add(dep.repo_url)
|
||||
except Exception:
|
||||
pass # Continue without orphan detection if apm.yml parsing fails
|
||||
|
||||
# Scan for installed packages in org-namespaced structure
|
||||
installed_packages = []
|
||||
orphaned_packages = []
|
||||
for org_dir in apm_modules_path.iterdir():
|
||||
if org_dir.is_dir() and not org_dir.name.startswith('.'):
|
||||
for package_dir in org_dir.iterdir():
|
||||
if package_dir.is_dir() and not package_dir.name.startswith('.'):
|
||||
try:
|
||||
# org/repo format
|
||||
org_repo_name = f"{org_dir.name}/{package_dir.name}"
|
||||
|
||||
# Try to load package metadata
|
||||
apm_yml_path = package_dir / "apm.yml"
|
||||
if apm_yml_path.exists():
|
||||
package = APMPackage.from_apm_yml(apm_yml_path)
|
||||
# Count context files and workflows separately
|
||||
context_count, workflow_count = _count_package_files(package_dir)
|
||||
|
||||
# Check if this package is orphaned
|
||||
is_orphaned = org_repo_name not in declared_deps
|
||||
if is_orphaned:
|
||||
orphaned_packages.append(org_repo_name)
|
||||
|
||||
installed_packages.append({
|
||||
'name': org_repo_name,
|
||||
'version': package.version or 'unknown',
|
||||
'source': 'orphaned' if is_orphaned else 'github',
|
||||
'context': context_count,
|
||||
'workflows': workflow_count,
|
||||
'path': str(package_dir),
|
||||
'is_orphaned': is_orphaned
|
||||
})
|
||||
else:
|
||||
# Package without apm.yml - show basic info
|
||||
context_count, workflow_count = _count_package_files(package_dir)
|
||||
is_orphaned = True # Assume orphaned if no apm.yml
|
||||
orphaned_packages.append(org_repo_name)
|
||||
|
||||
installed_packages.append({
|
||||
'name': org_repo_name,
|
||||
'version': 'unknown',
|
||||
'source': 'orphaned',
|
||||
'context': context_count,
|
||||
'workflows': workflow_count,
|
||||
'path': str(package_dir),
|
||||
'is_orphaned': is_orphaned
|
||||
})
|
||||
except Exception as e:
|
||||
click.echo(f"⚠️ Warning: Failed to read package {org_dir.name}/{package_dir.name}: {e}")
|
||||
|
||||
if not installed_packages:
|
||||
if has_rich:
|
||||
console.print("💡 apm_modules/ directory exists but contains no valid packages", style="cyan")
|
||||
else:
|
||||
click.echo("💡 apm_modules/ directory exists but contains no valid packages")
|
||||
return
|
||||
|
||||
# Display packages in table format
|
||||
if has_rich:
|
||||
table = Table(title="📋 APM Dependencies", show_header=True, header_style="bold cyan")
|
||||
table.add_column("Package", style="bold white")
|
||||
table.add_column("Version", style="yellow")
|
||||
table.add_column("Source", style="blue")
|
||||
table.add_column("Context", style="green")
|
||||
table.add_column("Workflows", style="magenta")
|
||||
|
||||
for pkg in installed_packages:
|
||||
table.add_row(
|
||||
pkg['name'],
|
||||
pkg['version'],
|
||||
pkg['source'],
|
||||
f"{pkg['context']} files",
|
||||
f"{pkg['workflows']} workflows"
|
||||
)
|
||||
|
||||
console.print(table)
|
||||
|
||||
# Show orphaned packages warning
|
||||
if orphaned_packages:
|
||||
console.print(f"\n⚠️ {len(orphaned_packages)} orphaned package(s) found (not in apm.yml):", style="yellow")
|
||||
for pkg in orphaned_packages:
|
||||
console.print(f" • {pkg}", style="dim yellow")
|
||||
console.print("\n💡 Run 'specify apm prune' to remove orphaned packages", style="cyan")
|
||||
else:
|
||||
# Fallback text table
|
||||
click.echo("📋 APM Dependencies:")
|
||||
click.echo("┌─────────────────────┬─────────┬──────────────┬─────────────┬─────────────┐")
|
||||
click.echo("│ Package │ Version │ Source │ Context │ Workflows │")
|
||||
click.echo("├─────────────────────┼─────────┼──────────────┼─────────────┼─────────────┤")
|
||||
|
||||
for pkg in installed_packages:
|
||||
name = pkg['name'][:19].ljust(19)
|
||||
version = pkg['version'][:7].ljust(7)
|
||||
source = pkg['source'][:12].ljust(12)
|
||||
context = f"{pkg['context']} files".ljust(11)
|
||||
workflows = f"{pkg['workflows']} wf".ljust(11)
|
||||
click.echo(f"│ {name} │ {version} │ {source} │ {context} │ {workflows} │")
|
||||
|
||||
click.echo("└─────────────────────┴─────────┴──────────────┴─────────────┴─────────────┘")
|
||||
|
||||
# Show orphaned packages warning
|
||||
if orphaned_packages:
|
||||
click.echo(f"\n⚠️ {len(orphaned_packages)} orphaned package(s) found (not in apm.yml):")
|
||||
for pkg in orphaned_packages:
|
||||
click.echo(f" • {pkg}")
|
||||
click.echo("\n💡 Run 'specify apm prune' to remove orphaned packages")
|
||||
|
||||
except Exception as e:
|
||||
_rich_error(f"Error listing dependencies: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@deps.command(help="🌳 Show dependency tree structure")
|
||||
def tree():
|
||||
"""Display dependencies in hierarchical tree format showing context and workflows."""
|
||||
try:
|
||||
# Import Rich components with fallback
|
||||
from rich.tree import Tree
|
||||
from rich.console import Console
|
||||
console = Console()
|
||||
has_rich = True
|
||||
except ImportError:
|
||||
has_rich = False
|
||||
console = None
|
||||
|
||||
try:
|
||||
project_root = Path(".")
|
||||
apm_modules_path = project_root / "apm_modules"
|
||||
|
||||
# Load project info
|
||||
project_name = "my-project"
|
||||
try:
|
||||
apm_yml_path = project_root / "apm.yml"
|
||||
if apm_yml_path.exists():
|
||||
root_package = APMPackage.from_apm_yml(apm_yml_path)
|
||||
project_name = root_package.name
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if has_rich:
|
||||
# Create Rich tree
|
||||
root_tree = Tree(f"[bold cyan]{project_name}[/bold cyan] (local)")
|
||||
|
||||
# Check if apm_modules exists
|
||||
if not apm_modules_path.exists():
|
||||
root_tree.add("[dim]No dependencies installed[/dim]")
|
||||
else:
|
||||
# Add each dependency as a branch
|
||||
for package_dir in apm_modules_path.iterdir():
|
||||
if package_dir.is_dir():
|
||||
try:
|
||||
package_info = _get_package_display_info(package_dir)
|
||||
branch = root_tree.add(f"[green]{package_info['display_name']}[/green]")
|
||||
|
||||
# Add context files and workflows as sub-items
|
||||
context_files = _get_detailed_context_counts(package_dir)
|
||||
workflow_count = _count_workflows(package_dir)
|
||||
|
||||
# Show context files by type
|
||||
for context_type, count in context_files.items():
|
||||
if count > 0:
|
||||
branch.add(f"[dim]{count} {context_type}[/dim]")
|
||||
|
||||
# Show workflows
|
||||
if workflow_count > 0:
|
||||
branch.add(f"[bold magenta]{workflow_count} agent workflows[/bold magenta]")
|
||||
|
||||
if not any(count > 0 for count in context_files.values()) and workflow_count == 0:
|
||||
branch.add("[dim]no context or workflows[/dim]")
|
||||
|
||||
except Exception as e:
|
||||
branch = root_tree.add(f"[red]{package_dir.name}[/red] [dim](error loading)[/dim]")
|
||||
|
||||
console.print(root_tree)
|
||||
|
||||
else:
|
||||
# Fallback text tree
|
||||
click.echo(f"{project_name} (local)")
|
||||
|
||||
if not apm_modules_path.exists():
|
||||
click.echo("└── No dependencies installed")
|
||||
return
|
||||
|
||||
package_dirs = [d for d in apm_modules_path.iterdir() if d.is_dir()]
|
||||
|
||||
for i, package_dir in enumerate(package_dirs):
|
||||
is_last = i == len(package_dirs) - 1
|
||||
prefix = "└── " if is_last else "├── "
|
||||
|
||||
try:
|
||||
package_info = _get_package_display_info(package_dir)
|
||||
click.echo(f"{prefix}{package_info['display_name']}")
|
||||
|
||||
# Add context files and workflows
|
||||
context_files = _get_detailed_context_counts(package_dir)
|
||||
workflow_count = _count_workflows(package_dir)
|
||||
sub_prefix = " " if is_last else "│ "
|
||||
|
||||
items_shown = False
|
||||
for context_type, count in context_files.items():
|
||||
if count > 0:
|
||||
click.echo(f"{sub_prefix}├── {count} {context_type}")
|
||||
items_shown = True
|
||||
|
||||
if workflow_count > 0:
|
||||
click.echo(f"{sub_prefix}├── {workflow_count} agent workflows")
|
||||
items_shown = True
|
||||
|
||||
if not items_shown:
|
||||
click.echo(f"{sub_prefix}└── no context or workflows")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"{prefix}{package_dir.name} (error loading)")
|
||||
|
||||
except Exception as e:
|
||||
_rich_error(f"Error showing dependency tree: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@deps.command(help="🧹 Remove all APM dependencies")
|
||||
def clean():
|
||||
"""Remove entire apm_modules/ directory."""
|
||||
project_root = Path(".")
|
||||
apm_modules_path = project_root / "apm_modules"
|
||||
|
||||
if not apm_modules_path.exists():
|
||||
_rich_info("No apm_modules/ directory found - already clean")
|
||||
return
|
||||
|
||||
# Show what will be removed
|
||||
package_count = len([d for d in apm_modules_path.iterdir() if d.is_dir()])
|
||||
|
||||
_rich_warning(f"This will remove the entire apm_modules/ directory ({package_count} packages)")
|
||||
|
||||
# Confirmation prompt
|
||||
try:
|
||||
from rich.prompt import Confirm
|
||||
confirm = Confirm.ask("Continue?")
|
||||
except ImportError:
|
||||
confirm = click.confirm("Continue?")
|
||||
|
||||
if not confirm:
|
||||
_rich_info("Operation cancelled")
|
||||
return
|
||||
|
||||
try:
|
||||
shutil.rmtree(apm_modules_path)
|
||||
_rich_success("Successfully removed apm_modules/ directory")
|
||||
except Exception as e:
|
||||
_rich_error(f"Error removing apm_modules/: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@deps.command(help="🔄 Update APM dependencies")
|
||||
@click.argument('package', required=False)
|
||||
def update(package: Optional[str]):
|
||||
"""Update specific package or all if no package specified."""
|
||||
|
||||
project_root = Path(".")
|
||||
apm_modules_path = project_root / "apm_modules"
|
||||
|
||||
if not apm_modules_path.exists():
|
||||
_rich_info("No apm_modules/ directory found - no packages to update")
|
||||
return
|
||||
|
||||
# Get project dependencies to validate updates
|
||||
try:
|
||||
apm_yml_path = project_root / "apm.yml"
|
||||
if not apm_yml_path.exists():
|
||||
_rich_error("No apm.yml found in current directory")
|
||||
return
|
||||
|
||||
project_package = APMPackage.from_apm_yml(apm_yml_path)
|
||||
project_deps = project_package.get_apm_dependencies()
|
||||
|
||||
if not project_deps:
|
||||
_rich_info("No APM dependencies defined in apm.yml")
|
||||
return
|
||||
|
||||
except Exception as e:
|
||||
_rich_error(f"Error reading apm.yml: {e}")
|
||||
return
|
||||
|
||||
if package:
|
||||
# Update specific package
|
||||
_update_single_package(package, project_deps, apm_modules_path)
|
||||
else:
|
||||
# Update all packages
|
||||
_update_all_packages(project_deps, apm_modules_path)
|
||||
|
||||
|
||||
@deps.command(help="ℹ️ Show detailed package information")
|
||||
@click.argument('package', required=True)
|
||||
def info(package: str):
|
||||
"""Show detailed information about a specific package including context files and workflows."""
|
||||
project_root = Path(".")
|
||||
apm_modules_path = project_root / "apm_modules"
|
||||
|
||||
if not apm_modules_path.exists():
|
||||
_rich_error("No apm_modules/ directory found")
|
||||
_rich_info("Run 'specify apm install' to install dependencies first")
|
||||
sys.exit(1)
|
||||
|
||||
# Find the package directory
|
||||
package_path = None
|
||||
for package_dir in apm_modules_path.iterdir():
|
||||
if package_dir.is_dir() and package_dir.name == package:
|
||||
package_path = package_dir
|
||||
break
|
||||
|
||||
if not package_path:
|
||||
_rich_error(f"Package '{package}' not found in apm_modules/")
|
||||
_rich_info("Available packages:")
|
||||
|
||||
for package_dir in apm_modules_path.iterdir():
|
||||
if package_dir.is_dir():
|
||||
click.echo(f" - {package_dir.name}")
|
||||
sys.exit(1)
|
||||
|
||||
try:
|
||||
# Load package information
|
||||
package_info = _get_detailed_package_info(package_path)
|
||||
|
||||
# Display with Rich panel if available
|
||||
try:
|
||||
from rich.panel import Panel
|
||||
from rich.console import Console
|
||||
from rich.text import Text
|
||||
console = Console()
|
||||
|
||||
content_lines = []
|
||||
content_lines.append(f"[bold]Name:[/bold] {package_info['name']}")
|
||||
content_lines.append(f"[bold]Version:[/bold] {package_info['version']}")
|
||||
content_lines.append(f"[bold]Description:[/bold] {package_info['description']}")
|
||||
content_lines.append(f"[bold]Author:[/bold] {package_info['author']}")
|
||||
content_lines.append(f"[bold]Source:[/bold] {package_info['source']}")
|
||||
content_lines.append(f"[bold]Install Path:[/bold] {package_info['install_path']}")
|
||||
content_lines.append("")
|
||||
content_lines.append("[bold]Context Files:[/bold]")
|
||||
|
||||
for context_type, count in package_info['context_files'].items():
|
||||
if count > 0:
|
||||
content_lines.append(f" • {count} {context_type}")
|
||||
|
||||
if not any(count > 0 for count in package_info['context_files'].values()):
|
||||
content_lines.append(" • No context files found")
|
||||
|
||||
content_lines.append("")
|
||||
content_lines.append("[bold]Agent Workflows:[/bold]")
|
||||
if package_info['workflows'] > 0:
|
||||
content_lines.append(f" • {package_info['workflows']} executable workflows")
|
||||
else:
|
||||
content_lines.append(" • No agent workflows found")
|
||||
|
||||
content = "\n".join(content_lines)
|
||||
panel = Panel(content, title=f"ℹ️ Package Info: {package}", border_style="cyan")
|
||||
console.print(panel)
|
||||
|
||||
except ImportError:
|
||||
# Fallback text display
|
||||
click.echo(f"ℹ️ Package Info: {package}")
|
||||
click.echo("=" * 40)
|
||||
click.echo(f"Name: {package_info['name']}")
|
||||
click.echo(f"Version: {package_info['version']}")
|
||||
click.echo(f"Description: {package_info['description']}")
|
||||
click.echo(f"Author: {package_info['author']}")
|
||||
click.echo(f"Source: {package_info['source']}")
|
||||
click.echo(f"Install Path: {package_info['install_path']}")
|
||||
click.echo("")
|
||||
click.echo("Context Files:")
|
||||
|
||||
for context_type, count in package_info['context_files'].items():
|
||||
if count > 0:
|
||||
click.echo(f" • {count} {context_type}")
|
||||
|
||||
if not any(count > 0 for count in package_info['context_files'].values()):
|
||||
click.echo(" • No context files found")
|
||||
|
||||
click.echo("")
|
||||
click.echo("Agent Workflows:")
|
||||
if package_info['workflows'] > 0:
|
||||
click.echo(f" • {package_info['workflows']} executable workflows")
|
||||
else:
|
||||
click.echo(" • No agent workflows found")
|
||||
|
||||
except Exception as e:
|
||||
_rich_error(f"Error reading package information: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# Helper functions
|
||||
|
||||
def _count_package_files(package_path: Path) -> tuple[int, int]:
|
||||
"""Count context files and workflows in a package.
|
||||
|
||||
Returns:
|
||||
tuple: (context_count, workflow_count)
|
||||
"""
|
||||
apm_dir = package_path / ".apm"
|
||||
if not apm_dir.exists():
|
||||
# Also check root directory for .prompt.md files
|
||||
workflow_count = len(list(package_path.glob("*.prompt.md")))
|
||||
return 0, workflow_count
|
||||
|
||||
context_count = 0
|
||||
context_dirs = ['instructions', 'chatmodes', 'contexts']
|
||||
|
||||
for context_dir in context_dirs:
|
||||
context_path = apm_dir / context_dir
|
||||
if context_path.exists() and context_path.is_dir():
|
||||
context_count += len(list(context_path.glob("*.md")))
|
||||
|
||||
# Count workflows in both .apm/prompts and root directory
|
||||
workflow_count = 0
|
||||
prompts_path = apm_dir / "prompts"
|
||||
if prompts_path.exists() and prompts_path.is_dir():
|
||||
workflow_count += len(list(prompts_path.glob("*.prompt.md")))
|
||||
|
||||
# Also check root directory for .prompt.md files
|
||||
workflow_count += len(list(package_path.glob("*.prompt.md")))
|
||||
|
||||
return context_count, workflow_count
|
||||
|
||||
|
||||
def _count_workflows(package_path: Path) -> int:
|
||||
"""Count agent workflows (.prompt.md files) in a package."""
|
||||
_, workflow_count = _count_package_files(package_path)
|
||||
return workflow_count
|
||||
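The counting helpers above assume a package layout with .apm/instructions, .apm/chatmodes, .apm/contexts and .apm/prompts directories plus root-level *.prompt.md files. The sketch below builds a throwaway package with made-up file names to show what they return.

# Sketch only; file names are invented for illustration.
from pathlib import Path
import tempfile

pkg = Path(tempfile.mkdtemp())
(pkg / ".apm" / "instructions").mkdir(parents=True)
(pkg / ".apm" / "prompts").mkdir(parents=True)
(pkg / ".apm" / "instructions" / "python.md").write_text("# style rules")
(pkg / ".apm" / "prompts" / "review.prompt.md").write_text("# review workflow")
(pkg / "release.prompt.md").write_text("# root-level workflow")

print(_count_package_files(pkg))  # (1, 2): one context file, two workflows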
|
||||
|
||||
def _get_detailed_context_counts(package_path: Path) -> Dict[str, int]:
|
||||
"""Get detailed context file counts by type."""
|
||||
apm_dir = package_path / ".apm"
|
||||
if not apm_dir.exists():
|
||||
return {'instructions': 0, 'chatmodes': 0, 'contexts': 0}
|
||||
|
||||
counts = {}
|
||||
context_types = {
|
||||
'instructions': ['instructions.md'],
|
||||
'chatmodes': ['chatmode.md'],
|
||||
'contexts': ['context.md', 'memory.md']
|
||||
}
|
||||
|
||||
for context_type, extensions in context_types.items():
|
||||
count = 0
|
||||
context_path = apm_dir / context_type
|
||||
if context_path.exists() and context_path.is_dir():
|
||||
for ext in extensions:
|
||||
count += len(list(context_path.glob(f"*.{ext}")))
|
||||
counts[context_type] = count
|
||||
|
||||
return counts
|
||||
|
||||
|
||||
def _get_package_display_info(package_path: Path) -> Dict[str, str]:
|
||||
"""Get package display information."""
|
||||
try:
|
||||
apm_yml_path = package_path / "apm.yml"
|
||||
if apm_yml_path.exists():
|
||||
package = APMPackage.from_apm_yml(apm_yml_path)
|
||||
version_info = f"@{package.version}" if package.version else "@unknown"
|
||||
return {
|
||||
'display_name': f"{package.name}{version_info}",
|
||||
'name': package.name,
|
||||
'version': package.version or 'unknown'
|
||||
}
|
||||
else:
|
||||
return {
|
||||
'display_name': f"{package_path.name}@unknown",
|
||||
'name': package_path.name,
|
||||
'version': 'unknown'
|
||||
}
|
||||
except Exception:
|
||||
return {
|
||||
'display_name': f"{package_path.name}@error",
|
||||
'name': package_path.name,
|
||||
'version': 'error'
|
||||
}
|
||||
|
||||
|
||||
def _get_detailed_package_info(package_path: Path) -> Dict[str, Any]:
|
||||
"""Get detailed package information for the info command."""
|
||||
try:
|
||||
apm_yml_path = package_path / "apm.yml"
|
||||
if apm_yml_path.exists():
|
||||
package = APMPackage.from_apm_yml(apm_yml_path)
|
||||
context_count, workflow_count = _count_package_files(package_path)
|
||||
return {
|
||||
'name': package.name,
|
||||
'version': package.version or 'unknown',
|
||||
'description': package.description or 'No description',
|
||||
'author': package.author or 'Unknown',
|
||||
'source': package.source or 'local',
|
||||
'install_path': str(package_path.resolve()),
|
||||
'context_files': _get_detailed_context_counts(package_path),
|
||||
'workflows': workflow_count
|
||||
}
|
||||
else:
|
||||
context_count, workflow_count = _count_package_files(package_path)
|
||||
return {
|
||||
'name': package_path.name,
|
||||
'version': 'unknown',
|
||||
'description': 'No apm.yml found',
|
||||
'author': 'Unknown',
|
||||
'source': 'unknown',
|
||||
'install_path': str(package_path.resolve()),
|
||||
'context_files': _get_detailed_context_counts(package_path),
|
||||
'workflows': workflow_count
|
||||
}
|
||||
except Exception as e:
|
||||
return {
|
||||
'name': package_path.name,
|
||||
'version': 'error',
|
||||
'description': f'Error loading package: {e}',
|
||||
'author': 'Unknown',
|
||||
'source': 'unknown',
|
||||
'install_path': str(package_path.resolve()),
|
||||
'context_files': {'instructions': 0, 'chatmodes': 0, 'contexts': 0},
|
||||
'workflows': 0
|
||||
}
|
||||
|
||||
|
||||
def _update_single_package(package_name: str, project_deps: List, apm_modules_path: Path):
|
||||
"""Update a specific package."""
|
||||
# Find the dependency reference for this package
|
||||
target_dep = None
|
||||
for dep in project_deps:
|
||||
if dep.get_display_name() == package_name or dep.repo_url.split('/')[-1] == package_name:
|
||||
target_dep = dep
|
||||
break
|
||||
|
||||
if not target_dep:
|
||||
_rich_error(f"Package '{package_name}' not found in apm.yml dependencies")
|
||||
return
|
||||
|
||||
# Find the installed package directory
|
||||
package_dir = None
|
||||
if target_dep.alias:
|
||||
package_dir = apm_modules_path / target_dep.alias
|
||||
else:
|
||||
package_dir = apm_modules_path / package_name
|
||||
|
||||
if not package_dir.exists():
|
||||
_rich_error(f"Package '{package_name}' not installed in apm_modules/")
|
||||
_rich_info(f"Run 'apm install' to install it first")
|
||||
return
|
||||
|
||||
try:
|
||||
downloader = GitHubPackageDownloader()
|
||||
_rich_info(f"Updating {target_dep.repo_url}...")
|
||||
|
||||
# Download latest version
|
||||
package_info = downloader.download_package(str(target_dep), package_dir)
|
||||
|
||||
_rich_success(f"✅ Updated {target_dep.repo_url}")
|
||||
|
||||
except Exception as e:
|
||||
_rich_error(f"Failed to update {package_name}: {e}")
|
||||
|
||||
|
||||
def _update_all_packages(project_deps: List, apm_modules_path: Path):
|
||||
"""Update all packages."""
|
||||
if not project_deps:
|
||||
_rich_info("No APM dependencies to update")
|
||||
return
|
||||
|
||||
_rich_info(f"Updating {len(project_deps)} APM dependencies...")
|
||||
|
||||
downloader = GitHubPackageDownloader()
|
||||
updated_count = 0
|
||||
|
||||
for dep in project_deps:
|
||||
# Determine package directory
|
||||
if dep.alias:
|
||||
package_dir = apm_modules_path / dep.alias
|
||||
else:
|
||||
package_dir = apm_modules_path / dep.repo_url.split('/')[-1]
|
||||
|
||||
if not package_dir.exists():
|
||||
_rich_warning(f"⚠️ {dep.repo_url} not installed - skipping")
|
||||
continue
|
||||
|
||||
try:
|
||||
_rich_info(f" Updating {dep.repo_url}...")
|
||||
package_info = downloader.download_package(str(dep), package_dir)
|
||||
updated_count += 1
|
||||
_rich_success(f" ✅ {dep.repo_url}")
|
||||
|
||||
except Exception as e:
|
||||
_rich_error(f" ❌ Failed to update {dep.repo_url}: {e}")
|
||||
continue
|
||||
|
||||
_rich_success(f"Updated {updated_count} of {len(project_deps)} packages")
|
||||
|
||||
29  src/apm_cli/compilation/__init__.py  Normal file
@@ -0,0 +1,29 @@
"""APM compilation module for generating AGENTS.md files."""

from .agents_compiler import AgentsCompiler, compile_agents_md, CompilationConfig, CompilationResult
from .template_builder import (
    build_conditional_sections,
    TemplateData,
    find_chatmode_by_name
)
from .link_resolver import (
    resolve_markdown_links,
    validate_link_targets
)

__all__ = [
    # Main compilation interface
    'AgentsCompiler',
    'compile_agents_md',
    'CompilationConfig',
    'CompilationResult',

    # Template building
    'build_conditional_sections',
    'TemplateData',
    'find_chatmode_by_name',

    # Link resolution
    'resolve_markdown_links',
    'validate_link_targets'
]
630  src/apm_cli/compilation/agents_compiler.py  Normal file
@@ -0,0 +1,630 @@
"""Main compilation orchestration for AGENTS.md generation.
|
||||
|
||||
Timestamp generation removed in favor of deterministic Build ID handled after
|
||||
full content assembly. This keeps repeated compiles byte-identical when source
|
||||
primitives & constitution are unchanged.
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Dict, Any
|
||||
from ..primitives.models import PrimitiveCollection
|
||||
from ..primitives.discovery import discover_primitives
|
||||
from ..version import get_version
|
||||
from .template_builder import (
|
||||
build_conditional_sections,
|
||||
generate_agents_md_template,
|
||||
TemplateData,
|
||||
find_chatmode_by_name
|
||||
)
|
||||
from .link_resolver import resolve_markdown_links, validate_link_targets
|
||||
|
||||
|
||||
@dataclass
|
||||
class CompilationConfig:
|
||||
"""Configuration for AGENTS.md compilation."""
|
||||
output_path: str = "AGENTS.md"
|
||||
chatmode: Optional[str] = None
|
||||
resolve_links: bool = True
|
||||
dry_run: bool = False
|
||||
with_constitution: bool = True # Phase 0 feature flag
|
||||
|
||||
# Distributed compilation settings (Task 7)
|
||||
strategy: str = "distributed" # "distributed" or "single-file"
|
||||
single_agents: bool = False # Force single-file mode
|
||||
trace: bool = False # Show source attribution and conflicts
|
||||
local_only: bool = False # Ignore dependencies, compile only local primitives
|
||||
debug: bool = False # Show context optimizer analysis and metrics
|
||||
min_instructions_per_file: int = 1 # Minimum instructions per AGENTS.md file (Minimal Context Principle)
|
||||
source_attribution: bool = True # Include source file comments
|
||||
clean_orphaned: bool = False # Remove orphaned AGENTS.md files
|
||||
|
||||
def __post_init__(self):
|
||||
"""Handle CLI flag precedence after initialization."""
|
||||
if self.single_agents:
|
||||
self.strategy = "single-file"
|
||||
|
||||
@classmethod
|
||||
def from_apm_yml(cls, **overrides) -> 'CompilationConfig':
|
||||
"""Create configuration from apm.yml with command-line overrides.
|
||||
|
||||
Args:
|
||||
**overrides: Command-line arguments that override config file values.
|
||||
|
||||
Returns:
|
||||
CompilationConfig: Configuration with apm.yml values and overrides applied.
|
||||
"""
|
||||
config = cls()
|
||||
|
||||
# Try to load from apm.yml
|
||||
try:
|
||||
from pathlib import Path
|
||||
import yaml
|
||||
|
||||
if Path('apm.yml').exists():
|
||||
with open('apm.yml', 'r') as f:
|
||||
apm_config = yaml.safe_load(f) or {}
|
||||
|
||||
# Look for compilation section
|
||||
compilation_config = apm_config.get('compilation', {})
|
||||
|
||||
# Apply config file values
|
||||
if 'output' in compilation_config:
|
||||
config.output_path = compilation_config['output']
|
||||
if 'chatmode' in compilation_config:
|
||||
config.chatmode = compilation_config['chatmode']
|
||||
if 'resolve_links' in compilation_config:
|
||||
config.resolve_links = compilation_config['resolve_links']
|
||||
|
||||
# Distributed compilation settings (Task 7)
|
||||
if 'strategy' in compilation_config:
|
||||
config.strategy = compilation_config['strategy']
|
||||
if 'single_file' in compilation_config:
|
||||
# Legacy config support - if single_file is True, override strategy
|
||||
if compilation_config['single_file']:
|
||||
config.strategy = "single-file"
|
||||
config.single_agents = True
|
||||
|
||||
# Placement settings
|
||||
placement_config = compilation_config.get('placement', {})
|
||||
if 'min_instructions_per_file' in placement_config:
|
||||
config.min_instructions_per_file = placement_config['min_instructions_per_file']
|
||||
|
||||
# Source attribution
|
||||
if 'source_attribution' in compilation_config:
|
||||
config.source_attribution = compilation_config['source_attribution']
|
||||
|
||||
except Exception:
|
||||
# If config loading fails, use defaults
|
||||
pass
|
||||
|
||||
# Apply command-line overrides (highest priority)
|
||||
for key, value in overrides.items():
|
||||
if value is not None: # Only override if explicitly provided
|
||||
setattr(config, key, value)
|
||||
|
||||
# Handle CLI flag precedence
|
||||
if config.single_agents:
|
||||
config.strategy = "single-file"
|
||||
|
||||
return config
|
||||
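For reference, the keys that from_apm_yml reads are shown below as the dict yaml.safe_load would return for an apm.yml compilation section; the values are only examples, not defaults mandated by the code.

# Example of a parsed apm.yml "compilation" section (values illustrative).
apm_config = {
    "compilation": {
        "output": "AGENTS.md",
        "chatmode": None,
        "resolve_links": True,
        "strategy": "distributed",          # or "single-file"
        "single_file": False,               # legacy flag, forces single-file when true
        "placement": {"min_instructions_per_file": 1},
        "source_attribution": True,
    }
}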
|
||||
|
||||
@dataclass
|
||||
class CompilationResult:
|
||||
"""Result of AGENTS.md compilation."""
|
||||
success: bool
|
||||
output_path: str
|
||||
content: str
|
||||
warnings: List[str]
|
||||
errors: List[str]
|
||||
stats: Dict[str, Any]
|
||||
|
||||
|
||||
class AgentsCompiler:
|
||||
"""Main compiler for generating AGENTS.md files."""
|
||||
|
||||
def __init__(self, base_dir: str = "."):
|
||||
"""Initialize the compiler.
|
||||
|
||||
Args:
|
||||
base_dir (str): Base directory for compilation. Defaults to current directory.
|
||||
"""
|
||||
self.base_dir = Path(base_dir)
|
||||
self.warnings: List[str] = []
|
||||
self.errors: List[str] = []
|
||||
|
||||
def compile(self, config: CompilationConfig, primitives: Optional[PrimitiveCollection] = None) -> CompilationResult:
|
||||
"""Compile AGENTS.md with the given configuration.
|
||||
|
||||
Args:
|
||||
config (CompilationConfig): Compilation configuration.
|
||||
primitives (Optional[PrimitiveCollection]): Primitives to use, or None to discover.
|
||||
|
||||
Returns:
|
||||
CompilationResult: Result of the compilation.
|
||||
"""
|
||||
self.warnings.clear()
|
||||
self.errors.clear()
|
||||
|
||||
try:
|
||||
# Use provided primitives or discover them (with dependency support)
|
||||
if primitives is None:
|
||||
if config.local_only:
|
||||
# Use basic discovery for local-only mode
|
||||
primitives = discover_primitives(str(self.base_dir))
|
||||
else:
|
||||
# Use enhanced discovery with dependencies (Task 4 integration)
|
||||
from ..primitives.discovery import discover_primitives_with_dependencies
|
||||
primitives = discover_primitives_with_dependencies(str(self.base_dir))
|
||||
|
||||
# Handle distributed compilation (Task 7 - new default behavior)
|
||||
if config.strategy == "distributed" and not config.single_agents:
|
||||
return self._compile_distributed(config, primitives)
|
||||
else:
|
||||
# Traditional single-file compilation (backward compatibility)
|
||||
return self._compile_single_file(config, primitives)
|
||||
|
||||
except Exception as e:
|
||||
self.errors.append(f"Compilation failed: {str(e)}")
|
||||
return CompilationResult(
|
||||
success=False,
|
||||
output_path="",
|
||||
content="",
|
||||
warnings=self.warnings.copy(),
|
||||
errors=self.errors.copy(),
|
||||
stats={}
|
||||
)
|
||||
|
||||
def _compile_distributed(self, config: CompilationConfig, primitives: PrimitiveCollection) -> CompilationResult:
|
||||
"""Compile using distributed AGENTS.md approach (Task 7).
|
||||
|
||||
Args:
|
||||
config (CompilationConfig): Compilation configuration.
|
||||
primitives (PrimitiveCollection): Primitives to compile.
|
||||
|
||||
Returns:
|
||||
CompilationResult: Result of distributed compilation.
|
||||
"""
|
||||
from .distributed_compiler import DistributedAgentsCompiler
|
||||
|
||||
# Create distributed compiler
|
||||
distributed_compiler = DistributedAgentsCompiler(str(self.base_dir))
|
||||
|
||||
# Prepare configuration for distributed compilation
|
||||
distributed_config = {
|
||||
'min_instructions_per_file': config.min_instructions_per_file,
|
||||
# max_depth removed - full project analysis
|
||||
'source_attribution': config.source_attribution,
|
||||
'debug': config.debug,
|
||||
'clean_orphaned': config.clean_orphaned,
|
||||
'dry_run': config.dry_run
|
||||
}
|
||||
|
||||
# Compile distributed
|
||||
distributed_result = distributed_compiler.compile_distributed(primitives, distributed_config)
|
||||
|
||||
# Display professional compilation output (always show, not just in debug)
|
||||
compilation_results = distributed_compiler.get_compilation_results_for_display(config.dry_run)
|
||||
if compilation_results:
|
||||
if config.debug or config.trace:
|
||||
# Verbose mode with mathematical analysis
|
||||
output = distributed_compiler.output_formatter.format_verbose(compilation_results)
|
||||
elif config.dry_run:
|
||||
# Dry run mode with placement preview
|
||||
output = distributed_compiler.output_formatter.format_dry_run(compilation_results)
|
||||
else:
|
||||
# Default mode with essential information
|
||||
output = distributed_compiler.output_formatter.format_default(compilation_results)
|
||||
|
||||
# Display the professional output
|
||||
print(output)
|
||||
|
||||
if not distributed_result.success:
|
||||
self.warnings.extend(distributed_result.warnings)
|
||||
self.errors.extend(distributed_result.errors)
|
||||
return CompilationResult(
|
||||
success=False,
|
||||
output_path="",
|
||||
content="",
|
||||
warnings=self.warnings.copy(),
|
||||
errors=self.errors.copy(),
|
||||
stats=distributed_result.stats
|
||||
)
|
||||
|
||||
# Handle dry-run mode (preview placement without writing files)
|
||||
if config.dry_run:
|
||||
# Count files that would be written (directories that exist)
|
||||
successful_writes = 0
|
||||
for agents_path in distributed_result.content_map.keys():
|
||||
if agents_path.parent.exists():
|
||||
successful_writes += 1
|
||||
|
||||
# Update stats with actual files that would be written
|
||||
if distributed_result.stats:
|
||||
distributed_result.stats["agents_files_generated"] = successful_writes
|
||||
|
||||
# Don't write files in preview mode - output already shown above
|
||||
return CompilationResult(
|
||||
success=True,
|
||||
output_path="Preview mode - no files written",
|
||||
content=self._generate_placement_summary(distributed_result),
|
||||
warnings=distributed_result.warnings,
|
||||
errors=distributed_result.errors,
|
||||
stats=distributed_result.stats
|
||||
)
|
||||
|
||||
# Write distributed AGENTS.md files
|
||||
successful_writes = 0
|
||||
total_content_entries = len(distributed_result.content_map)
|
||||
|
||||
for agents_path, content in distributed_result.content_map.items():
|
||||
try:
|
||||
self._write_distributed_file(agents_path, content, config)
|
||||
successful_writes += 1
|
||||
except OSError as e:
|
||||
self.errors.append(f"Failed to write {agents_path}: {str(e)}")
|
||||
|
||||
# Update stats with actual files written
|
||||
if distributed_result.stats:
|
||||
distributed_result.stats["agents_files_generated"] = successful_writes
|
||||
|
||||
# Merge warnings and errors
|
||||
self.warnings.extend(distributed_result.warnings)
|
||||
self.errors.extend(distributed_result.errors)
|
||||
|
||||
# Create summary for backward compatibility
|
||||
summary_content = self._generate_distributed_summary(distributed_result, config)
|
||||
|
||||
return CompilationResult(
|
||||
success=len(self.errors) == 0,
|
||||
output_path=f"Distributed: {len(distributed_result.placements)} AGENTS.md files",
|
||||
content=summary_content,
|
||||
warnings=self.warnings.copy(),
|
||||
errors=self.errors.copy(),
|
||||
stats=distributed_result.stats
|
||||
)
|
||||
|
||||
def _compile_single_file(self, config: CompilationConfig, primitives: PrimitiveCollection) -> CompilationResult:
|
||||
"""Compile using traditional single-file approach (backward compatibility).
|
||||
|
||||
Args:
|
||||
config (CompilationConfig): Compilation configuration.
|
||||
primitives (PrimitiveCollection): Primitives to compile.
|
||||
|
||||
Returns:
|
||||
CompilationResult: Result of single-file compilation.
|
||||
"""
|
||||
# Validate primitives
|
||||
validation_errors = self.validate_primitives(primitives)
|
||||
if validation_errors:
|
||||
self.errors.extend(validation_errors)
|
||||
|
||||
# Generate template data
|
||||
template_data = self._generate_template_data(primitives, config)
|
||||
|
||||
# Generate final output
|
||||
content = self.generate_output(template_data, config)
|
||||
|
||||
# Write output file (constitution injection handled externally in CLI)
|
||||
output_path = str(self.base_dir / config.output_path)
|
||||
if not config.dry_run:
|
||||
self._write_output_file(output_path, content)
|
||||
|
||||
# Compile statistics
|
||||
stats = self._compile_stats(primitives, template_data)
|
||||
|
||||
return CompilationResult(
|
||||
success=len(self.errors) == 0,
|
||||
output_path=output_path,
|
||||
content=content,
|
||||
warnings=self.warnings.copy(),
|
||||
errors=self.errors.copy(),
|
||||
stats=stats
|
||||
)
|
||||
|
||||
def validate_primitives(self, primitives: PrimitiveCollection) -> List[str]:
|
||||
"""Validate primitives for compilation.
|
||||
|
||||
Args:
|
||||
primitives (PrimitiveCollection): Collection of primitives to validate.
|
||||
|
||||
Returns:
|
||||
List[str]: List of validation errors.
|
||||
"""
|
||||
errors = []
|
||||
|
||||
# Validate each primitive
|
||||
for primitive in primitives.all_primitives():
|
||||
primitive_errors = primitive.validate()
|
||||
if primitive_errors:
|
||||
try:
|
||||
# Try to get relative path, but fall back to absolute if it fails
|
||||
file_path = str(primitive.file_path.relative_to(self.base_dir))
|
||||
except ValueError:
|
||||
# File is outside base_dir, use absolute path
|
||||
file_path = str(primitive.file_path)
|
||||
|
||||
for error in primitive_errors:
|
||||
# Treat validation errors as warnings instead of hard errors
|
||||
# This allows compilation to continue with incomplete primitives
|
||||
self.warnings.append(f"{file_path}: {error}")
|
||||
|
||||
# Validate markdown links in each primitive's content using its own directory as base
|
||||
if hasattr(primitive, 'content') and primitive.content:
|
||||
primitive_dir = primitive.file_path.parent
|
||||
link_errors = validate_link_targets(primitive.content, primitive_dir)
|
||||
if link_errors:
|
||||
try:
|
||||
file_path = str(primitive.file_path.relative_to(self.base_dir))
|
||||
except ValueError:
|
||||
file_path = str(primitive.file_path)
|
||||
|
||||
for link_error in link_errors:
|
||||
self.warnings.append(f"{file_path}: {link_error}")
|
||||
|
||||
return errors
|
||||
|
||||
def generate_output(self, template_data: TemplateData, config: CompilationConfig) -> str:
|
||||
"""Generate the final AGENTS.md output.
|
||||
|
||||
Args:
|
||||
template_data (TemplateData): Data for template generation.
|
||||
config (CompilationConfig): Compilation configuration.
|
||||
|
||||
Returns:
|
||||
str: Generated AGENTS.md content.
|
||||
"""
|
||||
content = generate_agents_md_template(template_data)
|
||||
|
||||
# Resolve markdown links if enabled
|
||||
if config.resolve_links:
|
||||
content = resolve_markdown_links(content, self.base_dir)
|
||||
|
||||
return content
|
||||
|
||||
def _generate_template_data(self, primitives: PrimitiveCollection, config: CompilationConfig) -> TemplateData:
|
||||
"""Generate template data from primitives and configuration.
|
||||
|
||||
Args:
|
||||
primitives (PrimitiveCollection): Discovered primitives.
|
||||
config (CompilationConfig): Compilation configuration.
|
||||
|
||||
Returns:
|
||||
TemplateData: Template data for generation.
|
||||
"""
|
||||
# Build instructions content
|
||||
instructions_content = build_conditional_sections(primitives.instructions)
|
||||
|
||||
# Metadata (version only; timestamp intentionally omitted for determinism)
|
||||
version = get_version()
|
||||
|
||||
# Handle chatmode content
|
||||
chatmode_content = None
|
||||
if config.chatmode:
|
||||
chatmode = find_chatmode_by_name(primitives.chatmodes, config.chatmode)
|
||||
if chatmode:
|
||||
chatmode_content = chatmode.content
|
||||
else:
|
||||
self.warnings.append(f"Chatmode '{config.chatmode}' not found")
|
||||
|
||||
return TemplateData(
|
||||
instructions_content=instructions_content,
|
||||
version=version,
|
||||
chatmode_content=chatmode_content
|
||||
)
|
||||
|
||||
def _write_output_file(self, output_path: str, content: str) -> None:
|
||||
"""Write the generated content to the output file.
|
||||
|
||||
Args:
|
||||
output_path (str): Path to write the output.
|
||||
content (str): Content to write.
|
||||
"""
|
||||
try:
|
||||
with open(output_path, 'w', encoding='utf-8') as f:
|
||||
f.write(content)
|
||||
except OSError as e:
|
||||
self.errors.append(f"Failed to write output file {output_path}: {str(e)}")
|
||||
|
||||
def _compile_stats(self, primitives: PrimitiveCollection, template_data: TemplateData) -> Dict[str, Any]:
|
||||
"""Compile statistics about the compilation.
|
||||
|
||||
Args:
|
||||
primitives (PrimitiveCollection): Discovered primitives.
|
||||
template_data (TemplateData): Generated template data.
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: Compilation statistics.
|
||||
"""
|
||||
return {
|
||||
"primitives_found": primitives.count(),
|
||||
"chatmodes": len(primitives.chatmodes),
|
||||
"instructions": len(primitives.instructions),
|
||||
"contexts": len(primitives.contexts),
|
||||
"content_length": len(template_data.instructions_content),
|
||||
# timestamp removed
|
||||
"version": template_data.version
|
||||
}
|
||||
|
||||
|
||||
def _write_distributed_file(self, agents_path: Path, content: str, config: CompilationConfig) -> None:
|
||||
"""Write a distributed AGENTS.md file with constitution injection support.
|
||||
|
||||
Args:
|
||||
agents_path (Path): Path to write the AGENTS.md file.
|
||||
content (str): Content to write.
|
||||
config (CompilationConfig): Compilation configuration.
|
||||
"""
|
||||
try:
|
||||
# Handle constitution injection for distributed files
|
||||
final_content = content
|
||||
|
||||
if config.with_constitution:
|
||||
# Try to inject constitution if available
|
||||
try:
|
||||
from .injector import ConstitutionInjector
|
||||
injector = ConstitutionInjector(str(agents_path.parent))
|
||||
final_content, c_status, c_hash = injector.inject(
|
||||
content,
|
||||
with_constitution=True,
|
||||
output_path=agents_path
|
||||
)
|
||||
except Exception:
|
||||
# If constitution injection fails, use original content
|
||||
pass
|
||||
|
||||
# Create directory if it doesn't exist
|
||||
agents_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Write the file
|
||||
with open(agents_path, 'w', encoding='utf-8') as f:
|
||||
f.write(final_content)
|
||||
|
||||
except OSError as e:
|
||||
raise OSError(f"Failed to write distributed AGENTS.md file {agents_path}: {str(e)}")
|
||||
|
||||
def _display_placement_preview(self, distributed_result) -> None:
|
||||
"""Display placement preview for --show-placement mode.
|
||||
|
||||
Args:
|
||||
distributed_result: Result from distributed compilation.
|
||||
"""
|
||||
print("🔍 Distributed AGENTS.md Placement Preview:")
|
||||
print()
|
||||
|
||||
for placement in distributed_result.placements:
|
||||
try:
|
||||
rel_path = placement.agents_path.relative_to(self.base_dir.resolve())
|
||||
except ValueError:
|
||||
# Fallback for path resolution issues
|
||||
rel_path = placement.agents_path
|
||||
print(f"📄 {rel_path}")
|
||||
print(f" Instructions: {len(placement.instructions)}")
|
||||
print(f" Patterns: {', '.join(sorted(placement.coverage_patterns))}")
|
||||
if placement.source_attribution:
|
||||
sources = set(placement.source_attribution.values())
|
||||
print(f" Sources: {', '.join(sorted(sources))}")
|
||||
print()
|
||||
|
||||
def _display_trace_info(self, distributed_result, primitives: PrimitiveCollection) -> None:
|
||||
"""Display detailed trace information for --trace mode.
|
||||
|
||||
Args:
|
||||
distributed_result: Result from distributed compilation.
|
||||
primitives (PrimitiveCollection): Full primitive collection.
|
||||
"""
|
||||
print("🔍 Distributed Compilation Trace:")
|
||||
print()
|
||||
|
||||
for placement in distributed_result.placements:
|
||||
try:
|
||||
rel_path = placement.agents_path.relative_to(self.base_dir.resolve())
|
||||
except ValueError:
|
||||
rel_path = placement.agents_path
|
||||
print(f"📄 {rel_path}")
|
||||
|
||||
for instruction in placement.instructions:
|
||||
source = getattr(instruction, 'source', 'local')
|
||||
try:
|
||||
inst_path = instruction.file_path.relative_to(self.base_dir.resolve())
|
||||
except ValueError:
|
||||
inst_path = instruction.file_path
|
||||
|
||||
print(f" • {instruction.apply_to or 'no pattern'} <- {source} {inst_path}")
|
||||
print()
|
||||
|
||||
def _generate_placement_summary(self, distributed_result) -> str:
|
||||
"""Generate a text summary of placement results.
|
||||
|
||||
Args:
|
||||
distributed_result: Result from distributed compilation.
|
||||
|
||||
Returns:
|
||||
str: Text summary of placements.
|
||||
"""
|
||||
lines = ["Distributed AGENTS.md Placement Summary:", ""]
|
||||
|
||||
for placement in distributed_result.placements:
|
||||
try:
|
||||
rel_path = placement.agents_path.relative_to(self.base_dir.resolve())
|
||||
except ValueError:
|
||||
rel_path = placement.agents_path
|
||||
lines.append(f"📄 {rel_path}")
|
||||
lines.append(f" Instructions: {len(placement.instructions)}")
|
||||
lines.append(f" Patterns: {', '.join(sorted(placement.coverage_patterns))}")
|
||||
lines.append("")
|
||||
|
||||
lines.append(f"Total AGENTS.md files: {len(distributed_result.placements)}")
|
||||
return "\n".join(lines)
|
||||
|
||||
def _generate_distributed_summary(self, distributed_result, config: CompilationConfig) -> str:
|
||||
"""Generate a summary of distributed compilation results.
|
||||
|
||||
Args:
|
||||
distributed_result: Result from distributed compilation.
|
||||
config (CompilationConfig): Compilation configuration.
|
||||
|
||||
Returns:
|
||||
str: Summary content.
|
||||
"""
|
||||
lines = [
|
||||
"# Distributed AGENTS.md Compilation Summary",
|
||||
"",
|
||||
f"Generated {len(distributed_result.placements)} AGENTS.md files:",
|
||||
""
|
||||
]
|
||||
|
||||
for placement in distributed_result.placements:
|
||||
try:
|
||||
rel_path = placement.agents_path.relative_to(self.base_dir.resolve())
|
||||
except ValueError:
|
||||
rel_path = placement.agents_path
|
||||
lines.append(f"- {rel_path} ({len(placement.instructions)} instructions)")
|
||||
|
||||
lines.extend([
|
||||
"",
|
||||
f"Total instructions: {distributed_result.stats.get('total_instructions_placed', 0)}",
|
||||
f"Total patterns: {distributed_result.stats.get('total_patterns_covered', 0)}",
|
||||
"",
|
||||
"Use 'apm compile --single-agents' for traditional single-file compilation."
|
||||
])
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def compile_agents_md(
|
||||
primitives: Optional[PrimitiveCollection] = None,
|
||||
output_path: str = "AGENTS.md",
|
||||
chatmode: Optional[str] = None,
|
||||
dry_run: bool = False,
|
||||
base_dir: str = "."
|
||||
) -> str:
|
||||
"""Generate AGENTS.md with conditional sections.
|
||||
|
||||
Args:
|
||||
primitives (Optional[PrimitiveCollection]): Primitives to use, or None to discover.
|
||||
output_path (str): Output file path. Defaults to "AGENTS.md".
|
||||
chatmode (str): Specific chatmode to use, or None for default.
|
||||
dry_run (bool): If True, don't write output file. Defaults to False.
|
||||
base_dir (str): Base directory for compilation. Defaults to current directory.
|
||||
|
||||
Returns:
|
||||
str: Generated AGENTS.md content.
|
||||
"""
|
||||
# Create configuration - use single-file mode for backward compatibility
|
||||
config = CompilationConfig(
|
||||
output_path=output_path,
|
||||
chatmode=chatmode,
|
||||
dry_run=dry_run,
|
||||
strategy="single-file" # Force single-file mode for backward compatibility
|
||||
)
|
||||
|
||||
# Create compiler and compile
|
||||
compiler = AgentsCompiler(base_dir)
|
||||
result = compiler.compile(config, primitives)
|
||||
|
||||
if not result.success:
|
||||
raise RuntimeError(f"Compilation failed: {'; '.join(result.errors)}")
|
||||
|
||||
return result.content
|
||||
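A minimal sketch of the backward-compatible entry point above: a dry-run compile that returns the generated AGENTS.md content without writing any file. It assumes the package is importable as apm_cli.compilation.

content = compile_agents_md(dry_run=True)   # discovers primitives in the current directory
print(content.splitlines()[0])              # first line of the generated AGENTS.md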
18  src/apm_cli/compilation/constants.py  Normal file
@@ -0,0 +1,18 @@
"""Shared constants for compilation extensions (constitution injection, etc.).

Also contains shared markers for build metadata stabilization. We intentionally
avoid timestamps in generated artifacts to guarantee byte-level idempotency; a
deterministic Build ID (content hash) is substituted post-generation.
"""

# Constitution injection markers
CONSTITUTION_MARKER_BEGIN = "<!-- SPEC-KIT CONSTITUTION: BEGIN -->"
CONSTITUTION_MARKER_END = "<!-- SPEC-KIT CONSTITUTION: END -->"
CONSTITUTION_RELATIVE_PATH = ".specify/memory/constitution.md"  # repo-root relative

# Build ID placeholder & regex pattern (line-level). The placeholder line is
# inserted during initial template generation; after all transformations
# (constitution injection, link resolution, etc.) we compute a SHA256 of the
# final content with this line removed and then replace it with the truncated
# hash. This ensures the hash is not self-referential and remains stable.
BUILD_ID_PLACEHOLDER = "<!-- Build ID: __BUILD_ID__ -->"
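A sketch of the substitution those comments describe; the helper name is mine, not part of the diff: hash the final content with the placeholder line removed, then swap the placeholder for the truncated digest.

import hashlib

def stamp_build_id(content: str) -> str:
    # Remove the placeholder line so the hash is not self-referential.
    without_placeholder = "\n".join(
        line for line in content.splitlines() if line != BUILD_ID_PLACEHOLDER
    )
    digest = hashlib.sha256(without_placeholder.encode("utf-8")).hexdigest()[:12]
    return content.replace(BUILD_ID_PLACEHOLDER, f"<!-- Build ID: {digest} -->")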
33  src/apm_cli/compilation/constitution.py  Normal file
@@ -0,0 +1,33 @@
"""Utilities for reading Spec Kit style constitution file."""
from __future__ import annotations

from pathlib import Path
from typing import Optional

from .constants import CONSTITUTION_RELATIVE_PATH


def find_constitution(base_dir: Path) -> Path:
    """Return path to constitution.md if present, else Path that does not exist.

    We keep logic trivial for Phase 0: fixed location under memory/.
    Later phases may support multiple shards / namespacing.
    """
    return base_dir / CONSTITUTION_RELATIVE_PATH


def read_constitution(base_dir: Path) -> Optional[str]:
    """Read full constitution content if file exists.

    Args:
        base_dir: Repository root path.
    Returns:
        Full file text or None if absent.
    """
    path = find_constitution(base_dir)
    if not path.exists() or not path.is_file():
        return None
    try:
        return path.read_text(encoding="utf-8")
    except OSError:
        return None
96
src/apm_cli/compilation/constitution_block.py
Normal file
96
src/apm_cli/compilation/constitution_block.py
Normal file
@@ -0,0 +1,96 @@
|
||||
"""Rendering & parsing of injected constitution block in AGENTS.md."""
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
from typing import Optional
|
||||
|
||||
from .constants import (
|
||||
CONSTITUTION_MARKER_BEGIN,
|
||||
CONSTITUTION_MARKER_END,
|
||||
CONSTITUTION_RELATIVE_PATH,
|
||||
)
|
||||
|
||||
|
||||
HASH_PREFIX = "hash:"
|
||||
|
||||
|
||||
def compute_constitution_hash(content: str) -> str:
|
||||
"""Compute stable truncated SHA256 hash of full constitution content."""
|
||||
sha = hashlib.sha256(content.encode("utf-8"))
|
||||
return sha.hexdigest()[:12]
|
||||
|
||||
|
||||
def render_block(constitution_content: str) -> str:
|
||||
"""Render full constitution block with markers and hash line.
|
||||
|
||||
The block mirrors spec requirement: entire file as-is within markers.
|
||||
"""
|
||||
h = compute_constitution_hash(constitution_content)
|
||||
header_meta = f"{HASH_PREFIX} {h} path: {CONSTITUTION_RELATIVE_PATH}"
|
||||
# Ensure trailing newline for clean separation from compiled content
|
||||
body = constitution_content.rstrip() + "\n"
|
||||
return (
|
||||
f"{CONSTITUTION_MARKER_BEGIN}\n"
|
||||
f"{header_meta}\n"
|
||||
f"{body}"
|
||||
f"{CONSTITUTION_MARKER_END}\n"
|
||||
"\n" # blank line after block
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ExistingBlock:
|
||||
raw: str
|
||||
hash: Optional[str]
|
||||
start_index: int
|
||||
end_index: int
|
||||
|
||||
|
||||
BLOCK_REGEX = re.compile(
|
||||
rf"({re.escape(CONSTITUTION_MARKER_BEGIN)})(.*?)({re.escape(CONSTITUTION_MARKER_END)})",
|
||||
re.DOTALL,
|
||||
)
|
||||
|
||||
HASH_LINE_REGEX = re.compile(r"hash:\s*([0-9a-fA-F]{6,64})")
|
||||
|
||||
|
||||
def find_existing_block(content: str) -> Optional[ExistingBlock]:
|
||||
"""Locate existing constitution block and extract its hash if present."""
|
||||
match = BLOCK_REGEX.search(content)
|
||||
if not match:
|
||||
return None
|
||||
block_text = match.group(0)
|
||||
hash_match = HASH_LINE_REGEX.search(block_text)
|
||||
h = hash_match.group(1) if hash_match else None
|
||||
return ExistingBlock(raw=block_text, hash=h, start_index=match.start(), end_index=match.end())
|
||||
|
||||
|
||||
def inject_or_update(existing_agents: str, new_block: str, place_top: bool = True) -> tuple[str, str]:
|
||||
"""Insert or update constitution block in existing AGENTS.md content.
|
||||
|
||||
Args:
|
||||
existing_agents: Current AGENTS.md text (may be empty).
|
||||
new_block: Rendered constitution block (already ends with newline).
|
||||
place_top: Always True for Phase 0 (prepend at top).
|
||||
Returns:
|
||||
(updated_text, status) where status in CREATED|UPDATED|UNCHANGED.
|
||||
"""
|
||||
existing_block = find_existing_block(existing_agents)
|
||||
if existing_block:
|
||||
if existing_block.raw == new_block.rstrip(): # exclude trailing blank block newline
|
||||
return existing_agents, "UNCHANGED"
|
||||
# Replace existing block span with new block
|
||||
updated = existing_agents[: existing_block.start_index] + new_block.rstrip() + existing_agents[existing_block.end_index :]
|
||||
# Ensure trailing newline after block + rest
|
||||
if not updated.startswith(new_block):
|
||||
# If markers were not at top previously and we want top placement, move them
|
||||
if place_top:
|
||||
body_without_block = updated.replace(new_block.rstrip(), "").lstrip("\n")
|
||||
updated = new_block + body_without_block
|
||||
return updated, "UPDATED"
|
||||
# No existing block
|
||||
if place_top:
|
||||
return new_block + existing_agents.lstrip("\n"), "CREATED"
|
||||
return existing_agents + ("\n" if not existing_agents.endswith("\n") else "") + new_block, "CREATED"
|
||||
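To make the marker format above concrete, here is a hedged sketch of what render_block produces and how inject_or_update is driven; the hash value shown is illustrative:

    from apm_cli.compilation.constitution_block import render_block, inject_or_update

    block = render_block("# Project Constitution\n\nBe concise.")
    # <!-- SPEC-KIT CONSTITUTION: BEGIN -->
    # hash: 3f2a9c1d0b4e path: .specify/memory/constitution.md
    # # Project Constitution
    #
    # Be concise.
    # <!-- SPEC-KIT CONSTITUTION: END -->

    updated, status = inject_or_update(existing_agents="# AGENTS.md\n", new_block=block)
    # status is "CREATED" on the first run and "UNCHANGED" when the block is already current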
1163
src/apm_cli/compilation/context_optimizer.py
Normal file
File diff suppressed because it is too large
685
src/apm_cli/compilation/distributed_compiler.py
Normal file
@@ -0,0 +1,685 @@
|
||||
"""Distributed AGENTS.md compilation system following the Minimal Context Principle.
|
||||
|
||||
This module implements hierarchical directory-based distribution to generate multiple
|
||||
AGENTS.md files across a project's directory structure, following the AGENTS.md standard
|
||||
for nested agent context files.
|
||||
"""
|
||||
|
||||
import os
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Set, Tuple
|
||||
from collections import defaultdict
|
||||
|
||||
from ..primitives.models import Instruction, PrimitiveCollection
|
||||
from ..version import get_version
|
||||
from .template_builder import TemplateData, find_chatmode_by_name
|
||||
from .constants import BUILD_ID_PLACEHOLDER
|
||||
from .context_optimizer import ContextOptimizer
|
||||
from ..output.formatters import CompilationFormatter
|
||||
from ..output.models import CompilationResults
|
||||
|
||||
|
||||
@dataclass
|
||||
class DirectoryMap:
|
||||
"""Mapping of directory structure analysis."""
|
||||
directories: Dict[Path, Set[str]] # directory -> set of applicable file patterns
|
||||
depth_map: Dict[Path, int] # directory -> depth level
|
||||
parent_map: Dict[Path, Optional[Path]] # directory -> parent directory
|
||||
|
||||
def get_max_depth(self) -> int:
|
||||
"""Get maximum depth in the directory structure."""
|
||||
return max(self.depth_map.values()) if self.depth_map else 0
|
||||
|
||||
|
||||
@dataclass
|
||||
class PlacementResult:
|
||||
"""Result of AGENTS.md placement analysis."""
|
||||
agents_path: Path
|
||||
instructions: List[Instruction]
|
||||
inherited_instructions: List[Instruction] = field(default_factory=list)
|
||||
coverage_patterns: Set[str] = field(default_factory=set)
|
||||
source_attribution: Dict[str, str] = field(default_factory=dict) # instruction_id -> source
|
||||
|
||||
|
||||
@dataclass
|
||||
class CompilationResult:
|
||||
"""Result of distributed AGENTS.md compilation."""
|
||||
success: bool
|
||||
placements: List[PlacementResult]
|
||||
content_map: Dict[Path, str] # agents_path -> content
|
||||
warnings: List[str] = field(default_factory=list)
|
||||
errors: List[str] = field(default_factory=list)
|
||||
stats: Dict[str, float] = field(default_factory=dict) # Support optimization metrics
|
||||
|
||||
|
||||
class DistributedAgentsCompiler:
|
||||
"""Main compiler for generating distributed AGENTS.md files."""
|
||||
|
||||
def __init__(self, base_dir: str = "."):
|
||||
"""Initialize the distributed AGENTS.md compiler.
|
||||
|
||||
Args:
|
||||
base_dir (str): Base directory for compilation.
|
||||
"""
|
||||
try:
|
||||
self.base_dir = Path(base_dir).resolve()
|
||||
except (OSError, FileNotFoundError):
|
||||
self.base_dir = Path(base_dir).absolute()
|
||||
|
||||
self.warnings: List[str] = []
|
||||
self.errors: List[str] = []
|
||||
self.total_files_written = 0
|
||||
self.context_optimizer = ContextOptimizer(str(self.base_dir))
|
||||
self.output_formatter = CompilationFormatter()
|
||||
self._placement_map = None
|
||||
|
||||
def compile_distributed(
|
||||
self,
|
||||
primitives: PrimitiveCollection,
|
||||
config: Optional[dict] = None
|
||||
) -> CompilationResult:
|
||||
"""Compile primitives into distributed AGENTS.md files.
|
||||
|
||||
Args:
|
||||
primitives (PrimitiveCollection): Collection of primitives to compile.
|
||||
config (Optional[dict]): Configuration for distributed compilation.
|
||||
- clean_orphaned (bool): Remove orphaned AGENTS.md files. Default: False
|
||||
- dry_run (bool): Preview mode, don't write files. Default: False
|
||||
|
||||
Returns:
|
||||
CompilationResult: Result of the distributed compilation.
|
||||
"""
|
||||
self.warnings.clear()
|
||||
self.errors.clear()
|
||||
|
||||
try:
|
||||
# Configuration with defaults aligned to Minimal Context Principle
|
||||
config = config or {}
|
||||
min_instructions = config.get('min_instructions_per_file', 1) # Default to 1 for minimal context
|
||||
source_attribution = config.get('source_attribution', True)
|
||||
debug = config.get('debug', False)
|
||||
clean_orphaned = config.get('clean_orphaned', False)
|
||||
dry_run = config.get('dry_run', False)
|
||||
|
||||
# Phase 1: Directory structure analysis
|
||||
directory_map = self.analyze_directory_structure(primitives.instructions)
|
||||
|
||||
# Phase 2: Determine optimal AGENTS.md placement
|
||||
placement_map = self.determine_agents_placement(
|
||||
primitives.instructions,
|
||||
directory_map,
|
||||
min_instructions=min_instructions,
|
||||
debug=debug
|
||||
)
|
||||
|
||||
# Phase 3: Generate distributed AGENTS.md files
|
||||
placements = self.generate_distributed_agents_files(
|
||||
placement_map,
|
||||
primitives,
|
||||
source_attribution=source_attribution
|
||||
)
|
||||
|
||||
# Phase 4: Handle orphaned file cleanup
|
||||
generated_paths = [p.agents_path for p in placements]
|
||||
orphaned_files = self._find_orphaned_agents_files(generated_paths)
|
||||
|
||||
if orphaned_files:
|
||||
# Always show warnings about orphaned files
|
||||
warning_messages = self._generate_orphan_warnings(orphaned_files)
|
||||
if warning_messages:
|
||||
self.warnings.extend(warning_messages)
|
||||
|
||||
# Only perform actual cleanup if not dry_run and clean_orphaned is True
|
||||
if not dry_run and clean_orphaned:
|
||||
cleanup_messages = self._cleanup_orphaned_files(orphaned_files, dry_run=False)
|
||||
if cleanup_messages:
|
||||
self.warnings.extend(cleanup_messages)
|
||||
|
||||
# Phase 5: Validate coverage
|
||||
coverage_validation = self._validate_coverage(placements, primitives.instructions)
|
||||
if coverage_validation:
|
||||
self.warnings.extend(coverage_validation)
|
||||
|
||||
# Compile statistics
|
||||
stats = self._compile_distributed_stats(placements, primitives)
|
||||
|
||||
return CompilationResult(
|
||||
success=len(self.errors) == 0,
|
||||
placements=placements,
|
||||
content_map={p.agents_path: self._generate_agents_content(p, primitives) for p in placements},
|
||||
warnings=self.warnings.copy(),
|
||||
errors=self.errors.copy(),
|
||||
stats=stats
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.errors.append(f"Distributed compilation failed: {str(e)}")
|
||||
return CompilationResult(
|
||||
success=False,
|
||||
placements=[],
|
||||
content_map={},
|
||||
warnings=self.warnings.copy(),
|
||||
errors=self.errors.copy(),
|
||||
stats={}
|
||||
)
|
||||
|
||||
def analyze_directory_structure(self, instructions: List[Instruction]) -> DirectoryMap:
|
||||
"""Analyze project directory structure based on instruction patterns.
|
||||
|
||||
Args:
|
||||
instructions (List[Instruction]): List of instructions to analyze.
|
||||
|
||||
Returns:
|
||||
DirectoryMap: Analysis of the directory structure.
|
||||
"""
|
||||
directories: Dict[Path, Set[str]] = defaultdict(set)
|
||||
depth_map: Dict[Path, int] = {}
|
||||
parent_map: Dict[Path, Optional[Path]] = {}
|
||||
|
||||
# Analyze each instruction's applyTo pattern
|
||||
for instruction in instructions:
|
||||
if not instruction.apply_to:
|
||||
continue
|
||||
|
||||
pattern = instruction.apply_to
|
||||
|
||||
# Extract directory paths from pattern
|
||||
dirs = self._extract_directories_from_pattern(pattern)
|
||||
|
||||
for dir_path in dirs:
|
||||
abs_dir = self.base_dir / dir_path
|
||||
directories[abs_dir].add(pattern)
|
||||
|
||||
# Calculate depth and parent relationships
|
||||
depth = len(abs_dir.relative_to(self.base_dir).parts)
|
||||
depth_map[abs_dir] = depth
|
||||
|
||||
if depth > 0:
|
||||
parent_dir = abs_dir.parent
|
||||
parent_map[abs_dir] = parent_dir
|
||||
# Ensure parent is also tracked
|
||||
if parent_dir not in directories:
|
||||
directories[parent_dir] = set()
|
||||
else:
|
||||
parent_map[abs_dir] = None
|
||||
|
||||
# Add base directory
|
||||
directories[self.base_dir].update(instruction.apply_to for instruction in instructions if instruction.apply_to)
|
||||
depth_map[self.base_dir] = 0
|
||||
parent_map[self.base_dir] = None
|
||||
|
||||
return DirectoryMap(
|
||||
directories=dict(directories),
|
||||
depth_map=depth_map,
|
||||
parent_map=parent_map
|
||||
)
|
||||
|
||||
def determine_agents_placement(
|
||||
self,
|
||||
instructions: List[Instruction],
|
||||
directory_map: DirectoryMap,
|
||||
min_instructions: int = 1,
|
||||
debug: bool = False
|
||||
) -> Dict[Path, List[Instruction]]:
|
||||
"""Determine optimal AGENTS.md file placement using Context Optimization Engine.
|
||||
|
||||
Following the Minimal Context Principle and Context Optimization, creates
|
||||
focused AGENTS.md files that minimize context pollution while maximizing
|
||||
relevance for agents working in specific directories.
|
||||
|
||||
Args:
|
||||
instructions (List[Instruction]): List of instructions to place.
|
||||
directory_map (DirectoryMap): Directory structure analysis.
|
||||
min_instructions (int): Minimum instructions (default 1 for minimal context).
|
||||
debug (bool): Enable verbose optimizer output and timing.
|
||||
|
||||
Returns:
|
||||
Dict[Path, List[Instruction]]: Optimized mapping of directory paths to instructions.
|
||||
"""
|
||||
# Use the Context Optimization Engine for intelligent placement
|
||||
optimized_placement = self.context_optimizer.optimize_instruction_placement(
|
||||
instructions,
|
||||
verbose=debug,
|
||||
enable_timing=debug # Enable timing when debug mode is on
|
||||
)
|
||||
|
||||
# Special case: if no instructions but constitution exists, create root placement
|
||||
if not optimized_placement:
|
||||
from .constitution import find_constitution
|
||||
constitution_path = find_constitution(Path(self.base_dir))
|
||||
if constitution_path.exists():
|
||||
# Create an empty placement for the root directory to enable verbose output
|
||||
optimized_placement = {Path(self.base_dir): []}
|
||||
|
||||
# Store optimization results for output formatting later
|
||||
# Update with proper dry run status in the final result
|
||||
self._placement_map = optimized_placement
|
||||
|
||||
# Remove the verbose warning log - we'll show this in professional output instead
|
||||
|
||||
# Filter out directories with too few instructions if specified
|
||||
if min_instructions > 1:
|
||||
filtered_placement = {}
|
||||
for dir_path, dir_instructions in optimized_placement.items():
|
||||
if len(dir_instructions) >= min_instructions or dir_path == self.base_dir:
|
||||
filtered_placement[dir_path] = dir_instructions
|
||||
else:
|
||||
# Move instructions to parent directory
|
||||
parent_dir = dir_path.parent if dir_path != self.base_dir else self.base_dir
|
||||
if parent_dir not in filtered_placement:
|
||||
filtered_placement[parent_dir] = []
|
||||
filtered_placement[parent_dir].extend(dir_instructions)
|
||||
|
||||
return filtered_placement
|
||||
|
||||
return optimized_placement
|
||||
|
||||
def generate_distributed_agents_files(
|
||||
self,
|
||||
placement_map: Dict[Path, List[Instruction]],
|
||||
primitives: PrimitiveCollection,
|
||||
source_attribution: bool = True
|
||||
) -> List[PlacementResult]:
|
||||
"""Generate distributed AGENTS.md file contents.
|
||||
|
||||
Args:
|
||||
placement_map (Dict[Path, List[Instruction]]): Directory to instructions mapping.
|
||||
primitives (PrimitiveCollection): Full primitive collection.
|
||||
source_attribution (bool): Whether to include source attribution.
|
||||
|
||||
Returns:
|
||||
List[PlacementResult]: List of placement results with content.
|
||||
"""
|
||||
placements = []
|
||||
|
||||
# Special case: if no instructions but constitution exists, create root placement
|
||||
if not placement_map:
|
||||
from .constitution import find_constitution
|
||||
constitution_path = find_constitution(Path(self.base_dir))
|
||||
if constitution_path.exists():
|
||||
# Create a root placement for constitution-only projects
|
||||
root_path = Path(self.base_dir)
|
||||
agents_path = root_path / "AGENTS.md"
|
||||
|
||||
placement = PlacementResult(
|
||||
agents_path=agents_path,
|
||||
instructions=[], # No instructions, just constitution
|
||||
coverage_patterns=set(), # No patterns since no instructions
|
||||
source_attribution={"constitution": "constitution.md"} if source_attribution else {}
|
||||
)
|
||||
|
||||
placements.append(placement)
|
||||
else:
|
||||
# Normal case: create placements for each entry in placement_map
|
||||
for dir_path, instructions in placement_map.items():
|
||||
agents_path = dir_path / "AGENTS.md"
|
||||
|
||||
# Build source attribution map if enabled
|
||||
source_map = {}
|
||||
if source_attribution:
|
||||
for instruction in instructions:
|
||||
source_info = getattr(instruction, 'source', 'local')
|
||||
source_map[str(instruction.file_path)] = source_info
|
||||
|
||||
# Extract coverage patterns
|
||||
patterns = set()
|
||||
for instruction in instructions:
|
||||
if instruction.apply_to:
|
||||
patterns.add(instruction.apply_to)
|
||||
|
||||
placement = PlacementResult(
|
||||
agents_path=agents_path,
|
||||
instructions=instructions,
|
||||
coverage_patterns=patterns,
|
||||
source_attribution=source_map
|
||||
)
|
||||
|
||||
placements.append(placement)
|
||||
|
||||
return placements
|
||||
|
||||
def get_compilation_results_for_display(self, is_dry_run: bool = False) -> Optional[CompilationResults]:
|
||||
"""Get compilation results for CLI display integration.
|
||||
|
||||
Args:
|
||||
is_dry_run: Whether this is a dry run.
|
||||
|
||||
Returns:
|
||||
CompilationResults if available, None otherwise.
|
||||
"""
|
||||
if self._placement_map:
|
||||
# Generate fresh compilation results with correct dry run status
|
||||
compilation_results = self.context_optimizer.get_compilation_results(
|
||||
self._placement_map,
|
||||
is_dry_run=is_dry_run
|
||||
)
|
||||
|
||||
# Merge distributed compiler's warnings (like orphan warnings) with optimizer warnings
|
||||
all_warnings = compilation_results.warnings + self.warnings
|
||||
|
||||
# Create new compilation results with merged warnings
|
||||
from ..output.models import CompilationResults
|
||||
return CompilationResults(
|
||||
project_analysis=compilation_results.project_analysis,
|
||||
optimization_decisions=compilation_results.optimization_decisions,
|
||||
placement_summaries=compilation_results.placement_summaries,
|
||||
optimization_stats=compilation_results.optimization_stats,
|
||||
warnings=all_warnings,
|
||||
errors=compilation_results.errors + self.errors,
|
||||
is_dry_run=is_dry_run
|
||||
)
|
||||
return None
|
||||
|
||||
def _extract_directories_from_pattern(self, pattern: str) -> List[Path]:
|
||||
"""Extract potential directory paths from a file pattern.
|
||||
|
||||
Args:
|
||||
pattern (str): File pattern like "src/**/*.py" or "docs/*.md"
|
||||
|
||||
Returns:
|
||||
List[Path]: List of directory paths that could contain matching files.
|
||||
"""
|
||||
directories = []
|
||||
|
||||
# Remove filename part and wildcards to get directory structure
|
||||
# Examples:
|
||||
# "src/**/*.py" -> ["src"]
|
||||
# "docs/*.md" -> ["docs"]
|
||||
# "**/*.py" -> ["."] (current directory)
|
||||
# "*.py" -> ["."] (current directory)
|
||||
|
||||
if pattern.startswith("**/"):
|
||||
# Global pattern - applies to all directories
|
||||
directories.append(Path("."))
|
||||
elif "/" in pattern:
|
||||
# Extract directory part
|
||||
dir_part = pattern.split("/")[0]
|
||||
if not dir_part.startswith("*"):
|
||||
directories.append(Path(dir_part))
|
||||
else:
|
||||
directories.append(Path("."))
|
||||
else:
|
||||
# No directory part - applies to current directory
|
||||
directories.append(Path("."))
|
||||
|
||||
return directories
|
||||
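A quick illustration of the pattern-to-directory mapping described in the comments above; the expected values follow the examples listed in the method, and calling the private helper directly is for illustration only:

    compiler = DistributedAgentsCompiler(".")
    compiler._extract_directories_from_pattern("src/**/*.py")   # [Path("src")]
    compiler._extract_directories_from_pattern("docs/*.md")     # [Path("docs")]
    compiler._extract_directories_from_pattern("**/*.py")       # [Path(".")]
    compiler._extract_directories_from_pattern("*.py")          # [Path(".")]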
|
||||
def _find_best_directory(
|
||||
self,
|
||||
instruction: Instruction,
|
||||
directory_map: DirectoryMap,
|
||||
max_depth: int
|
||||
) -> Path:
|
||||
"""Find the best directory for placing an instruction.
|
||||
|
||||
Args:
|
||||
instruction (Instruction): Instruction to place.
|
||||
directory_map (DirectoryMap): Directory structure analysis.
|
||||
max_depth (int): Maximum allowed depth.
|
||||
|
||||
Returns:
|
||||
Path: Best directory path for the instruction.
|
||||
"""
|
||||
if not instruction.apply_to:
|
||||
return self.base_dir
|
||||
|
||||
pattern = instruction.apply_to
|
||||
best_dir = self.base_dir
|
||||
best_specificity = 0
|
||||
|
||||
for dir_path in directory_map.directories:
|
||||
# Skip directories that are too deep
|
||||
if directory_map.depth_map.get(dir_path, 0) > max_depth:
|
||||
continue
|
||||
|
||||
# Check if this directory could contain files matching the pattern
|
||||
if pattern in directory_map.directories[dir_path]:
|
||||
# Prefer more specific (deeper) directories
|
||||
specificity = directory_map.depth_map.get(dir_path, 0)
|
||||
if specificity > best_specificity:
|
||||
best_specificity = specificity
|
||||
best_dir = dir_path
|
||||
|
||||
return best_dir
|
||||
|
||||
def _generate_agents_content(
|
||||
self,
|
||||
placement: PlacementResult,
|
||||
primitives: PrimitiveCollection
|
||||
) -> str:
|
||||
"""Generate AGENTS.md content for a specific placement.
|
||||
|
||||
Args:
|
||||
placement (PlacementResult): Placement result with instructions.
|
||||
primitives (PrimitiveCollection): Full primitive collection.
|
||||
|
||||
Returns:
|
||||
str: Generated AGENTS.md content.
|
||||
"""
|
||||
sections = []
|
||||
|
||||
# Header with source attribution
|
||||
sections.append("# AGENTS.md")
|
||||
sections.append("<!-- Generated by APM CLI from distributed .apm/ primitives -->")
|
||||
sections.append(BUILD_ID_PLACEHOLDER)
|
||||
sections.append(f"<!-- APM Version: {get_version()} -->")
|
||||
|
||||
# Add source attribution summary if enabled
|
||||
if placement.source_attribution:
|
||||
sources = set(placement.source_attribution.values())
|
||||
if len(sources) > 1:
|
||||
sections.append(f"<!-- Sources: {', '.join(sorted(sources))} -->")
|
||||
else:
|
||||
sections.append(f"<!-- Source: {list(sources)[0] if sources else 'local'} -->")
|
||||
|
||||
sections.append("")
|
||||
|
||||
# Group instructions by pattern
|
||||
pattern_groups: Dict[str, List[Instruction]] = defaultdict(list)
|
||||
for instruction in placement.instructions:
|
||||
if instruction.apply_to:
|
||||
pattern_groups[instruction.apply_to].append(instruction)
|
||||
|
||||
# Generate sections for each pattern
|
||||
for pattern, pattern_instructions in sorted(pattern_groups.items()):
|
||||
sections.append(f"## Files matching `{pattern}`")
|
||||
sections.append("")
|
||||
|
||||
for instruction in pattern_instructions:
|
||||
content = instruction.content.strip()
|
||||
if content:
|
||||
# Add source attribution for individual instructions
|
||||
if placement.source_attribution:
|
||||
source = placement.source_attribution.get(str(instruction.file_path), 'local')
|
||||
try:
|
||||
rel_path = instruction.file_path.relative_to(self.base_dir)
|
||||
except ValueError:
|
||||
rel_path = instruction.file_path
|
||||
|
||||
sections.append(f"<!-- Source: {source} {rel_path} -->")
|
||||
|
||||
sections.append(content)
|
||||
sections.append("")
|
||||
|
||||
# Footer
|
||||
sections.append("---")
|
||||
sections.append("*This file was generated by APM CLI. Do not edit manually.*")
|
||||
sections.append("*To regenerate: `specify apm compile`*")
|
||||
sections.append("")
|
||||
|
||||
return "\n".join(sections)
|
||||
|
||||
def _validate_coverage(
|
||||
self,
|
||||
placements: List[PlacementResult],
|
||||
all_instructions: List[Instruction]
|
||||
) -> List[str]:
|
||||
"""Validate that all instructions are covered by placements.
|
||||
|
||||
Args:
|
||||
placements (List[PlacementResult]): Generated placements.
|
||||
all_instructions (List[Instruction]): All available instructions.
|
||||
|
||||
Returns:
|
||||
List[str]: List of coverage warnings.
|
||||
"""
|
||||
warnings = []
|
||||
placed_instructions = set()
|
||||
|
||||
for placement in placements:
|
||||
placed_instructions.update(str(inst.file_path) for inst in placement.instructions)
|
||||
|
||||
all_instruction_paths = set(str(inst.file_path) for inst in all_instructions)
|
||||
|
||||
missing_instructions = all_instruction_paths - placed_instructions
|
||||
if missing_instructions:
|
||||
warnings.append(f"Instructions not placed in any AGENTS.md: {', '.join(missing_instructions)}")
|
||||
|
||||
return warnings
|
||||
|
||||
def _find_orphaned_agents_files(self, generated_paths: List[Path]) -> List[Path]:
|
||||
"""Find existing AGENTS.md files that weren't generated in the current compilation.
|
||||
|
||||
Args:
|
||||
generated_paths (List[Path]): List of AGENTS.md files generated in current run.
|
||||
|
||||
Returns:
|
||||
List[Path]: List of orphaned AGENTS.md files that should be cleaned up.
|
||||
"""
|
||||
orphaned_files = []
|
||||
generated_set = set(generated_paths)
|
||||
|
||||
# Find all existing AGENTS.md files in the project
|
||||
for agents_file in self.base_dir.rglob("AGENTS.md"):
|
||||
# Skip files that are outside our project or in special directories
|
||||
try:
|
||||
relative_path = agents_file.relative_to(self.base_dir)
|
||||
|
||||
# Skip files in certain directories that shouldn't be cleaned
|
||||
skip_dirs = {".git", ".apm", "node_modules", "__pycache__", ".pytest_cache", "apm_modules"}
|
||||
if any(part in skip_dirs for part in relative_path.parts):
|
||||
continue
|
||||
|
||||
# If this existing file wasn't generated in current run, it's orphaned
|
||||
if agents_file not in generated_set:
|
||||
orphaned_files.append(agents_file)
|
||||
|
||||
except ValueError:
|
||||
# File is outside base_dir, skip it
|
||||
continue
|
||||
|
||||
return orphaned_files
|
||||
|
||||
def _generate_orphan_warnings(self, orphaned_files: List[Path]) -> List[str]:
|
||||
"""Generate warning messages for orphaned AGENTS.md files.
|
||||
|
||||
Args:
|
||||
orphaned_files (List[Path]): List of orphaned files to warn about.
|
||||
|
||||
Returns:
|
||||
List[str]: List of warning messages.
|
||||
"""
|
||||
warning_messages = []
|
||||
|
||||
if not orphaned_files:
|
||||
return warning_messages
|
||||
|
||||
# Professional warning format with readable list for multiple files
|
||||
if len(orphaned_files) == 1:
|
||||
rel_path = orphaned_files[0].relative_to(self.base_dir)
|
||||
warning_messages.append(f"Orphaned AGENTS.md found: {rel_path} - run 'apm compile --clean' to remove")
|
||||
else:
|
||||
# For multiple files, create a single multi-line warning message
|
||||
file_list = []
|
||||
for file_path in orphaned_files[:5]: # Show first 5
|
||||
rel_path = file_path.relative_to(self.base_dir)
|
||||
file_list.append(f" • {rel_path}")
|
||||
if len(orphaned_files) > 5:
|
||||
file_list.append(f" • ...and {len(orphaned_files) - 5} more")
|
||||
|
||||
# Create one cohesive warning message
|
||||
files_text = "\n".join(file_list)
|
||||
warning_messages.append(f"Found {len(orphaned_files)} orphaned AGENTS.md files:\n{files_text}\n Run 'apm compile --clean' to remove orphaned files")
|
||||
|
||||
return warning_messages
|
||||
|
||||
def _cleanup_orphaned_files(self, orphaned_files: List[Path], dry_run: bool = False) -> List[str]:
|
||||
"""Actually remove orphaned AGENTS.md files.
|
||||
|
||||
Args:
|
||||
orphaned_files (List[Path]): List of orphaned files to remove.
|
||||
dry_run (bool): If True, don't actually remove files, just report what would be removed.
|
||||
|
||||
Returns:
|
||||
List[str]: List of cleanup status messages.
|
||||
"""
|
||||
cleanup_messages = []
|
||||
|
||||
if not orphaned_files:
|
||||
return cleanup_messages
|
||||
|
||||
if dry_run:
|
||||
# In dry-run mode, just report what would be cleaned
|
||||
cleanup_messages.append(f"🧹 Would clean up {len(orphaned_files)} orphaned AGENTS.md files")
|
||||
for file_path in orphaned_files:
|
||||
rel_path = file_path.relative_to(self.base_dir)
|
||||
cleanup_messages.append(f" • {rel_path}")
|
||||
else:
|
||||
# Actually perform the cleanup
|
||||
cleanup_messages.append(f"🧹 Cleaning up {len(orphaned_files)} orphaned AGENTS.md files")
|
||||
for file_path in orphaned_files:
    # Compute the display path before the try block so it is defined in the
    # error message even when unlink() raises.
    rel_path = file_path.relative_to(self.base_dir)
    try:
        file_path.unlink()
        cleanup_messages.append(f" ✓ Removed {rel_path}")
    except Exception as e:
        cleanup_messages.append(f" ✗ Failed to remove {rel_path}: {str(e)}")
|
||||
|
||||
return cleanup_messages
|
||||
|
||||
def _compile_distributed_stats(
|
||||
self,
|
||||
placements: List[PlacementResult],
|
||||
primitives: PrimitiveCollection
|
||||
) -> Dict[str, float]:
|
||||
"""Compile statistics about the distributed compilation with optimization metrics.
|
||||
|
||||
Args:
|
||||
placements (List[PlacementResult]): Generated placements.
|
||||
primitives (PrimitiveCollection): Full primitive collection.
|
||||
|
||||
Returns:
|
||||
Dict[str, float]: Compilation statistics including optimization metrics.
|
||||
"""
|
||||
total_instructions = sum(len(p.instructions) for p in placements)
|
||||
total_patterns = sum(len(p.coverage_patterns) for p in placements)
|
||||
|
||||
# Get optimization metrics
|
||||
placement_map = {Path(p.agents_path.parent): p.instructions for p in placements}
|
||||
optimization_stats = self.context_optimizer.get_optimization_stats(placement_map)
|
||||
|
||||
# Combine traditional stats with optimization metrics
|
||||
stats = {
|
||||
"agents_files_generated": len(placements),
|
||||
"total_instructions_placed": total_instructions,
|
||||
"total_patterns_covered": total_patterns,
|
||||
"primitives_found": primitives.count(),
|
||||
"chatmodes": len(primitives.chatmodes),
|
||||
"instructions": len(primitives.instructions),
|
||||
"contexts": len(primitives.contexts)
|
||||
}
|
||||
|
||||
# Add optimization metrics from OptimizationStats object
|
||||
if optimization_stats:
|
||||
stats.update({
|
||||
"average_context_efficiency": optimization_stats.average_context_efficiency,
|
||||
"pollution_improvement": optimization_stats.pollution_improvement,
|
||||
"baseline_efficiency": optimization_stats.baseline_efficiency,
|
||||
"placement_accuracy": optimization_stats.placement_accuracy,
|
||||
"generation_time_ms": optimization_stats.generation_time_ms,
|
||||
"total_agents_files": optimization_stats.total_agents_files,
|
||||
"directories_analyzed": optimization_stats.directories_analyzed
|
||||
})
|
||||
|
||||
return stats
|
||||
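An end-to-end usage sketch of the compiler above, assuming a PrimitiveCollection has already been parsed from .apm/; variable names are illustrative and the config keys mirror the docstring of compile_distributed:

    from apm_cli.compilation.distributed_compiler import DistributedAgentsCompiler

    compiler = DistributedAgentsCompiler(base_dir=".")
    result = compiler.compile_distributed(
        primitives,                                  # a parsed PrimitiveCollection
        config={"clean_orphaned": False, "dry_run": True},
    )
    if result.success:
        for agents_path, content in result.content_map.items():
            print(agents_path, len(content.splitlines()), "lines")
    else:
        print("\n".join(result.errors))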
91
src/apm_cli/compilation/injector.py
Normal file
@@ -0,0 +1,91 @@
|
||||
"""High-level constitution injection workflow used by compile command."""
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Optional, Literal
|
||||
|
||||
from .constitution import read_constitution
|
||||
from .constitution_block import render_block, find_existing_block
|
||||
from .constants import CONSTITUTION_MARKER_BEGIN, CONSTITUTION_MARKER_END
|
||||
|
||||
InjectionStatus = Literal["CREATED", "UPDATED", "UNCHANGED", "SKIPPED", "MISSING"]
|
||||
|
||||
|
||||
class ConstitutionInjector:
|
||||
"""Encapsulates constitution detection + injection logic."""
|
||||
|
||||
def __init__(self, base_dir: str):
|
||||
self.base_dir = Path(base_dir)
|
||||
|
||||
def inject(self, compiled_content: str, with_constitution: bool, output_path: Path) -> tuple[str, InjectionStatus, Optional[str]]:
|
||||
"""Return final AGENTS.md content after optional injection.
|
||||
|
||||
Args:
|
||||
compiled_content: Newly compiled content (without constitution block).
|
||||
with_constitution: Whether to perform injection (True) or preserve existing block (False).
|
||||
output_path: Existing AGENTS.md path (may not exist) for preservation logic.
|
||||
Returns:
|
||||
(final_content, status, hash_or_none)
|
||||
"""
|
||||
existing_content = ""
|
||||
if output_path.exists():
|
||||
try:
|
||||
existing_content = output_path.read_text(encoding="utf-8")
|
||||
except OSError:
|
||||
existing_content = ""
|
||||
|
||||
# Helper to split header/body from freshly compiled content.
|
||||
def _split_header(content: str) -> tuple[str, str]:
|
||||
# Header ends at the first double newline (blank line separating header from body)
|
||||
marker = "\n\n"
|
||||
if marker in content:
|
||||
idx = content.index(marker)
|
||||
return content[: idx + len(marker)], content[idx + len(marker) :]
|
||||
# Fallback: treat whole content as header
|
||||
return content, ""
|
||||
|
||||
header_part, body_part = _split_header(compiled_content)
|
||||
|
||||
if not with_constitution:
|
||||
# If skipping, we preserve existing block if present but enforce ordering: header first, block (if any), then body.
|
||||
existing_block = find_existing_block(existing_content)
|
||||
if existing_block:
|
||||
final = header_part + existing_block.raw.rstrip() + "\n\n" + body_part.lstrip("\n")
|
||||
return final, "SKIPPED", None
|
||||
return compiled_content, "SKIPPED", None
|
||||
|
||||
constitution_text = read_constitution(self.base_dir)
|
||||
if constitution_text is None:
|
||||
existing_block = find_existing_block(existing_content)
|
||||
if existing_block:
|
||||
final = header_part + existing_block.raw.rstrip() + "\n\n" + body_part.lstrip("\n")
|
||||
return final, "MISSING", None
|
||||
return compiled_content, "MISSING", None
|
||||
|
||||
new_block = render_block(constitution_text)
|
||||
existing_block = find_existing_block(existing_content)
|
||||
|
||||
if existing_block:
|
||||
# Compare raw block bodies (strip trailing newlines for stable compare)
|
||||
if existing_block.raw.rstrip() == new_block.rstrip():
|
||||
status = "UNCHANGED"
|
||||
block_to_use = existing_block.raw.rstrip()
|
||||
else:
|
||||
status = "UPDATED"
|
||||
block_to_use = new_block.rstrip()
|
||||
else:
|
||||
status = "CREATED"
|
||||
block_to_use = new_block.rstrip()
|
||||
|
||||
hash_line = new_block.splitlines()[1] if len(new_block.splitlines()) > 1 else ""
|
||||
hash_value = None
|
||||
if hash_line.startswith("hash:"):
|
||||
parts = hash_line.split()
|
||||
if len(parts) >= 2:
|
||||
hash_value = parts[1]
|
||||
|
||||
final_content = header_part + block_to_use + "\n\n" + body_part.lstrip("\n")
|
||||
# Ensure single trailing newline
|
||||
if not final_content.endswith("\n"):
|
||||
final_content += "\n"
|
||||
return final_content, status, hash_value
|
||||
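A hedged sketch of how the compile command might drive this injector; variable names are illustrative:

    from pathlib import Path
    from apm_cli.compilation.injector import ConstitutionInjector

    injector = ConstitutionInjector(base_dir=".")
    final_content, status, const_hash = injector.inject(
        compiled_content,                # freshly compiled AGENTS.md body
        with_constitution=True,
        output_path=Path("AGENTS.md"),
    )
    # status is one of CREATED | UPDATED | UNCHANGED | SKIPPED | MISSING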
181
src/apm_cli/compilation/link_resolver.py
Normal file
@@ -0,0 +1,181 @@
|
||||
"""Markdown link resolution for AGENTS.md compilation."""
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Tuple, Optional
|
||||
|
||||
|
||||
def resolve_markdown_links(content: str, base_path: Path) -> str:
|
||||
"""Resolve markdown links and inline referenced content.
|
||||
|
||||
Args:
|
||||
content (str): Content with markdown links to resolve.
|
||||
base_path (Path): Base directory for resolving relative paths.
|
||||
|
||||
Returns:
|
||||
str: Content with resolved links and inlined content where appropriate.
|
||||
"""
|
||||
# Pattern to match markdown links: [text](path)
|
||||
link_pattern = r'\[([^\]]+)\]\(([^)]+)\)'
|
||||
|
||||
def replace_link(match):
|
||||
text = match.group(1)
|
||||
path = match.group(2)
|
||||
|
||||
# Skip external URLs
|
||||
if path.startswith(('http://', 'https://', 'ftp://', 'mailto:')):
|
||||
return match.group(0) # Return original link
|
||||
|
||||
# Skip anchors
|
||||
if path.startswith('#'):
|
||||
return match.group(0) # Return original link
|
||||
|
||||
# Resolve relative path
|
||||
full_path = _resolve_path(path, base_path)
|
||||
|
||||
if full_path and full_path.exists() and full_path.is_file():
|
||||
# For certain file types, inline the content
|
||||
if full_path.suffix.lower() in ['.md', '.txt']:
|
||||
try:
|
||||
file_content = full_path.read_text(encoding='utf-8')
|
||||
# Remove frontmatter if present
|
||||
file_content = _remove_frontmatter(file_content)
|
||||
return f"**{text}**:\n\n{file_content}"
|
||||
except (OSError, UnicodeDecodeError):
|
||||
# Fall back to original link if file can't be read
|
||||
return match.group(0)
|
||||
else:
|
||||
# For other file types, keep the link but update path if needed
|
||||
return match.group(0)
|
||||
else:
|
||||
# File doesn't exist, keep original link (will be caught by validation)
|
||||
return match.group(0)
|
||||
|
||||
return re.sub(link_pattern, replace_link, content)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def validate_link_targets(content: str, base_path: Path) -> List[str]:
|
||||
"""Validate that all referenced files exist.
|
||||
|
||||
Args:
|
||||
content (str): Content to validate links in.
|
||||
base_path (Path): Base directory for resolving relative paths.
|
||||
|
||||
Returns:
|
||||
List[str]: List of error messages for missing or invalid links.
|
||||
"""
|
||||
errors = []
|
||||
|
||||
# Check markdown links
|
||||
link_pattern = r'\[([^\]]+)\]\(([^)]+)\)'
|
||||
for match in re.finditer(link_pattern, content):
|
||||
text = match.group(1)
|
||||
path = match.group(2)
|
||||
|
||||
# Skip external URLs and anchors
|
||||
if (path.startswith(('http://', 'https://', 'ftp://', 'mailto:')) or
|
||||
path.startswith('#')):
|
||||
continue
|
||||
|
||||
# Resolve and check path
|
||||
full_path = _resolve_path(path, base_path)
|
||||
if not full_path or not full_path.exists():
|
||||
errors.append(f"Referenced file not found: {path} (in link '{text}')")
|
||||
elif not full_path.is_file() and not full_path.is_dir():
|
||||
errors.append(f"Referenced path is neither a file nor directory: {path} (in link '{text}')")
|
||||
|
||||
return errors
|
||||
|
||||
|
||||
def _resolve_path(path: str, base_path: Path) -> Optional[Path]:
|
||||
"""Resolve a relative path against a base path.
|
||||
|
||||
Args:
|
||||
path (str): Relative path to resolve.
|
||||
base_path (Path): Base directory for resolution.
|
||||
|
||||
Returns:
|
||||
Optional[Path]: Resolved path or None if invalid.
|
||||
"""
|
||||
try:
|
||||
if Path(path).is_absolute():
|
||||
return Path(path)
|
||||
else:
|
||||
return base_path / path
|
||||
except (OSError, ValueError):
|
||||
return None
|
||||
|
||||
|
||||
def _remove_frontmatter(content: str) -> str:
|
||||
"""Remove YAML frontmatter from content.
|
||||
|
||||
Args:
|
||||
content (str): Content that may contain frontmatter.
|
||||
|
||||
Returns:
|
||||
str: Content without frontmatter.
|
||||
"""
|
||||
# Remove YAML frontmatter (--- at start, --- at end)
|
||||
if content.startswith('---\n'):
|
||||
lines = content.split('\n')
|
||||
in_frontmatter = True
|
||||
content_lines = []
|
||||
|
||||
for i, line in enumerate(lines[1:], 1): # Skip first ---
|
||||
if line.strip() == '---' and in_frontmatter:
|
||||
in_frontmatter = False
|
||||
continue
|
||||
if not in_frontmatter:
|
||||
content_lines.append(line)
|
||||
|
||||
content = '\n'.join(content_lines)
|
||||
|
||||
return content.strip()
|
||||
|
||||
|
||||
def _detect_circular_references(content: str, base_path: Path, visited: Optional[set] = None) -> List[str]:
|
||||
"""Detect circular references in markdown links.
|
||||
|
||||
Args:
|
||||
content (str): Content to check for circular references.
|
||||
base_path (Path): Base directory for resolving paths.
|
||||
visited (Optional[set]): Set of already visited files.
|
||||
|
||||
Returns:
|
||||
List[str]: List of circular reference errors.
|
||||
"""
|
||||
if visited is None:
|
||||
visited = set()
|
||||
|
||||
errors = []
|
||||
current_file = base_path
|
||||
|
||||
if current_file in visited:
|
||||
errors.append(f"Circular reference detected: {current_file}")
|
||||
return errors
|
||||
|
||||
visited.add(current_file)
|
||||
|
||||
# Check markdown links for potential circular references
|
||||
link_pattern = r'\[([^\]]+)\]\(([^)]+)\)'
|
||||
for match in re.finditer(link_pattern, content):
|
||||
path = match.group(2)
|
||||
|
||||
# Skip external URLs and anchors
|
||||
if (path.startswith(('http://', 'https://', 'ftp://', 'mailto:')) or
|
||||
path.startswith('#')):
|
||||
continue
|
||||
|
||||
full_path = _resolve_path(path, base_path.parent if base_path.is_file() else base_path)
|
||||
if full_path and full_path.exists() and full_path.is_file():
|
||||
if full_path.suffix.lower() in ['.md', '.txt']:
|
||||
try:
|
||||
linked_content = full_path.read_text(encoding='utf-8')
|
||||
errors.extend(_detect_circular_references(linked_content, full_path, visited.copy()))
|
||||
except (OSError, UnicodeDecodeError):
|
||||
continue
|
||||
|
||||
return errors
|
||||
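For illustration, how the resolver inlines a local markdown link while leaving external URLs untouched; the file path and contents are hypothetical:

    from pathlib import Path
    from apm_cli.compilation.link_resolver import resolve_markdown_links, validate_link_targets

    content = "See [the style guide](docs/style.md) and [the spec](https://example.com/spec)."
    resolved = resolve_markdown_links(content, Path("."))
    # docs/style.md is read, its frontmatter stripped, and inlined as "**the style guide**: ..."
    # the https:// link is returned unchanged

    errors = validate_link_targets(content, Path("."))  # non-empty if docs/style.md is missing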
138
src/apm_cli/compilation/template_builder.py
Normal file
@@ -0,0 +1,138 @@
|
||||
"""Template building system for AGENTS.md compilation."""
|
||||
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Optional, Tuple
|
||||
from ..primitives.models import Instruction, Chatmode
|
||||
|
||||
|
||||
@dataclass
|
||||
class TemplateData:
|
||||
"""Data structure for template generation."""
|
||||
instructions_content: str
|
||||
# Removed volatile timestamp for deterministic builds
|
||||
version: str
|
||||
chatmode_content: Optional[str] = None
|
||||
|
||||
|
||||
def build_conditional_sections(instructions: List[Instruction]) -> str:
|
||||
"""Build sections grouped by applyTo patterns.
|
||||
|
||||
Args:
|
||||
instructions (List[Instruction]): List of instruction primitives.
|
||||
|
||||
Returns:
|
||||
str: Formatted conditional sections content.
|
||||
"""
|
||||
if not instructions:
|
||||
return ""
|
||||
|
||||
# Group instructions by pattern - use raw patterns
|
||||
pattern_groups = _group_instructions_by_pattern(instructions)
|
||||
|
||||
sections = []
|
||||
|
||||
for pattern, pattern_instructions in pattern_groups.items():
|
||||
sections.append(f"## Files matching `{pattern}`")
|
||||
sections.append("")
|
||||
|
||||
# Combine content from all instructions for this pattern
|
||||
for instruction in pattern_instructions:
|
||||
content = instruction.content.strip()
|
||||
if content:
|
||||
# Add source file comment before the content
|
||||
try:
|
||||
# Try to get relative path for cleaner display
|
||||
if instruction.file_path.is_absolute():
|
||||
relative_path = instruction.file_path.relative_to(Path.cwd())
|
||||
else:
|
||||
relative_path = instruction.file_path
|
||||
except (ValueError, OSError):
|
||||
# Fall back to absolute or given path if relative fails
|
||||
relative_path = instruction.file_path
|
||||
|
||||
sections.append(f"<!-- Source: {relative_path} -->")
|
||||
sections.append(content)
|
||||
sections.append(f"<!-- End source: {relative_path} -->")
|
||||
sections.append("")
|
||||
|
||||
return "\n".join(sections)
|
||||
|
||||
|
||||
def find_chatmode_by_name(chatmodes: List[Chatmode], chatmode_name: str) -> Optional[Chatmode]:
|
||||
"""Find a chatmode by name.
|
||||
|
||||
Args:
|
||||
chatmodes (List[Chatmode]): List of available chatmodes.
|
||||
chatmode_name (str): Name of the chatmode to find.
|
||||
|
||||
Returns:
|
||||
Optional[Chatmode]: The found chatmode, or None if not found.
|
||||
"""
|
||||
for chatmode in chatmodes:
|
||||
if chatmode.name == chatmode_name:
|
||||
return chatmode
|
||||
return None
|
||||
|
||||
|
||||
def _group_instructions_by_pattern(instructions: List[Instruction]) -> Dict[str, List[Instruction]]:
|
||||
"""Group instructions by applyTo patterns.
|
||||
|
||||
Args:
|
||||
instructions (List[Instruction]): List of instructions to group.
|
||||
|
||||
Returns:
|
||||
Dict[str, List[Instruction]]: Grouped instructions with raw patterns as keys.
|
||||
"""
|
||||
pattern_groups: Dict[str, List[Instruction]] = {}
|
||||
|
||||
for instruction in instructions:
|
||||
if not instruction.apply_to:
|
||||
continue
|
||||
|
||||
pattern = instruction.apply_to
|
||||
|
||||
if pattern not in pattern_groups:
|
||||
pattern_groups[pattern] = []
|
||||
|
||||
pattern_groups[pattern].append(instruction)
|
||||
|
||||
return pattern_groups
|
||||
|
||||
|
||||
def generate_agents_md_template(template_data: TemplateData) -> str:
|
||||
"""Generate the complete AGENTS.md file content.
|
||||
|
||||
Args:
|
||||
template_data (TemplateData): Data for template generation.
|
||||
|
||||
Returns:
|
||||
str: Complete AGENTS.md file content.
|
||||
"""
|
||||
sections = []
|
||||
|
||||
# Header
|
||||
sections.append("# AGENTS.md")
|
||||
sections.append(f"<!-- Generated by APM CLI from .apm/ primitives -->")
|
||||
from .constants import BUILD_ID_PLACEHOLDER
|
||||
sections.append(BUILD_ID_PLACEHOLDER)
|
||||
sections.append(f"<!-- APM Version: {template_data.version} -->")
|
||||
sections.append("")
|
||||
|
||||
# Chatmode content (if provided)
|
||||
if template_data.chatmode_content:
|
||||
sections.append(template_data.chatmode_content.strip())
|
||||
sections.append("")
|
||||
|
||||
# Instructions content (grouped by patterns)
|
||||
if template_data.instructions_content:
|
||||
sections.append(template_data.instructions_content)
|
||||
|
||||
# Footer
|
||||
sections.append("---")
|
||||
sections.append("*This file was generated by APM CLI. Do not edit manually.*")
|
||||
sections.append("*To regenerate: `specify apm compile`*")
|
||||
sections.append("")
|
||||
|
||||
return "\n".join(sections)
|
||||
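A small sketch of the template pipeline wired together; the version string and the instructions variable are illustrative:

    from apm_cli.compilation.template_builder import (
        TemplateData,
        build_conditional_sections,
        generate_agents_md_template,
    )

    sections = build_conditional_sections(instructions)   # instructions: List[Instruction]
    template = TemplateData(instructions_content=sections, version="0.0.19")
    agents_md = generate_agents_md_template(template)
    # agents_md starts with "# AGENTS.md", the Build ID placeholder, and the APM version comment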
60
src/apm_cli/config.py
Normal file
@@ -0,0 +1,60 @@
"""Configuration management for APM-CLI."""

import os
import json


CONFIG_DIR = os.path.expanduser("~/.apm-cli")
CONFIG_FILE = os.path.join(CONFIG_DIR, "config.json")


def ensure_config_exists():
    """Ensure the configuration directory and file exist."""
    if not os.path.exists(CONFIG_DIR):
        os.makedirs(CONFIG_DIR)

    if not os.path.exists(CONFIG_FILE):
        with open(CONFIG_FILE, "w") as f:
            json.dump({"default_client": "vscode"}, f)


def get_config():
    """Get the current configuration.

    Returns:
        dict: Current configuration.
    """
    ensure_config_exists()
    with open(CONFIG_FILE, "r") as f:
        return json.load(f)


def update_config(updates):
    """Update the configuration with new values.

    Args:
        updates (dict): Dictionary of configuration values to update.
    """
    config = get_config()
    config.update(updates)

    with open(CONFIG_FILE, "w") as f:
        json.dump(config, f, indent=2)


def get_default_client():
    """Get the default MCP client.

    Returns:
        str: Default MCP client type.
    """
    return get_config().get("default_client", "vscode")


def set_default_client(client_type):
    """Set the default MCP client.

    Args:
        client_type (str): Type of client to set as default.
    """
    update_config({"default_client": client_type})
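A short usage sketch of the config helpers above; the "telemetry" key is a hypothetical extra setting, not defined by the module:

    from apm_cli import config

    config.set_default_client("codex")          # persists to ~/.apm-cli/config.json
    print(config.get_default_client())          # "codex"
    config.update_config({"telemetry": False})  # hypothetical additional key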
1
src/apm_cli/core/__init__.py
Normal file
@@ -0,0 +1 @@
"""Core package."""
165
src/apm_cli/core/conflict_detector.py
Normal file
@@ -0,0 +1,165 @@
|
||||
"""MCP server conflict detection and resolution."""
|
||||
|
||||
from typing import Dict, Any
|
||||
from ..adapters.client.base import MCPClientAdapter
|
||||
|
||||
|
||||
class MCPConflictDetector:
|
||||
"""Handles detection and resolution of MCP server configuration conflicts."""
|
||||
|
||||
def __init__(self, runtime_adapter: MCPClientAdapter):
|
||||
"""Initialize the conflict detector.
|
||||
|
||||
Args:
|
||||
runtime_adapter: The MCP client adapter for the target runtime.
|
||||
"""
|
||||
self.adapter = runtime_adapter
|
||||
|
||||
def check_server_exists(self, server_reference: str) -> bool:
|
||||
"""Check if a server already exists in the configuration.
|
||||
|
||||
Args:
|
||||
server_reference: Server reference to check (e.g., 'github', 'io.github.github/github-mcp-server').
|
||||
|
||||
Returns:
|
||||
True if server already exists, False otherwise.
|
||||
"""
|
||||
existing_servers = self.get_existing_server_configs()
|
||||
|
||||
# Try to get server info from registry for UUID comparison
|
||||
try:
|
||||
server_info = self.adapter.registry_client.find_server_by_reference(server_reference)
|
||||
if server_info and "id" in server_info:
|
||||
server_uuid = server_info["id"]
|
||||
|
||||
# Check if any existing server has the same UUID
|
||||
for existing_name, existing_config in existing_servers.items():
|
||||
if isinstance(existing_config, dict) and existing_config.get("id") == server_uuid:
|
||||
return True
|
||||
except Exception:
|
||||
# If registry lookup fails, fall back to canonical name comparison
|
||||
canonical_name = self.get_canonical_server_name(server_reference)
|
||||
|
||||
# Check for exact canonical name match
|
||||
if canonical_name in existing_servers:
|
||||
return True
|
||||
|
||||
# Check if any existing server resolves to the same canonical name
|
||||
for existing_name in existing_servers.keys():
|
||||
if existing_name != canonical_name: # Avoid duplicate checking
|
||||
try:
|
||||
existing_canonical = self.get_canonical_server_name(existing_name)
|
||||
if existing_canonical == canonical_name:
|
||||
return True
|
||||
except Exception:
|
||||
# If we can't resolve an existing server name, skip it
|
||||
continue
|
||||
|
||||
return False
|
||||
|
||||
def get_canonical_server_name(self, server_ref: str) -> str:
|
||||
"""Get canonical server name from MCP Registry.
|
||||
|
||||
Args:
|
||||
server_ref: Server reference to resolve.
|
||||
|
||||
Returns:
|
||||
Canonical server name if found in registry, otherwise the original reference.
|
||||
"""
|
||||
try:
|
||||
# Use existing registry client that's already initialized in adapters
|
||||
server_info = self.adapter.registry_client.find_server_by_reference(server_ref)
|
||||
|
||||
if server_info:
|
||||
# Use the server name from x-github.name field, or fallback to server.name
|
||||
if "x-github" in server_info and "name" in server_info["x-github"]:
|
||||
return server_info["x-github"]["name"]
|
||||
elif "name" in server_info:
|
||||
return server_info["name"]
|
||||
except Exception:
|
||||
# Graceful fallback on registry failure
|
||||
pass
|
||||
|
||||
# Fallback: return the reference as-is if not found in registry
|
||||
return server_ref
|
||||
|
||||
def get_existing_server_configs(self) -> Dict[str, Any]:
|
||||
"""Extract all existing server configurations.
|
||||
|
||||
Returns:
|
||||
Dictionary of existing server configurations keyed by server name.
|
||||
"""
|
||||
# Get fresh config each time
|
||||
existing_config = self.adapter.get_current_config()
|
||||
|
||||
# Determine runtime type from adapter class name or type
|
||||
adapter_class_name = getattr(self.adapter, '__class__', type(self.adapter)).__name__.lower()
|
||||
|
||||
if "copilot" in adapter_class_name:
|
||||
return existing_config.get("mcpServers", {})
|
||||
elif "codex" in adapter_class_name:
|
||||
# Extract mcp_servers section from TOML config, handling both nested and flat formats
|
||||
servers = {}
|
||||
|
||||
# Direct mcp_servers section
|
||||
if "mcp_servers" in existing_config:
|
||||
servers.update(existing_config["mcp_servers"])
|
||||
|
||||
# Handle TOML-style nested keys like 'mcp_servers.github' and 'mcp_servers."quoted-name"'
|
||||
for key, value in existing_config.items():
|
||||
if key.startswith("mcp_servers."):
|
||||
# Extract server name from key
|
||||
server_name = key[len("mcp_servers."):]
|
||||
# Remove quotes if present
|
||||
if server_name.startswith('"') and server_name.endswith('"'):
|
||||
server_name = server_name[1:-1]
|
||||
|
||||
# Only add if it looks like server config (has command or args)
|
||||
if isinstance(value, dict) and ('command' in value or 'args' in value):
|
||||
servers[server_name] = value
|
||||
|
||||
return servers
|
||||
elif "vscode" in adapter_class_name:
|
||||
return existing_config.get("servers", {})
|
||||
|
||||
return {}
|
||||
|
||||
def get_conflict_summary(self, server_reference: str) -> Dict[str, Any]:
|
||||
"""Get detailed information about a conflict.
|
||||
|
||||
Args:
|
||||
server_reference: Server reference to analyze.
|
||||
|
||||
Returns:
|
||||
Dictionary with conflict details.
|
||||
"""
|
||||
canonical_name = self.get_canonical_server_name(server_reference)
|
||||
existing_servers = self.get_existing_server_configs()
|
||||
|
||||
conflict_info = {
|
||||
"exists": False,
|
||||
"canonical_name": canonical_name,
|
||||
"conflicting_servers": []
|
||||
}
|
||||
|
||||
# Check for exact canonical name match
|
||||
if canonical_name in existing_servers:
|
||||
conflict_info["exists"] = True
|
||||
conflict_info["conflicting_servers"].append({
|
||||
"name": canonical_name,
|
||||
"type": "exact_match"
|
||||
})
|
||||
|
||||
# Check if any existing server resolves to the same canonical name
|
||||
for existing_name in existing_servers.keys():
|
||||
if existing_name != canonical_name: # Avoid duplicate reporting
|
||||
existing_canonical = self.get_canonical_server_name(existing_name)
|
||||
if existing_canonical == canonical_name:
|
||||
conflict_info["exists"] = True
|
||||
conflict_info["conflicting_servers"].append({
|
||||
"name": existing_name,
|
||||
"type": "canonical_match",
|
||||
"resolves_to": existing_canonical
|
||||
})
|
||||
|
||||
return conflict_info
|
||||
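For orientation, a hedged sketch of the detector in use; how the runtime_adapter is constructed is outside this diff, so it is shown as a pre-existing MCPClientAdapter instance:

    from apm_cli.core.conflict_detector import MCPConflictDetector

    detector = MCPConflictDetector(runtime_adapter)        # any concrete MCPClientAdapter
    if detector.check_server_exists("github"):
        info = detector.get_conflict_summary("github")
        print(info["canonical_name"], info["conflicting_servers"])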
96
src/apm_cli/core/docker_args.py
Normal file
@@ -0,0 +1,96 @@
|
||||
"""Docker arguments processing utilities for MCP configuration."""
|
||||
|
||||
from typing import List, Dict, Tuple
|
||||
|
||||
|
||||
class DockerArgsProcessor:
|
||||
"""Handles Docker argument processing with deduplication."""
|
||||
|
||||
@staticmethod
|
||||
def process_docker_args(base_args: List[str], env_vars: Dict[str, str]) -> List[str]:
|
||||
"""Process Docker arguments with environment variable deduplication and required flags.
|
||||
|
||||
Args:
|
||||
base_args: Base Docker arguments list.
|
||||
env_vars: Environment variables to inject.
|
||||
|
||||
Returns:
|
||||
Updated arguments with environment variables injected without duplicates and required flags.
|
||||
"""
|
||||
result = []
|
||||
env_vars_added = set()
|
||||
has_interactive = False
|
||||
has_rm = False
|
||||
|
||||
# Check for existing -i and --rm flags
|
||||
for arg in base_args:
|
||||
if arg == "-i" or arg == "--interactive":
|
||||
has_interactive = True
|
||||
elif arg == "--rm":
|
||||
has_rm = True
|
||||
|
||||
for arg in base_args:
|
||||
result.append(arg)
|
||||
|
||||
# When we encounter "run", inject required flags and environment variables
|
||||
if arg == "run":
|
||||
# Add -i flag if not present
|
||||
if not has_interactive:
|
||||
result.append("-i")
|
||||
|
||||
# Add --rm flag if not present
|
||||
if not has_rm:
|
||||
result.append("--rm")
|
||||
|
||||
# Add environment variables
|
||||
for env_name, env_value in env_vars.items():
|
||||
if env_name not in env_vars_added:
|
||||
result.extend(["-e", f"{env_name}={env_value}"])
|
||||
env_vars_added.add(env_name)
|
||||
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def extract_env_vars_from_args(args: List[str]) -> Tuple[List[str], Dict[str, str]]:
|
||||
"""Extract environment variables from Docker args.
|
||||
|
||||
Args:
|
||||
args: Docker arguments that may contain -e flags.
|
||||
|
||||
Returns:
|
||||
Tuple of (clean_args, env_vars) where clean_args has -e flags removed
|
||||
and env_vars contains the extracted environment variables.
|
||||
"""
|
||||
clean_args = []
|
||||
env_vars = {}
|
||||
i = 0
|
||||
|
||||
while i < len(args):
|
||||
if args[i] == "-e" and i + 1 < len(args):
|
||||
env_spec = args[i + 1]
|
||||
if "=" in env_spec:
|
||||
key, value = env_spec.split("=", 1)
|
||||
env_vars[key] = value
|
||||
else:
|
||||
env_vars[env_spec] = "${" + env_spec + "}"
|
||||
i += 2 # Skip both -e and the env spec
|
||||
else:
|
||||
clean_args.append(args[i])
|
||||
i += 1
|
||||
|
||||
return clean_args, env_vars
|
||||
|
||||
@staticmethod
|
||||
def merge_env_vars(existing_env: Dict[str, str], new_env: Dict[str, str]) -> Dict[str, str]:
|
||||
"""Merge environment variables, prioritizing resolved values over templates.
|
||||
|
||||
Args:
|
||||
existing_env: Existing environment variables (often templates from registry).
|
||||
new_env: New environment variables to merge (resolved actual values).
|
||||
|
||||
Returns:
|
||||
Merged environment variables with resolved values taking precedence.
|
||||
"""
|
||||
merged = existing_env.copy()
|
||||
merged.update(new_env) # Resolved values take precedence over templates
|
||||
return merged
|
||||
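A worked example of the argument processing above; the expected output follows the injection rules in process_docker_args, and the image name and token template are illustrative:

    from apm_cli.core.docker_args import DockerArgsProcessor

    args = DockerArgsProcessor.process_docker_args(
        ["run", "ghcr.io/github/github-mcp-server"],
        {"GITHUB_TOKEN": "${GITHUB_TOKEN}"},
    )
    # ["run", "-i", "--rm", "-e", "GITHUB_TOKEN=${GITHUB_TOKEN}", "ghcr.io/github/github-mcp-server"]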
99
src/apm_cli/core/operations.py
Normal file
@@ -0,0 +1,99 @@
|
||||
"""Core operations for APM-CLI."""
|
||||
|
||||
from ..factory import ClientFactory, PackageManagerFactory
|
||||
from .safe_installer import SafeMCPInstaller
|
||||
|
||||
|
||||
def configure_client(client_type, config_updates):
|
||||
"""Configure an MCP client.
|
||||
|
||||
Args:
|
||||
client_type (str): Type of client to configure.
|
||||
config_updates (dict): Configuration updates to apply.
|
||||
|
||||
Returns:
|
||||
bool: True if successful, False otherwise.
|
||||
"""
|
||||
try:
|
||||
client = ClientFactory.create_client(client_type)
|
||||
client.update_config(config_updates)
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"Error configuring client: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def install_package(client_type, package_name, version=None, shared_env_vars=None, server_info_cache=None, shared_runtime_vars=None):
|
||||
"""Install an MCP package for a specific client type.
|
||||
|
||||
Args:
|
||||
client_type (str): Type of client to configure.
|
||||
package_name (str): Name of the package to install.
|
||||
version (str, optional): Version of the package to install.
|
||||
shared_env_vars (dict, optional): Pre-collected environment variables to use.
|
||||
server_info_cache (dict, optional): Pre-fetched server info to avoid duplicate registry calls.
|
||||
shared_runtime_vars (dict, optional): Pre-collected runtime variables to use.
|
||||
|
||||
Returns:
|
||||
dict: Result with 'success' (bool), 'installed' (bool), 'skipped' (bool) keys.
|
||||
"""
|
||||
try:
|
||||
# Use safe installer with conflict detection
|
||||
safe_installer = SafeMCPInstaller(client_type)
|
||||
|
||||
# Pass shared environment and runtime variables and server info cache if available
|
||||
if shared_env_vars is not None or server_info_cache is not None or shared_runtime_vars is not None:
|
||||
summary = safe_installer.install_servers(
|
||||
[package_name],
|
||||
env_overrides=shared_env_vars,
|
||||
server_info_cache=server_info_cache,
|
||||
runtime_vars=shared_runtime_vars
|
||||
)
|
||||
else:
|
||||
summary = safe_installer.install_servers([package_name])
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'installed': len(summary.installed) > 0,
|
||||
'skipped': len(summary.skipped) > 0,
|
||||
'failed': len(summary.failed) > 0
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error installing package {package_name} for {client_type}: {e}")
|
||||
return {
|
||||
'success': False,
|
||||
'installed': False,
|
||||
'skipped': False,
|
||||
'failed': True
|
||||
}
|
||||
|
||||
|
||||
def uninstall_package(client_type, package_name):
|
||||
"""Uninstall an MCP package.
|
||||
|
||||
Args:
|
||||
client_type (str): Type of client to configure.
|
||||
package_name (str): Name of the package to uninstall.
|
||||
|
||||
Returns:
|
||||
bool: True if successful, False otherwise.
|
||||
"""
|
||||
try:
|
||||
client = ClientFactory.create_client(client_type)
|
||||
package_manager = PackageManagerFactory.create_package_manager()
|
||||
|
||||
# Uninstall the package
|
||||
result = package_manager.uninstall(package_name)
|
||||
|
||||
# Remove any legacy config entries if they exist
|
||||
current_config = client.get_current_config()
|
||||
config_updates = {}
|
||||
if f"mcp.package.{package_name}.enabled" in current_config:
|
||||
config_updates = {f"mcp.package.{package_name}.enabled": None} # Set to None to remove the entry
|
||||
client.update_config(config_updates)
|
||||
|
||||
return result
|
||||
except Exception as e:
|
||||
print(f"Error uninstalling package: {e}")
|
||||
return False
|
||||
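A brief usage sketch for these operations. "vscode" is one of the runtimes named in the safe installer docstring, and the package name is only an example:

from apm_cli.core.operations import install_package, uninstall_package

result = install_package("vscode", "github-mcp-server")
if result["success"] and result["installed"]:
    print("installed")
elif result["skipped"]:
    print("already configured, skipped")

uninstall_package("vscode", "github-mcp-server")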
136
src/apm_cli/core/safe_installer.py
Normal file
136
src/apm_cli/core/safe_installer.py
Normal file
@@ -0,0 +1,136 @@
|
||||
"""Safe MCP server installation with conflict detection."""
|
||||
|
||||
from typing import List, Dict, Any
|
||||
from dataclasses import dataclass, field
|
||||
from ..factory import ClientFactory
|
||||
from .conflict_detector import MCPConflictDetector
|
||||
from ..utils.console import _rich_warning, _rich_success, _rich_error, _rich_info
|
||||
|
||||
|
||||
@dataclass
|
||||
class InstallationSummary:
|
||||
"""Summary of MCP server installation results."""
|
||||
|
||||
installed: List[str] = field(default_factory=list)
skipped: List[Dict[str, str]] = field(default_factory=list)
failed: List[Dict[str, str]] = field(default_factory=list)
|
||||
|
||||
def add_installed(self, server_ref: str):
|
||||
"""Add a server to the installed list."""
|
||||
self.installed.append(server_ref)
|
||||
|
||||
def add_skipped(self, server_ref: str, reason: str):
|
||||
"""Add a server to the skipped list."""
|
||||
self.skipped.append({"server": server_ref, "reason": reason})
|
||||
|
||||
def add_failed(self, server_ref: str, reason: str):
|
||||
"""Add a server to the failed list."""
|
||||
self.failed.append({"server": server_ref, "reason": reason})
|
||||
|
||||
def has_any_changes(self) -> bool:
|
||||
"""Check if any installations or failures occurred."""
|
||||
return len(self.installed) > 0 or len(self.failed) > 0
|
||||
|
||||
def log_summary(self):
|
||||
"""Log a summary of installation results."""
|
||||
if self.installed:
|
||||
_rich_success(f"✅ Installed: {', '.join(self.installed)}")
|
||||
|
||||
if self.skipped:
|
||||
for item in self.skipped:
|
||||
_rich_warning(f"⚠️ Skipped {item['server']}: {item['reason']}")
|
||||
|
||||
if self.failed:
|
||||
for item in self.failed:
|
||||
_rich_error(f"❌ Failed {item['server']}: {item['reason']}")
|
||||
|
||||
|
||||
class SafeMCPInstaller:
|
||||
"""Safe MCP server installation with conflict detection."""
|
||||
|
||||
def __init__(self, runtime: str):
|
||||
"""Initialize the safe installer.
|
||||
|
||||
Args:
|
||||
runtime: Target runtime (copilot, codex, vscode).
|
||||
"""
|
||||
self.runtime = runtime
|
||||
self.adapter = ClientFactory.create_client(runtime)
|
||||
self.conflict_detector = MCPConflictDetector(self.adapter)
|
||||
|
||||
def install_servers(self, server_references: List[str], env_overrides: Dict[str, str] = None, server_info_cache: Dict[str, Any] = None, runtime_vars: Dict[str, str] = None) -> InstallationSummary:
|
||||
"""Install MCP servers with conflict detection.
|
||||
|
||||
Args:
|
||||
server_references: List of server references to install.
|
||||
env_overrides: Optional dictionary of environment variable overrides.
|
||||
server_info_cache: Optional pre-fetched server info to avoid duplicate registry calls.
|
||||
runtime_vars: Optional dictionary of runtime variable values.
|
||||
|
||||
Returns:
|
||||
InstallationSummary with detailed results.
|
||||
"""
|
||||
summary = InstallationSummary()
|
||||
|
||||
for server_ref in server_references:
|
||||
if self.conflict_detector.check_server_exists(server_ref):
|
||||
summary.add_skipped(server_ref, "already configured")
|
||||
self._log_skip(server_ref)
|
||||
continue
|
||||
|
||||
try:
|
||||
# Pass environment overrides, server info cache, and runtime variables if provided
|
||||
kwargs = {}
|
||||
if env_overrides is not None:
|
||||
kwargs['env_overrides'] = env_overrides
|
||||
if server_info_cache is not None:
|
||||
kwargs['server_info_cache'] = server_info_cache
|
||||
if runtime_vars is not None:
|
||||
kwargs['runtime_vars'] = runtime_vars
|
||||
|
||||
result = self.adapter.configure_mcp_server(server_ref, **kwargs)
|
||||
|
||||
if result:
|
||||
summary.add_installed(server_ref)
|
||||
self._log_success(server_ref)
|
||||
else:
|
||||
summary.add_failed(server_ref, "configuration failed")
|
||||
self._log_failure(server_ref)
|
||||
except Exception as e:
|
||||
summary.add_failed(server_ref, str(e))
|
||||
self._log_error(server_ref, e)
|
||||
|
||||
return summary
|
||||
|
||||
def _log_skip(self, server_ref: str):
|
||||
"""Log when a server is skipped due to existing configuration."""
|
||||
_rich_warning(f" {server_ref} already configured, skipping")
|
||||
|
||||
def _log_success(self, server_ref: str):
|
||||
"""Log successful server installation."""
|
||||
_rich_success(f" ✓ {server_ref}")
|
||||
|
||||
def _log_failure(self, server_ref: str):
|
||||
"""Log failed server installation."""
|
||||
_rich_warning(f" ✗ {server_ref} installation failed")
|
||||
|
||||
def _log_error(self, server_ref: str, error: Exception):
|
||||
"""Log error during server installation."""
|
||||
_rich_error(f" ✗ {server_ref}: {error}")
|
||||
|
||||
def check_conflicts_only(self, server_references: List[str]) -> Dict[str, Any]:
|
||||
"""Check for conflicts without installing.
|
||||
|
||||
Args:
|
||||
server_references: List of server references to check.
|
||||
|
||||
Returns:
|
||||
Dictionary with conflict information for each server.
|
||||
"""
|
||||
conflicts = {}
|
||||
|
||||
for server_ref in server_references:
|
||||
conflicts[server_ref] = self.conflict_detector.get_conflict_summary(server_ref)
|
||||
|
||||
return conflicts
|
||||
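A minimal sketch of driving the safe installer directly, assuming "vscode" as the runtime and a hypothetical server reference and token value:

from apm_cli.core.safe_installer import SafeMCPInstaller

installer = SafeMCPInstaller("vscode")
summary = installer.install_servers(
    ["github-mcp-server"],
    env_overrides={"GITHUB_TOKEN": "<token>"},
)
summary.log_summary()
if not summary.has_any_changes():
    print("nothing new was installed")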
500
src/apm_cli/core/script_runner.py
Normal file
500
src/apm_cli/core/script_runner.py
Normal file
@@ -0,0 +1,500 @@
|
||||
"""Script runner for APM NPM-like script execution."""
|
||||
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import time
|
||||
import yaml
|
||||
from pathlib import Path
|
||||
from typing import Dict, Optional
|
||||
|
||||
from .token_manager import setup_runtime_environment
|
||||
from ..output.script_formatters import ScriptExecutionFormatter
|
||||
|
||||
|
||||
class ScriptRunner:
|
||||
"""Executes APM scripts with auto-compilation of .prompt.md files."""
|
||||
|
||||
def __init__(self, compiler=None, use_color: bool = True):
|
||||
"""Initialize script runner with optional compiler.
|
||||
|
||||
Args:
|
||||
compiler: Optional prompt compiler instance
|
||||
use_color: Whether to use colored output
|
||||
"""
|
||||
self.compiler = compiler or PromptCompiler()
|
||||
self.formatter = ScriptExecutionFormatter(use_color=use_color)
|
||||
|
||||
def run_script(self, script_name: str, params: Dict[str, str]) -> bool:
|
||||
"""Run a script from apm.yml with parameter substitution.
|
||||
|
||||
Args:
|
||||
script_name: Name of the script to run
|
||||
params: Parameters for compilation and script execution
|
||||
|
||||
Returns:
|
||||
bool: True if script executed successfully
|
||||
"""
|
||||
# Display script execution header
|
||||
header_lines = self.formatter.format_script_header(script_name, params)
|
||||
for line in header_lines:
|
||||
print(line)
|
||||
|
||||
# Load apm.yml configuration
|
||||
config = self._load_config()
|
||||
if not config:
|
||||
raise RuntimeError("No apm.yml found in current directory")
|
||||
|
||||
scripts = config.get('scripts', {})
|
||||
if script_name not in scripts:
|
||||
available = ', '.join(scripts.keys()) if scripts else 'none'
|
||||
raise RuntimeError(f"Script '{script_name}' not found. Available scripts: {available}")
|
||||
|
||||
# Get the script command
|
||||
command = scripts[script_name]
|
||||
|
||||
# Auto-compile any .prompt.md files in the command
|
||||
compiled_command, compiled_prompt_files, runtime_content = self._auto_compile_prompts(command, params)
|
||||
|
||||
# Show compilation progress if needed
|
||||
if compiled_prompt_files:
|
||||
compilation_lines = self.formatter.format_compilation_progress(compiled_prompt_files)
|
||||
for line in compilation_lines:
|
||||
print(line)
|
||||
|
||||
# Detect runtime and show execution details
|
||||
runtime = self._detect_runtime(compiled_command)
|
||||
|
||||
# Execute the final command
|
||||
if runtime_content is not None:
|
||||
# Show runtime execution details
|
||||
execution_lines = self.formatter.format_runtime_execution(
|
||||
runtime, compiled_command, len(runtime_content)
|
||||
)
|
||||
for line in execution_lines:
|
||||
print(line)
|
||||
|
||||
# Show content preview
|
||||
preview_lines = self.formatter.format_content_preview(runtime_content)
|
||||
for line in preview_lines:
|
||||
print(line)
|
||||
|
||||
try:
|
||||
# Set up GitHub token environment for all runtimes using centralized manager
|
||||
env = setup_runtime_environment(os.environ.copy())
|
||||
|
||||
# Show environment setup if relevant
|
||||
env_vars_set = []
|
||||
if 'GITHUB_TOKEN' in env and env['GITHUB_TOKEN']:
|
||||
env_vars_set.append('GITHUB_TOKEN')
|
||||
if 'GITHUB_APM_PAT' in env and env['GITHUB_APM_PAT']:
|
||||
env_vars_set.append('GITHUB_APM_PAT')
|
||||
|
||||
if env_vars_set:
|
||||
env_lines = self.formatter.format_environment_setup(runtime, env_vars_set)
|
||||
for line in env_lines:
|
||||
print(line)
|
||||
|
||||
# Track execution time
|
||||
start_time = time.time()
|
||||
|
||||
# Check if this command needs subprocess execution (has compiled content)
|
||||
if runtime_content is not None:
|
||||
# Use argument list approach for all runtimes to avoid shell parsing issues
|
||||
result = self._execute_runtime_command(compiled_command, runtime_content, env)
|
||||
else:
|
||||
# Use regular shell execution for other commands
|
||||
result = subprocess.run(compiled_command, shell=True, check=True, env=env)
|
||||
|
||||
execution_time = time.time() - start_time
|
||||
|
||||
# Show success message
|
||||
success_lines = self.formatter.format_execution_success(runtime, execution_time)
|
||||
for line in success_lines:
|
||||
print(line)
|
||||
|
||||
return result.returncode == 0
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
execution_time = time.time() - start_time
|
||||
|
||||
# Show error message
|
||||
error_lines = self.formatter.format_execution_error(runtime, e.returncode)
|
||||
for line in error_lines:
|
||||
print(line)
|
||||
|
||||
raise RuntimeError(f"Script execution failed with exit code {e.returncode}")
|
||||
|
||||
def list_scripts(self) -> Dict[str, str]:
|
||||
"""List all available scripts from apm.yml.
|
||||
|
||||
Returns:
|
||||
Dict mapping script names to their commands
|
||||
"""
|
||||
config = self._load_config()
|
||||
return config.get('scripts', {}) if config else {}
|
||||
|
||||
def _load_config(self) -> Optional[Dict]:
|
||||
"""Load apm.yml from current directory."""
|
||||
config_path = Path('apm.yml')
|
||||
if not config_path.exists():
|
||||
return None
|
||||
|
||||
with open(config_path, 'r') as f:
|
||||
return yaml.safe_load(f)
|
||||
|
||||
def _auto_compile_prompts(self, command: str, params: Dict[str, str]) -> tuple[str, list[str], str]:
|
||||
"""Auto-compile .prompt.md files and transform runtime commands.
|
||||
|
||||
Args:
|
||||
command: Original script command
|
||||
params: Parameters for compilation
|
||||
|
||||
Returns:
|
||||
Tuple of (compiled_command, list_of_compiled_prompt_files, runtime_content_or_none)
|
||||
"""
|
||||
# Find all .prompt.md files in the command using regex
|
||||
prompt_files = re.findall(r'(\S+\.prompt\.md)', command)
|
||||
compiled_prompt_files = []
|
||||
runtime_content = None
|
||||
|
||||
compiled_command = command
|
||||
for prompt_file in prompt_files:
|
||||
# Compile the prompt file with current params
|
||||
compiled_path = self.compiler.compile(prompt_file, params)
|
||||
compiled_prompt_files.append(prompt_file)
|
||||
|
||||
# Read the compiled content
|
||||
with open(compiled_path, 'r') as f:
|
||||
compiled_content = f.read().strip()
|
||||
|
||||
# Check if this is a runtime command (copilot, codex, llm) before transformation
|
||||
is_runtime_cmd = any(runtime in command for runtime in ['copilot', 'codex', 'llm']) and re.search(re.escape(prompt_file), command)
|
||||
|
||||
# Transform command based on runtime pattern
|
||||
compiled_command = self._transform_runtime_command(
|
||||
compiled_command, prompt_file, compiled_content, compiled_path
|
||||
)
|
||||
|
||||
# Store content for runtime commands that need subprocess execution
|
||||
if is_runtime_cmd:
|
||||
runtime_content = compiled_content
|
||||
|
||||
return compiled_command, compiled_prompt_files, runtime_content
|
||||
|
||||
def _transform_runtime_command(self, command: str, prompt_file: str,
|
||||
compiled_content: str, compiled_path: str) -> str:
|
||||
"""Transform runtime commands to their proper execution format.
|
||||
|
||||
Args:
|
||||
command: Original command
|
||||
prompt_file: Original .prompt.md file path
|
||||
compiled_content: Compiled prompt content as string
|
||||
compiled_path: Path to compiled .txt file
|
||||
|
||||
Returns:
|
||||
Transformed command for proper runtime execution
|
||||
"""
|
||||
# Handle environment variables prefix (e.g., "ENV1=val1 ENV2=val2 codex [args] file.prompt.md")
|
||||
# More robust approach: split by runtime commands to separate env vars from command
|
||||
runtime_commands = ['codex', 'copilot', 'llm']
|
||||
|
||||
for runtime_cmd in runtime_commands:
|
||||
runtime_pattern = f' {runtime_cmd} '
|
||||
if runtime_pattern in command and re.search(re.escape(prompt_file), command):
|
||||
parts = command.split(runtime_pattern, 1)
|
||||
potential_env_part = parts[0]
|
||||
runtime_part = runtime_cmd + ' ' + parts[1]
|
||||
|
||||
# Check if the first part looks like environment variables (has = signs)
|
||||
if '=' in potential_env_part and not potential_env_part.startswith(runtime_cmd):
|
||||
env_vars = potential_env_part
|
||||
|
||||
# Extract arguments before and after the prompt file from runtime part
|
||||
runtime_match = re.search(f'{runtime_cmd}\\s+(.*?)(' + re.escape(prompt_file) + r')(.*?)$', runtime_part)
|
||||
if runtime_match:
|
||||
args_before_file = runtime_match.group(1).strip()
|
||||
args_after_file = runtime_match.group(3).strip()
|
||||
|
||||
# Build the command based on runtime
|
||||
if runtime_cmd == 'codex':
|
||||
if args_before_file:
|
||||
result = f"{env_vars} codex exec {args_before_file}"
|
||||
else:
|
||||
result = f"{env_vars} codex exec"
|
||||
else:
|
||||
# For copilot and llm, keep the runtime name and args
|
||||
result = f"{env_vars} {runtime_cmd}"
|
||||
if args_before_file:
|
||||
# Remove any existing -p flag since we'll handle it in execution
|
||||
cleaned_args = args_before_file.replace('-p', '').strip()
|
||||
if cleaned_args:
|
||||
result += f" {cleaned_args}"
|
||||
|
||||
if args_after_file:
|
||||
result += f" {args_after_file}"
|
||||
return result
|
||||
|
||||
# Handle individual runtime patterns without environment variables
|
||||
|
||||
# Handle "codex [args] file.prompt.md [more_args]" -> "codex exec [args] [more_args]"
|
||||
if re.search(r'codex\s+.*' + re.escape(prompt_file), command):
|
||||
match = re.search(r'codex\s+(.*?)(' + re.escape(prompt_file) + r')(.*?)$', command)
|
||||
if match:
|
||||
args_before_file = match.group(1).strip()
|
||||
args_after_file = match.group(3).strip()
|
||||
|
||||
result = "codex exec"
|
||||
if args_before_file:
|
||||
result += f" {args_before_file}"
|
||||
if args_after_file:
|
||||
result += f" {args_after_file}"
|
||||
return result
|
||||
|
||||
# Handle "copilot [args] file.prompt.md [more_args]" -> "copilot [args] [more_args]"
|
||||
elif re.search(r'copilot\s+.*' + re.escape(prompt_file), command):
|
||||
match = re.search(r'copilot\s+(.*?)(' + re.escape(prompt_file) + r')(.*?)$', command)
|
||||
if match:
|
||||
args_before_file = match.group(1).strip()
|
||||
args_after_file = match.group(3).strip()
|
||||
|
||||
result = "copilot"
|
||||
if args_before_file:
|
||||
# Remove any existing -p flag since we'll handle it in execution
|
||||
cleaned_args = args_before_file.replace('-p', '').strip()
|
||||
if cleaned_args:
|
||||
result += f" {cleaned_args}"
|
||||
if args_after_file:
|
||||
result += f" {args_after_file}"
|
||||
return result
|
||||
|
||||
# Handle "llm [args] file.prompt.md [more_args]" -> "llm [args] [more_args]"
|
||||
elif re.search(r'llm\s+.*' + re.escape(prompt_file), command):
|
||||
match = re.search(r'llm\s+(.*?)(' + re.escape(prompt_file) + r')(.*?)$', command)
|
||||
if match:
|
||||
args_before_file = match.group(1).strip()
|
||||
args_after_file = match.group(3).strip()
|
||||
|
||||
result = "llm"
|
||||
if args_before_file:
|
||||
result += f" {args_before_file}"
|
||||
if args_after_file:
|
||||
result += f" {args_after_file}"
|
||||
return result
|
||||
|
||||
# Handle bare "file.prompt.md" -> "codex exec" (default to codex)
|
||||
elif command.strip() == prompt_file:
|
||||
return "codex exec"
|
||||
|
||||
# Fallback: just replace file path with compiled path (for non-runtime commands)
|
||||
return command.replace(prompt_file, compiled_path)
|
||||
|
||||
def _detect_runtime(self, command: str) -> str:
|
||||
"""Detect which runtime is being used in the command.
|
||||
|
||||
Args:
|
||||
command: The command to analyze
|
||||
|
||||
Returns:
|
||||
Name of the detected runtime (copilot, codex, llm, or unknown)
|
||||
"""
|
||||
command_lower = command.lower().strip()
|
||||
if command_lower.startswith('copilot'):
|
||||
return 'copilot'
|
||||
elif command_lower.startswith('codex'):
|
||||
return 'codex'
|
||||
elif command_lower.startswith('llm'):
|
||||
return 'llm'
|
||||
else:
|
||||
return 'unknown'
|
||||
|
||||
def _execute_runtime_command(self, command: str, content: str, env: dict) -> subprocess.CompletedProcess:
|
||||
"""Execute a runtime command using subprocess argument list to avoid shell parsing issues.
|
||||
|
||||
Args:
|
||||
command: The simplified runtime command (without content)
|
||||
content: The compiled prompt content to pass to the runtime
|
||||
env: Environment variables
|
||||
|
||||
Returns:
|
||||
subprocess.CompletedProcess: The result of the command execution
|
||||
"""
|
||||
import shlex
|
||||
|
||||
# Parse the command into arguments
|
||||
args = shlex.split(command.strip())
|
||||
|
||||
# Handle environment variables at the beginning of the command
|
||||
# Extract environment variables (key=value pairs) from the beginning of args
|
||||
env_vars = env.copy() # Start with existing environment
|
||||
actual_command_args = []
|
||||
|
||||
for arg in args:
|
||||
if '=' in arg and not actual_command_args:
|
||||
# This looks like an environment variable and we haven't started the actual command yet
|
||||
key, value = arg.split('=', 1)
|
||||
# Validate environment variable name with restrictive pattern
|
||||
# Allow letters, digits, and underscores, starting with a letter or underscore
|
||||
if re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', key):
|
||||
env_vars[key] = value
|
||||
continue
|
||||
# Once we hit a non-env-var argument, everything else is part of the command
|
||||
actual_command_args.append(arg)
|
||||
|
||||
# Determine how to pass content based on runtime
|
||||
runtime = self._detect_runtime(' '.join(actual_command_args))
|
||||
|
||||
if runtime == 'copilot':
|
||||
# Copilot uses -p flag
|
||||
actual_command_args.extend(["-p", content])
|
||||
elif runtime == 'codex':
|
||||
# Codex exec expects content as the last argument
|
||||
actual_command_args.append(content)
|
||||
elif runtime == 'llm':
|
||||
# LLM expects content as argument
|
||||
actual_command_args.append(content)
|
||||
else:
|
||||
# Default: assume content as last argument
|
||||
actual_command_args.append(content)
|
||||
|
||||
# Show subprocess details for debugging
|
||||
subprocess_lines = self.formatter.format_subprocess_details(actual_command_args[:-1], len(content))
|
||||
for line in subprocess_lines:
|
||||
print(line)
|
||||
|
||||
# Show environment variables if any were extracted
|
||||
if len(env_vars) > len(env):
|
||||
extracted_env_vars = []
|
||||
for key, value in env_vars.items():
|
||||
if key not in env:
|
||||
extracted_env_vars.append(f"{key}={value}")
|
||||
if extracted_env_vars:
|
||||
env_lines = self.formatter.format_environment_setup("command", extracted_env_vars)
|
||||
for line in env_lines:
|
||||
print(line)
|
||||
|
||||
# Execute using argument list (no shell interpretation) with updated environment
|
||||
return subprocess.run(actual_command_args, check=True, env=env_vars)
|
||||
|
||||
|
||||
class PromptCompiler:
|
||||
"""Compiles .prompt.md files with parameter substitution."""
|
||||
|
||||
DEFAULT_COMPILED_DIR = Path('.apm/compiled')
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize compiler."""
|
||||
self.compiled_dir = self.DEFAULT_COMPILED_DIR
|
||||
|
||||
def compile(self, prompt_file: str, params: Dict[str, str]) -> str:
|
||||
"""Compile a .prompt.md file with parameter substitution.
|
||||
|
||||
Args:
|
||||
prompt_file: Path to the .prompt.md file
|
||||
params: Parameters to substitute
|
||||
|
||||
Returns:
|
||||
Path to the compiled file
|
||||
"""
|
||||
# Resolve the prompt file path - check local first, then dependencies
|
||||
prompt_path = self._resolve_prompt_file(prompt_file)
|
||||
|
||||
# Now ensure compiled directory exists
|
||||
self.compiled_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
with open(prompt_path, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
# Parse frontmatter and content
|
||||
if content.startswith('---'):
|
||||
# Split frontmatter and content
|
||||
parts = content.split('---', 2)
|
||||
if len(parts) >= 3:
|
||||
frontmatter = parts[1].strip()
|
||||
main_content = parts[2].strip()
|
||||
else:
|
||||
main_content = content
|
||||
else:
|
||||
main_content = content
|
||||
|
||||
# Substitute parameters in content
|
||||
compiled_content = self._substitute_parameters(main_content, params)
|
||||
|
||||
# Generate output file path
|
||||
output_name = prompt_path.stem.replace('.prompt', '') + '.txt'
|
||||
output_path = self.compiled_dir / output_name
|
||||
|
||||
# Write compiled content
|
||||
with open(output_path, 'w') as f:
|
||||
f.write(compiled_content)
|
||||
|
||||
return str(output_path)
|
||||
|
||||
def _resolve_prompt_file(self, prompt_file: str) -> Path:
|
||||
"""Resolve prompt file path, checking local directory first, then dependencies.
|
||||
|
||||
Args:
|
||||
prompt_file: Relative path to the .prompt.md file
|
||||
|
||||
Returns:
|
||||
Path: Resolved path to the prompt file
|
||||
|
||||
Raises:
|
||||
FileNotFoundError: If prompt file is not found in local or dependency modules
|
||||
"""
|
||||
prompt_path = Path(prompt_file)
|
||||
|
||||
# First check if it exists in current directory (local)
|
||||
if prompt_path.exists():
|
||||
return prompt_path
|
||||
|
||||
# If not found locally, search in dependency modules
|
||||
apm_modules_dir = Path("apm_modules")
|
||||
if apm_modules_dir.exists():
|
||||
# Search all dependency directories for the prompt file
|
||||
for dep_dir in apm_modules_dir.iterdir():
|
||||
if dep_dir.is_dir():
|
||||
# Check in the root of the dependency
|
||||
dep_prompt_path = dep_dir / prompt_file
|
||||
if dep_prompt_path.exists():
|
||||
return dep_prompt_path
|
||||
|
||||
# Also check in common subdirectories
|
||||
for subdir in ['prompts', '.', 'workflows']:
|
||||
sub_prompt_path = dep_dir / subdir / prompt_file
|
||||
if sub_prompt_path.exists():
|
||||
return sub_prompt_path
|
||||
|
||||
# If still not found, raise an error with helpful message
|
||||
searched_locations = [
|
||||
f"Local: {prompt_path}",
|
||||
]
|
||||
|
||||
if apm_modules_dir.exists():
|
||||
searched_locations.append("Dependencies:")
|
||||
for dep_dir in apm_modules_dir.iterdir():
|
||||
if dep_dir.is_dir():
|
||||
searched_locations.append(f" - {dep_dir.name}/{prompt_file}")
|
||||
|
||||
raise FileNotFoundError(
|
||||
f"Prompt file '{prompt_file}' not found.\n"
|
||||
f"Searched in:\n" + "\n".join(searched_locations) +
|
||||
f"\n\nTip: Run 'apm install' to ensure dependencies are installed."
|
||||
)
|
||||
|
||||
def _substitute_parameters(self, content: str, params: Dict[str, str]) -> str:
|
||||
"""Substitute parameters in content.
|
||||
|
||||
Args:
|
||||
content: Content to process
|
||||
params: Parameters to substitute
|
||||
|
||||
Returns:
|
||||
Content with parameters substituted
|
||||
"""
|
||||
result = content
|
||||
for key, value in params.items():
|
||||
# Replace ${input:key} placeholders
|
||||
placeholder = f"${{input:{key}}}"
|
||||
result = result.replace(placeholder, str(value))
|
||||
return result
|
||||
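A rough end-to-end sketch of the compile-and-run flow. The apm.yml script and the prompt file shown in the comments are assumptions for illustration:

# Assumed apm.yml:
#   scripts:
#     review: "copilot code-review.prompt.md"
# Assumed code-review.prompt.md body: "Review the changes on ${input:branch}."

from apm_cli.core.script_runner import ScriptRunner

runner = ScriptRunner()
runner.run_script("review", {"branch": "feature/apm-install"})
# The prompt is compiled to .apm/compiled/code-review.txt with ${input:branch}
# substituted, and the compiled content is handed to the copilot CLI via -p.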
197
src/apm_cli/core/token_manager.py
Normal file
197
src/apm_cli/core/token_manager.py
Normal file
@@ -0,0 +1,197 @@
|
||||
"""Centralized GitHub token management for different AI runtimes.
|
||||
|
||||
This module handles the complex token environment setup required by different
|
||||
AI CLI tools, each of which expects different environment variable names for
|
||||
GitHub authentication and API access.
|
||||
|
||||
Token Architecture:
|
||||
- GITHUB_COPILOT_PAT: User-scoped PAT specifically for Copilot
|
||||
- GITHUB_APM_PAT: Fine-grained PAT for APM module access
|
||||
- GITHUB_TOKEN: User-scoped PAT for GitHub Models API access
|
||||
- GITHUB_NPM_PAT: Classic PAT for GitHub npm registry access
|
||||
|
||||
Runtime Requirements:
- Copilot CLI: Uses GH_TOKEN / GITHUB_PERSONAL_ACCESS_TOKEN
- Codex CLI: Uses GITHUB_TOKEN (must be user-scoped for GitHub Models)
- LLM CLI: Uses GITHUB_MODELS_KEY for GitHub Models access
"""
|
||||
|
||||
import os
|
||||
from typing import Dict, Optional, Tuple
|
||||
|
||||
|
||||
class GitHubTokenManager:
|
||||
"""Manages GitHub token environment setup for different AI runtimes."""
|
||||
|
||||
# Define token precedence for different use cases
|
||||
TOKEN_PRECEDENCE = {
|
||||
'copilot': ['GITHUB_COPILOT_PAT', 'GITHUB_TOKEN', 'GITHUB_APM_PAT'],
|
||||
'models': ['GITHUB_TOKEN'], # GitHub Models requires user-scoped PAT
|
||||
'modules': ['GITHUB_APM_PAT', 'GITHUB_TOKEN'], # APM module access
|
||||
'npm': ['GITHUB_NPM_PAT'] # npm registry access
|
||||
}
|
||||
|
||||
# Runtime-specific environment variable mappings
|
||||
RUNTIME_ENV_VARS = {
|
||||
'copilot': ['GH_TOKEN', 'GITHUB_PERSONAL_ACCESS_TOKEN'],
|
||||
'codex': ['GITHUB_TOKEN'], # Uses GITHUB_TOKEN directly
|
||||
'llm': ['GITHUB_MODELS_KEY'], # LLM-specific variable for GitHub Models
|
||||
}
|
||||
|
||||
def __init__(self, preserve_existing: bool = True):
|
||||
"""Initialize token manager.
|
||||
|
||||
Args:
|
||||
preserve_existing: If True, never overwrite existing environment variables
|
||||
"""
|
||||
self.preserve_existing = preserve_existing
|
||||
|
||||
def setup_environment(self, env: Optional[Dict[str, str]] = None) -> Dict[str, str]:
|
||||
"""Set up complete token environment for all runtimes.
|
||||
|
||||
Args:
|
||||
env: Environment dictionary to modify (defaults to os.environ.copy())
|
||||
|
||||
Returns:
|
||||
Updated environment dictionary with all required tokens set
|
||||
"""
|
||||
if env is None:
|
||||
env = os.environ.copy()
|
||||
|
||||
# Get available tokens
|
||||
available_tokens = self._get_available_tokens(env)
|
||||
|
||||
# Set up tokens for each runtime without overwriting existing values
|
||||
self._setup_copilot_tokens(env, available_tokens)
|
||||
self._setup_codex_tokens(env, available_tokens)
|
||||
self._setup_llm_tokens(env, available_tokens)
|
||||
|
||||
return env
|
||||
|
||||
def get_token_for_purpose(self, purpose: str, env: Optional[Dict[str, str]] = None) -> Optional[str]:
|
||||
"""Get the best available token for a specific purpose.
|
||||
|
||||
Args:
|
||||
purpose: Token purpose ('copilot', 'models', 'modules', 'npm')
|
||||
env: Environment to check (defaults to os.environ)
|
||||
|
||||
Returns:
|
||||
Best available token for the purpose, or None if not available
|
||||
"""
|
||||
if env is None:
|
||||
env = os.environ
|
||||
|
||||
if purpose not in self.TOKEN_PRECEDENCE:
|
||||
raise ValueError(f"Unknown purpose: {purpose}")
|
||||
|
||||
for token_var in self.TOKEN_PRECEDENCE[purpose]:
|
||||
token = env.get(token_var)
|
||||
if token:
|
||||
return token
|
||||
return None
|
||||
|
||||
def validate_tokens(self, env: Optional[Dict[str, str]] = None) -> Tuple[bool, str]:
|
||||
"""Validate that required tokens are available.
|
||||
|
||||
Args:
|
||||
env: Environment to check (defaults to os.environ)
|
||||
|
||||
Returns:
|
||||
Tuple of (is_valid, error_message)
|
||||
"""
|
||||
if env is None:
|
||||
env = os.environ
|
||||
|
||||
# Check for at least one valid token
|
||||
has_any_token = any(
|
||||
self.get_token_for_purpose(purpose, env)
|
||||
for purpose in ['copilot', 'models', 'modules']
|
||||
)
|
||||
|
||||
if not has_any_token:
|
||||
return False, (
|
||||
"No GitHub tokens found. Set one of:\n"
|
||||
"- GITHUB_TOKEN (user-scoped PAT for GitHub Models)\n"
|
||||
"- GITHUB_APM_PAT (fine-grained PAT for APM modules)"
|
||||
)
|
||||
|
||||
# Warn about GitHub Models access if only fine-grained PAT is available
|
||||
models_token = self.get_token_for_purpose('models', env)
|
||||
if not models_token:
|
||||
has_fine_grained = env.get('GITHUB_APM_PAT')
|
||||
if has_fine_grained:
|
||||
return True, (
|
||||
"Warning: Only fine-grained PAT available. "
|
||||
"GitHub Models requires GITHUB_TOKEN (user-scoped PAT)"
|
||||
)
|
||||
|
||||
return True, "Token validation passed"
|
||||
|
||||
def _get_available_tokens(self, env: Dict[str, str]) -> Dict[str, str]:
|
||||
"""Get all available GitHub tokens from environment."""
|
||||
tokens = {}
|
||||
for purpose, token_vars in self.TOKEN_PRECEDENCE.items():
|
||||
for token_var in token_vars:
|
||||
if token_var in env and env[token_var]:
|
||||
tokens[token_var] = env[token_var]
|
||||
return tokens
|
||||
|
||||
def _setup_copilot_tokens(self, env: Dict[str, str], available_tokens: Dict[str, str]):
|
||||
"""Set up tokens for Copilot."""
|
||||
copilot_token = self.get_token_for_purpose('copilot', available_tokens)
|
||||
if not copilot_token:
|
||||
return
|
||||
|
||||
for env_var in self.RUNTIME_ENV_VARS['copilot']:
|
||||
if self.preserve_existing and env_var in env:
|
||||
continue
|
||||
env[env_var] = copilot_token
|
||||
|
||||
def _setup_codex_tokens(self, env: Dict[str, str], available_tokens: Dict[str, str]):
|
||||
"""Set up tokens for Codex CLI (preserve existing GITHUB_TOKEN)."""
|
||||
# Codex uses GITHUB_TOKEN directly - only set if missing
|
||||
if self.preserve_existing and 'GITHUB_TOKEN' in env:
|
||||
return
|
||||
|
||||
models_token = self.get_token_for_purpose('models', available_tokens)
|
||||
if models_token and 'GITHUB_TOKEN' not in env:
|
||||
env['GITHUB_TOKEN'] = models_token
|
||||
|
||||
def _setup_llm_tokens(self, env: Dict[str, str], available_tokens: Dict[str, str]):
|
||||
"""Set up tokens for LLM CLI."""
|
||||
# LLM uses GITHUB_MODELS_KEY, prefer GITHUB_TOKEN if available
|
||||
if self.preserve_existing and 'GITHUB_MODELS_KEY' in env:
|
||||
return
|
||||
|
||||
models_token = self.get_token_for_purpose('models', available_tokens)
|
||||
if models_token:
|
||||
env['GITHUB_MODELS_KEY'] = models_token
|
||||
|
||||
|
||||
# Convenience functions for common use cases
|
||||
def setup_runtime_environment(env: Optional[Dict[str, str]] = None) -> Dict[str, str]:
|
||||
"""Set up complete runtime environment for all AI CLIs."""
|
||||
manager = GitHubTokenManager()
|
||||
return manager.setup_environment(env)
|
||||
|
||||
|
||||
def validate_github_tokens(env: Optional[Dict[str, str]] = None) -> Tuple[bool, str]:
|
||||
"""Validate GitHub token setup."""
|
||||
manager = GitHubTokenManager()
|
||||
return manager.validate_tokens(env)
|
||||
|
||||
|
||||
def get_github_token_for_runtime(runtime: str, env: Optional[Dict[str, str]] = None) -> Optional[str]:
|
||||
"""Get the appropriate GitHub token for a specific runtime."""
|
||||
manager = GitHubTokenManager()
|
||||
|
||||
# Map runtime names to purposes
|
||||
runtime_to_purpose = {
|
||||
'copilot': 'copilot',
|
||||
'codex': 'models',
|
||||
'llm': 'models',
|
||||
}
|
||||
|
||||
purpose = runtime_to_purpose.get(runtime)
|
||||
if not purpose:
|
||||
raise ValueError(f"Unknown runtime: {runtime}")
|
||||
|
||||
return manager.get_token_for_purpose(purpose, env)
|
||||
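A short sketch of the convenience functions above (values shown are placeholders):

from apm_cli.core.token_manager import (
    get_github_token_for_runtime,
    setup_runtime_environment,
    validate_github_tokens,
)

ok, message = validate_github_tokens()
if not ok:
    raise SystemExit(message)

env = setup_runtime_environment()                    # copy of os.environ with runtime vars filled in
codex_token = get_github_token_for_runtime("codex")  # resolved via the 'models' precedence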
28
src/apm_cli/deps/__init__.py
Normal file
28
src/apm_cli/deps/__init__.py
Normal file
@@ -0,0 +1,28 @@
|
||||
"""Dependencies management package for APM-CLI."""
|
||||
|
||||
from .apm_resolver import APMDependencyResolver
|
||||
from .dependency_graph import (
|
||||
DependencyGraph, DependencyTree, DependencyNode, FlatDependencyMap,
|
||||
CircularRef, ConflictInfo
|
||||
)
|
||||
from .aggregator import sync_workflow_dependencies, scan_workflows_for_dependencies
|
||||
from .verifier import verify_dependencies, install_missing_dependencies, load_apm_config
|
||||
from .github_downloader import GitHubPackageDownloader
|
||||
from .package_validator import PackageValidator
|
||||
|
||||
__all__ = [
|
||||
'sync_workflow_dependencies',
|
||||
'scan_workflows_for_dependencies',
|
||||
'verify_dependencies',
|
||||
'install_missing_dependencies',
|
||||
'load_apm_config',
|
||||
'GitHubPackageDownloader',
|
||||
'PackageValidator',
|
||||
'DependencyGraph',
|
||||
'DependencyTree',
|
||||
'DependencyNode',
|
||||
'FlatDependencyMap',
|
||||
'CircularRef',
|
||||
'ConflictInfo',
|
||||
'APMDependencyResolver'
|
||||
]
|
||||
67
src/apm_cli/deps/aggregator.py
Normal file
67
src/apm_cli/deps/aggregator.py
Normal file
@@ -0,0 +1,67 @@
|
||||
"""Workflow dependency aggregator for APM-CLI."""
|
||||
|
||||
import os
|
||||
import glob
|
||||
from pathlib import Path
|
||||
import yaml
|
||||
import frontmatter
|
||||
|
||||
|
||||
def scan_workflows_for_dependencies():
|
||||
"""Scan all workflow files for MCP dependencies following VSCode's .github/prompts convention.
|
||||
|
||||
Returns:
|
||||
set: A set of unique MCP server names from all workflows.
|
||||
"""
|
||||
# Support VSCode's .github/prompts convention with .prompt.md files
|
||||
prompt_patterns = [
|
||||
"**/.github/prompts/*.prompt.md", # VSCode convention: .github/prompts/
|
||||
"**/*.prompt.md" # Generic .prompt.md files
|
||||
]
|
||||
|
||||
workflows = []
|
||||
for pattern in prompt_patterns:
|
||||
workflows.extend(glob.glob(pattern, recursive=True))
|
||||
|
||||
# Remove duplicates
|
||||
workflows = list(set(workflows))
|
||||
|
||||
all_servers = set()
|
||||
|
||||
for workflow_file in workflows:
|
||||
try:
|
||||
with open(workflow_file, 'r', encoding='utf-8') as f:
|
||||
content = frontmatter.load(f)
|
||||
if 'mcp' in content.metadata and isinstance(content.metadata['mcp'], list):
|
||||
all_servers.update(content.metadata['mcp'])
|
||||
except Exception as e:
|
||||
print(f"Error processing {workflow_file}: {e}")
|
||||
|
||||
return all_servers
|
||||
|
||||
|
||||
def sync_workflow_dependencies(output_file="apm.yml"):
|
||||
"""Extract all MCP servers from workflows into apm.yml.
|
||||
|
||||
Args:
|
||||
output_file (str, optional): Path to the output file. Defaults to "apm.yml".
|
||||
|
||||
Returns:
|
||||
tuple: (bool, list) - Success status and list of servers added
|
||||
"""
|
||||
all_servers = scan_workflows_for_dependencies()
|
||||
|
||||
# Prepare the configuration
|
||||
apm_config = {
|
||||
'version': '1.0',
|
||||
'servers': sorted(list(all_servers))
|
||||
}
|
||||
|
||||
try:
|
||||
# Create the file
|
||||
with open(output_file, 'w', encoding='utf-8') as f:
|
||||
yaml.dump(apm_config, f, default_flow_style=False)
|
||||
return True, apm_config['servers']
|
||||
except Exception as e:
|
||||
print(f"Error writing to {output_file}: {e}")
|
||||
return False, []
|
||||
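An illustrative input/output pair for the aggregator; the workflow file name and server names are examples only:

# .github/prompts/triage.prompt.md (assumed):
#   ---
#   mcp:
#     - github-mcp-server
#     - filesystem-mcp
#   ---
#   Triage the open issues ...

from apm_cli.deps.aggregator import sync_workflow_dependencies

ok, servers = sync_workflow_dependencies("apm.yml")
# On success, apm.yml contains the sorted server list:
#   servers:
#   - filesystem-mcp
#   - github-mcp-server
#   version: '1.0'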
362
src/apm_cli/deps/apm_resolver.py
Normal file
362
src/apm_cli/deps/apm_resolver.py
Normal file
@@ -0,0 +1,362 @@
|
||||
"""APM dependency resolution engine with recursive resolution and conflict detection."""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Set, Optional, Tuple
|
||||
from collections import deque
|
||||
|
||||
from ..models.apm_package import APMPackage, DependencyReference
|
||||
from .dependency_graph import (
|
||||
DependencyGraph, DependencyTree, DependencyNode, FlatDependencyMap,
|
||||
CircularRef, ConflictInfo
|
||||
)
|
||||
|
||||
|
||||
class APMDependencyResolver:
|
||||
"""Handles recursive APM dependency resolution similar to NPM."""
|
||||
|
||||
def __init__(self, max_depth: int = 50):
|
||||
"""Initialize the resolver with maximum recursion depth."""
|
||||
self.max_depth = max_depth
|
||||
self._resolution_path = [] # For test compatibility
|
||||
|
||||
def resolve_dependencies(self, project_root: Path) -> DependencyGraph:
|
||||
"""
|
||||
Resolve all APM dependencies recursively.
|
||||
|
||||
Args:
|
||||
project_root: Path to the project root containing apm.yml
|
||||
|
||||
Returns:
|
||||
DependencyGraph: Complete resolved dependency graph
|
||||
"""
|
||||
# Load the root package
|
||||
apm_yml_path = project_root / "apm.yml"
|
||||
if not apm_yml_path.exists():
|
||||
# Create empty dependency graph for projects without apm.yml
|
||||
empty_package = APMPackage(name="unknown", version="0.0.0", package_path=project_root)
|
||||
empty_tree = DependencyTree(root_package=empty_package)
|
||||
empty_flat = FlatDependencyMap()
|
||||
return DependencyGraph(
|
||||
root_package=empty_package,
|
||||
dependency_tree=empty_tree,
|
||||
flattened_dependencies=empty_flat
|
||||
)
|
||||
|
||||
try:
|
||||
root_package = APMPackage.from_apm_yml(apm_yml_path)
|
||||
except (ValueError, FileNotFoundError) as e:
|
||||
# Create error graph
|
||||
empty_package = APMPackage(name="error", version="0.0.0", package_path=project_root)
|
||||
empty_tree = DependencyTree(root_package=empty_package)
|
||||
empty_flat = FlatDependencyMap()
|
||||
graph = DependencyGraph(
|
||||
root_package=empty_package,
|
||||
dependency_tree=empty_tree,
|
||||
flattened_dependencies=empty_flat
|
||||
)
|
||||
graph.add_error(f"Failed to load root apm.yml: {e}")
|
||||
return graph
|
||||
|
||||
# Build the complete dependency tree
|
||||
dependency_tree = self.build_dependency_tree(apm_yml_path)
|
||||
|
||||
# Detect circular dependencies
|
||||
circular_deps = self.detect_circular_dependencies(dependency_tree)
|
||||
|
||||
# Flatten dependencies for installation
|
||||
flattened_deps = self.flatten_dependencies(dependency_tree)
|
||||
|
||||
# Create and return the complete graph
|
||||
graph = DependencyGraph(
|
||||
root_package=root_package,
|
||||
dependency_tree=dependency_tree,
|
||||
flattened_dependencies=flattened_deps,
|
||||
circular_dependencies=circular_deps
|
||||
)
|
||||
|
||||
return graph
|
||||
|
||||
def build_dependency_tree(self, root_apm_yml: Path) -> DependencyTree:
|
||||
"""
|
||||
Build complete tree of all dependencies and sub-dependencies.
|
||||
|
||||
Uses breadth-first traversal to build the dependency tree level by level.
|
||||
This allows for early conflict detection and clearer error reporting.
|
||||
|
||||
Args:
|
||||
root_apm_yml: Path to the root apm.yml file
|
||||
|
||||
Returns:
|
||||
DependencyTree: Hierarchical dependency tree
|
||||
"""
|
||||
# Load root package
|
||||
try:
|
||||
root_package = APMPackage.from_apm_yml(root_apm_yml)
|
||||
except (ValueError, FileNotFoundError) as e:
|
||||
# Return empty tree with error
|
||||
empty_package = APMPackage(name="error", version="0.0.0")
|
||||
tree = DependencyTree(root_package=empty_package)
|
||||
return tree
|
||||
|
||||
# Initialize the tree
|
||||
tree = DependencyTree(root_package=root_package)
|
||||
|
||||
# Queue for breadth-first traversal: (dependency_ref, depth, parent_node)
|
||||
processing_queue: deque[Tuple[DependencyReference, int, Optional[DependencyNode]]] = deque()
|
||||
|
||||
# Set to track queued repo URLs for O(1) lookup instead of O(n) list comprehension
|
||||
queued_repo_urls: Set[str] = set()
|
||||
|
||||
# Add root dependencies to queue
|
||||
root_deps = root_package.get_apm_dependencies()
|
||||
for dep_ref in root_deps:
|
||||
processing_queue.append((dep_ref, 1, None))
|
||||
queued_repo_urls.add(dep_ref.repo_url)
|
||||
|
||||
# Process dependencies breadth-first
|
||||
while processing_queue:
|
||||
dep_ref, depth, parent_node = processing_queue.popleft()
|
||||
|
||||
# Remove from queued set since we're now processing this dependency
|
||||
queued_repo_urls.discard(dep_ref.repo_url)
|
||||
|
||||
# Check maximum depth to prevent infinite recursion
|
||||
if depth > self.max_depth:
|
||||
continue
|
||||
|
||||
# Check if we already processed this dependency at this level or higher
|
||||
existing_node = tree.get_node(dep_ref.repo_url)
|
||||
if existing_node and existing_node.depth <= depth:
|
||||
# We've already processed this dependency at a shallower or equal depth
|
||||
# Create parent-child relationship if parent exists
|
||||
if parent_node and existing_node not in parent_node.children:
|
||||
parent_node.children.append(existing_node)
|
||||
continue
|
||||
|
||||
# Create a new node for this dependency
|
||||
# Note: In a real implementation, we would load the actual package here
|
||||
# For now, create a placeholder package
|
||||
placeholder_package = APMPackage(
|
||||
name=dep_ref.get_display_name(),
|
||||
version="unknown",
|
||||
source=dep_ref.repo_url
|
||||
)
|
||||
|
||||
node = DependencyNode(
|
||||
package=placeholder_package,
|
||||
dependency_ref=dep_ref,
|
||||
depth=depth,
|
||||
parent=parent_node
|
||||
)
|
||||
|
||||
# Add to tree
|
||||
tree.add_node(node)
|
||||
|
||||
# Create parent-child relationship
|
||||
if parent_node:
|
||||
parent_node.children.append(node)
|
||||
|
||||
# Try to load the dependency package and its dependencies
|
||||
# For Task 3, this focuses on the resolution algorithm structure
|
||||
# Package loading integration will be completed in Tasks 2 & 4
|
||||
try:
|
||||
# Attempt to load package - currently returns None (placeholder implementation)
|
||||
# This will integrate with Task 2 (GitHub downloader) and Task 4 (apm_modules scanning)
|
||||
loaded_package = self._try_load_dependency_package(dep_ref)
|
||||
if loaded_package:
|
||||
# Update the node with the actual loaded package
|
||||
node.package = loaded_package
|
||||
|
||||
# Get sub-dependencies and add them to the processing queue
|
||||
sub_dependencies = loaded_package.get_apm_dependencies()
|
||||
for sub_dep in sub_dependencies:
|
||||
# Avoid infinite recursion by checking if we're already processing this dep
|
||||
# Use O(1) set lookup instead of O(n) list comprehension
|
||||
if sub_dep.repo_url not in queued_repo_urls:
|
||||
processing_queue.append((sub_dep, depth + 1, node))
|
||||
queued_repo_urls.add(sub_dep.repo_url)
|
||||
except (ValueError, FileNotFoundError) as e:
|
||||
# Could not load dependency package - this is expected for remote dependencies
|
||||
# The node already has a placeholder package, so continue with that
|
||||
pass
|
||||
|
||||
return tree
|
||||
|
||||
def detect_circular_dependencies(self, tree: DependencyTree) -> List[CircularRef]:
|
||||
"""
|
||||
Detect and report circular dependency chains.
|
||||
|
||||
Uses depth-first search to detect cycles in the dependency graph.
|
||||
A cycle is detected when we encounter the same repository URL
|
||||
in our current traversal path.
|
||||
|
||||
Args:
|
||||
tree: The dependency tree to analyze
|
||||
|
||||
Returns:
|
||||
List[CircularRef]: List of detected circular dependencies
|
||||
"""
|
||||
circular_deps = []
|
||||
visited: Set[str] = set()
|
||||
current_path: List[str] = []
|
||||
|
||||
def dfs_detect_cycles(node: DependencyNode) -> None:
|
||||
"""Recursive DFS function to detect cycles."""
|
||||
node_id = node.get_id()
|
||||
repo_url = node.dependency_ref.repo_url
|
||||
|
||||
# Check if this repo URL is already in our current path (cycle detected)
|
||||
if repo_url in current_path:
|
||||
# Found a cycle - create the cycle path
|
||||
cycle_start_index = current_path.index(repo_url)
|
||||
cycle_path = current_path[cycle_start_index:] + [repo_url]
|
||||
|
||||
circular_ref = CircularRef(
|
||||
cycle_path=cycle_path,
|
||||
detected_at_depth=node.depth
|
||||
)
|
||||
circular_deps.append(circular_ref)
|
||||
return
|
||||
|
||||
# Mark current node as visited and add repo URL to path
|
||||
visited.add(node_id)
|
||||
current_path.append(repo_url)
|
||||
|
||||
# Check all children
|
||||
for child in node.children:
|
||||
child_id = child.get_id()
|
||||
|
||||
# Only recurse if we haven't processed this subtree completely
|
||||
if child_id not in visited or child.dependency_ref.repo_url in current_path:
|
||||
dfs_detect_cycles(child)
|
||||
|
||||
# Remove from path when backtracking (but keep in visited)
|
||||
current_path.pop()
|
||||
|
||||
# Start DFS from all root level dependencies (depth 1)
|
||||
root_deps = tree.get_nodes_at_depth(1)
|
||||
for root_dep in root_deps:
|
||||
if root_dep.get_id() not in visited:
|
||||
current_path = [] # Reset path for each root
|
||||
dfs_detect_cycles(root_dep)
|
||||
|
||||
return circular_deps
|
||||
|
||||
def flatten_dependencies(self, tree: DependencyTree) -> FlatDependencyMap:
|
||||
"""
|
||||
Flatten tree to avoid duplicate installations (NPM hoisting).
|
||||
|
||||
Implements "first wins" conflict resolution strategy where the first
|
||||
declared dependency takes precedence over later conflicting dependencies.
|
||||
|
||||
Args:
|
||||
tree: The dependency tree to flatten
|
||||
|
||||
Returns:
|
||||
FlatDependencyMap: Flattened dependencies ready for installation
|
||||
"""
|
||||
flat_map = FlatDependencyMap()
|
||||
seen_repos: Set[str] = set()
|
||||
|
||||
# Process dependencies level by level (breadth-first)
|
||||
# This ensures that dependencies declared earlier in the tree get priority
|
||||
for depth in range(1, tree.max_depth + 1):
|
||||
nodes_at_depth = tree.get_nodes_at_depth(depth)
|
||||
|
||||
# Sort nodes by their position in the tree to ensure deterministic ordering
|
||||
# In a real implementation, this would be based on declaration order
|
||||
nodes_at_depth.sort(key=lambda node: node.get_id())
|
||||
|
||||
for node in nodes_at_depth:
|
||||
repo_url = node.dependency_ref.repo_url
|
||||
|
||||
if repo_url not in seen_repos:
|
||||
# First occurrence - add without conflict
|
||||
flat_map.add_dependency(node.dependency_ref, is_conflict=False)
|
||||
seen_repos.add(repo_url)
|
||||
else:
|
||||
# Conflict - record it but keep the first one
|
||||
flat_map.add_dependency(node.dependency_ref, is_conflict=True)
|
||||
|
||||
return flat_map
|
||||
|
||||
def _validate_dependency_reference(self, dep_ref: DependencyReference) -> bool:
|
||||
"""
|
||||
Validate that a dependency reference is well-formed.
|
||||
|
||||
Args:
|
||||
dep_ref: The dependency reference to validate
|
||||
|
||||
Returns:
|
||||
bool: True if valid, False otherwise
|
||||
"""
|
||||
if not dep_ref.repo_url:
|
||||
return False
|
||||
|
||||
# Basic validation - in real implementation would be more thorough
|
||||
if '/' not in dep_ref.repo_url:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def _try_load_dependency_package(self, dep_ref: DependencyReference) -> Optional[APMPackage]:
|
||||
"""
|
||||
Try to load a dependency package from local paths.
|
||||
|
||||
This is a placeholder implementation for Task 3 (dependency resolution algorithm).
|
||||
The actual package loading from apm_modules/ will be implemented in Task 4
|
||||
(Enhanced Primitive Discovery System) and Task 2 (GitHub Package Downloader).
|
||||
|
||||
Args:
|
||||
dep_ref: Reference to the dependency to load
|
||||
|
||||
Returns:
|
||||
APMPackage: Loaded package if found, None otherwise
|
||||
|
||||
Raises:
|
||||
ValueError: If package exists but has invalid format
|
||||
FileNotFoundError: If package cannot be found
|
||||
"""
|
||||
# For Task 3 (dependency resolution), we focus on the algorithm logic
|
||||
# without implementing specific file system scanning which belongs to Task 4
|
||||
#
|
||||
# In the final implementation:
|
||||
# - Task 2 will handle downloading packages from GitHub repositories
|
||||
# - Task 4 will handle scanning apm_modules/ directory structure
|
||||
# - This method will integrate with both systems
|
||||
|
||||
# For now, return None to indicate package not found locally
|
||||
# This allows the resolution algorithm to create placeholder nodes
|
||||
# and continue with dependency graph construction
|
||||
return None
|
||||
|
||||
def _create_resolution_summary(self, graph: DependencyGraph) -> str:
|
||||
"""
|
||||
Create a human-readable summary of the resolution results.
|
||||
|
||||
Args:
|
||||
graph: The resolved dependency graph
|
||||
|
||||
Returns:
|
||||
str: Summary string
|
||||
"""
|
||||
summary = graph.get_summary()
|
||||
lines = [
|
||||
f"Dependency Resolution Summary:",
|
||||
f" Root package: {summary['root_package']}",
|
||||
f" Total dependencies: {summary['total_dependencies']}",
|
||||
f" Maximum depth: {summary['max_depth']}",
|
||||
]
|
||||
|
||||
if summary['has_conflicts']:
|
||||
lines.append(f" Conflicts detected: {summary['conflict_count']}")
|
||||
|
||||
if summary['has_circular_dependencies']:
|
||||
lines.append(f" Circular dependencies: {summary['circular_count']}")
|
||||
|
||||
if summary['has_errors']:
|
||||
lines.append(f" Resolution errors: {summary['error_count']}")
|
||||
|
||||
lines.append(f" Status: {'✅ Valid' if summary['is_valid'] else '❌ Invalid'}")
|
||||
|
||||
return "\n".join(lines)
|
||||
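A minimal sketch of running the resolver against a project directory that contains an apm.yml:

from pathlib import Path
from apm_cli.deps.apm_resolver import APMDependencyResolver

resolver = APMDependencyResolver(max_depth=50)
graph = resolver.resolve_dependencies(Path("."))

summary = graph.get_summary()
if not summary["is_valid"]:
    raise SystemExit(f"resolution failed: {graph.resolution_errors}")

for dep in graph.flattened_dependencies.get_installation_list():
    print("would install:", dep.repo_url)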
187
src/apm_cli/deps/dependency_graph.py
Normal file
187
src/apm_cli/deps/dependency_graph.py
Normal file
@@ -0,0 +1,187 @@
|
||||
"""Data structures for dependency graph representation and resolution."""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Set, Tuple, Any
|
||||
from ..models.apm_package import APMPackage, DependencyReference
|
||||
|
||||
|
||||
@dataclass
|
||||
class DependencyNode:
|
||||
"""Represents a single dependency node in the dependency graph."""
|
||||
package: APMPackage
|
||||
dependency_ref: DependencyReference
|
||||
depth: int = 0
|
||||
children: List['DependencyNode'] = field(default_factory=list)
|
||||
parent: Optional['DependencyNode'] = None
|
||||
|
||||
def get_id(self) -> str:
|
||||
"""Get unique identifier for this node."""
|
||||
# Include reference to distinguish between different versions/branches of same repo
|
||||
if self.dependency_ref.reference:
|
||||
return f"{self.dependency_ref.repo_url}#{self.dependency_ref.reference}"
|
||||
return self.dependency_ref.repo_url
|
||||
|
||||
def get_display_name(self) -> str:
|
||||
"""Get display name for this dependency."""
|
||||
return self.dependency_ref.get_display_name()
|
||||
|
||||
|
||||
@dataclass
|
||||
class CircularRef:
|
||||
"""Represents a circular dependency reference."""
|
||||
cycle_path: List[str] # List of repo URLs forming the cycle
|
||||
    detected_at_depth: int

    def _format_complete_cycle(self) -> str:
        """
        Return a string representation of the cycle, ensuring it is visually complete.
        If the cycle path does not end at the starting node, append the start to the end.
        """
        if not self.cycle_path:
            return "(empty path)"
        cycle_display = " -> ".join(self.cycle_path)
        # Ensure the cycle visually returns to the start node
        if len(self.cycle_path) > 1 and self.cycle_path[0] != self.cycle_path[-1]:
            cycle_display += f" -> {self.cycle_path[0]}"
        return cycle_display

    def __str__(self) -> str:
        """String representation of the circular dependency."""
        return f"Circular dependency detected: {self._format_complete_cycle()}"


@dataclass
class DependencyTree:
    """Hierarchical representation of dependencies before flattening."""
    root_package: APMPackage
    nodes: Dict[str, DependencyNode] = field(default_factory=dict)
    max_depth: int = 0

    def add_node(self, node: DependencyNode) -> None:
        """Add a node to the tree."""
        self.nodes[node.get_id()] = node
        self.max_depth = max(self.max_depth, node.depth)

    def get_node(self, repo_url: str) -> Optional[DependencyNode]:
        """Get a node by its repository URL."""
        return self.nodes.get(repo_url)

    def get_nodes_at_depth(self, depth: int) -> List[DependencyNode]:
        """Get all nodes at a specific depth level."""
        return [node for node in self.nodes.values() if node.depth == depth]

    def has_dependency(self, repo_url: str) -> bool:
        """Check if a dependency exists in the tree."""
        # Check by repo URL, not by full node ID (which may include reference)
        return any(node.dependency_ref.repo_url == repo_url for node in self.nodes.values())


@dataclass
class ConflictInfo:
    """Information about a dependency conflict."""
    repo_url: str
    winner: DependencyReference  # The dependency that "wins"
    conflicts: List[DependencyReference]  # All conflicting dependencies
    reason: str  # Explanation of why winner was chosen

    def __str__(self) -> str:
        """String representation of the conflict."""
        conflict_refs = [str(ref) for ref in self.conflicts]
        return f"Conflict for {self.repo_url}: {str(self.winner)} wins over {', '.join(conflict_refs)} ({self.reason})"


@dataclass
class FlatDependencyMap:
    """Final flattened dependency mapping ready for installation."""
    dependencies: Dict[str, DependencyReference] = field(default_factory=dict)
    conflicts: List[ConflictInfo] = field(default_factory=list)
    install_order: List[str] = field(default_factory=list)  # Order for installation

    def add_dependency(self, dep_ref: DependencyReference, is_conflict: bool = False) -> None:
        """Add a dependency to the flat map."""
        repo_url = dep_ref.repo_url

        # If this is the first occurrence, just add it
        if repo_url not in self.dependencies:
            self.dependencies[repo_url] = dep_ref
            self.install_order.append(repo_url)
        elif is_conflict:
            # Record the conflict but keep the first one (first wins strategy)
            existing_ref = self.dependencies[repo_url]
            conflict = ConflictInfo(
                repo_url=repo_url,
                winner=existing_ref,
                conflicts=[dep_ref],
                reason="first declared dependency wins"
            )

            # Check if we already have a conflict for this repo
            existing_conflict = next((c for c in self.conflicts if c.repo_url == repo_url), None)
            if existing_conflict:
                existing_conflict.conflicts.append(dep_ref)
            else:
                self.conflicts.append(conflict)

    def get_dependency(self, repo_url: str) -> Optional[DependencyReference]:
        """Get a dependency by repository URL."""
        return self.dependencies.get(repo_url)

    def has_conflicts(self) -> bool:
        """Check if there are any conflicts in the flattened map."""
        return bool(self.conflicts)

    def total_dependencies(self) -> int:
        """Get total number of unique dependencies."""
        return len(self.dependencies)

    def get_installation_list(self) -> List[DependencyReference]:
        """Get dependencies in installation order."""
        return [self.dependencies[repo_url] for repo_url in self.install_order if repo_url in self.dependencies]


@dataclass
class DependencyGraph:
    """Complete resolved dependency information."""
    root_package: APMPackage
    dependency_tree: DependencyTree
    flattened_dependencies: FlatDependencyMap
    circular_dependencies: List[CircularRef] = field(default_factory=list)
    resolution_errors: List[str] = field(default_factory=list)

    def has_circular_dependencies(self) -> bool:
        """Check if there are any circular dependencies."""
        return bool(self.circular_dependencies)

    def has_conflicts(self) -> bool:
        """Check if there are any dependency conflicts."""
        return self.flattened_dependencies.has_conflicts()

    def has_errors(self) -> bool:
        """Check if there are any resolution errors."""
        return bool(self.resolution_errors)

    def is_valid(self) -> bool:
        """Check if the dependency graph is valid (no circular deps or errors)."""
        return not self.has_circular_dependencies() and not self.has_errors()

    def get_summary(self) -> Dict[str, Any]:
        """Get a summary of the dependency resolution."""
        return {
            "root_package": self.root_package.name,
            "total_dependencies": self.flattened_dependencies.total_dependencies(),
            "max_depth": self.dependency_tree.max_depth,
            "has_circular_dependencies": self.has_circular_dependencies(),
            "circular_count": len(self.circular_dependencies),
            "has_conflicts": self.has_conflicts(),
            "conflict_count": len(self.flattened_dependencies.conflicts),
            "has_errors": self.has_errors(),
            "error_count": len(self.resolution_errors),
            "is_valid": self.is_valid()
        }

    def add_error(self, error: str) -> None:
        """Add a resolution error."""
        self.resolution_errors.append(error)

    def add_circular_dependency(self, circular_ref: CircularRef) -> None:
        """Add a circular dependency detection."""
        self.circular_dependencies.append(circular_ref)
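# --- Illustrative sketch (not part of the diff): how the "first wins" strategy plays out.
# FlatDependencyMap is the dataclass defined just above; the import path and the
# repository names are assumptions for the example.
from apm_cli.models.apm_package import DependencyReference  # path assumed

flat = FlatDependencyMap()
flat.add_dependency(DependencyReference.parse("acme/design-system#v1.0.0"))
# A later reference to the same repository is recorded as a conflict; the first one wins.
flat.add_dependency(DependencyReference.parse("acme/design-system#v2.0.0"), is_conflict=True)

assert flat.total_dependencies() == 1
assert flat.has_conflicts()
print(flat.get_dependency("acme/design-system"))  # acme/design-system#v1.0.0
for conflict in flat.conflicts:
    print(conflict)  # "... v1.0.0 wins over ... v2.0.0 (first declared dependency wins)"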
381
src/apm_cli/deps/github_downloader.py
Normal file
@@ -0,0 +1,381 @@
|
||||
"""GitHub package downloader for APM dependencies."""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, Any
|
||||
import re
|
||||
|
||||
import git
|
||||
from git import Repo
|
||||
from git.exc import GitCommandError, InvalidGitRepositoryError
|
||||
|
||||
from ..core.token_manager import GitHubTokenManager
|
||||
from ..models.apm_package import (
|
||||
DependencyReference,
|
||||
PackageInfo,
|
||||
ResolvedReference,
|
||||
GitReferenceType,
|
||||
validate_apm_package,
|
||||
APMPackage
|
||||
)
|
||||
|
||||
|
||||
class GitHubPackageDownloader:
|
||||
"""Downloads and validates APM packages from GitHub repositories."""
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize the GitHub package downloader."""
|
||||
self.token_manager = GitHubTokenManager()
|
||||
self.git_env = self._setup_git_environment()
|
||||
|
||||
def _setup_git_environment(self) -> Dict[str, Any]:
|
||||
"""Set up Git environment with GitHub authentication using centralized token manager.
|
||||
|
||||
Returns:
|
||||
Dict containing environment variables for Git operations
|
||||
"""
|
||||
# Use centralized token management
|
||||
env = self.token_manager.setup_environment()
|
||||
|
||||
# Get the token for modules (APM package access)
|
||||
self.github_token = self.token_manager.get_token_for_purpose('modules', env)
|
||||
self.has_github_token = self.github_token is not None
|
||||
|
||||
# Configure Git security settings
|
||||
env['GIT_TERMINAL_PROMPT'] = '0'
|
||||
env['GIT_ASKPASS'] = 'echo' # Prevent interactive credential prompts
|
||||
env['GIT_CONFIG_NOSYSTEM'] = '1'
|
||||
env['GIT_CONFIG_GLOBAL'] = '/dev/null'
|
||||
|
||||
return env
|
||||
|
||||
def _sanitize_git_error(self, error_message: str) -> str:
|
||||
"""Sanitize Git error messages to remove potentially sensitive authentication information.
|
||||
|
||||
Args:
|
||||
error_message: Raw error message from Git operations
|
||||
|
||||
Returns:
|
||||
str: Sanitized error message with sensitive data removed
|
||||
"""
|
||||
import re
|
||||
|
||||
# Remove any tokens that might appear in URLs (format: https://token@github.com)
|
||||
sanitized = re.sub(r'https://[^@\s]+@github\.com', 'https://***@github.com', error_message)
|
||||
|
||||
# Remove any tokens that might appear as standalone values
|
||||
sanitized = re.sub(r'(ghp_|gho_|ghu_|ghs_|ghr_)[a-zA-Z0-9_]+', '***', sanitized)
|
||||
|
||||
# Remove environment variable values that might contain tokens
|
||||
sanitized = re.sub(r'(GITHUB_TOKEN|GITHUB_APM_PAT|GH_TOKEN|GITHUB_COPILOT_PAT|GITHUB_NPM_PAT)=[^\s]+', r'\1=***', sanitized)
|
||||
|
||||
return sanitized
|
||||
|
||||
def _build_repo_url(self, repo_ref: str, use_ssh: bool = False) -> str:
|
||||
"""Build the appropriate repository URL for cloning.
|
||||
|
||||
Uses GitHub Enterprise authentication format for private repositories:
|
||||
- x-access-token format for authenticated HTTPS (GitHub Enterprise standard)
|
||||
- SSH URLs for SSH key-based authentication
|
||||
- Standard HTTPS URLs as fallback
|
||||
|
||||
Args:
|
||||
repo_ref: Repository reference in format "owner/repo"
|
||||
use_ssh: Whether to use SSH URL for git operations
|
||||
|
||||
Returns:
|
||||
str: Repository URL suitable for git clone operations
|
||||
"""
|
||||
if use_ssh:
|
||||
# Use SSH URL for private repository access with SSH keys
|
||||
return f"git@github.com:{repo_ref}.git"
|
||||
elif self.github_token:
|
||||
# Use GitHub Enterprise x-access-token format for authenticated access
|
||||
# This is the standard format for GitHub Actions and Enterprise environments
|
||||
return f"https://x-access-token:{self.github_token}@github.com/{repo_ref}.git"
|
||||
else:
|
||||
# Use standard HTTPS URL for public repositories
|
||||
return f"https://github.com/{repo_ref}"
|
||||
|
||||
def _clone_with_fallback(self, repo_url_base: str, target_path: Path, **clone_kwargs) -> Repo:
|
||||
"""Attempt to clone a repository with fallback authentication methods.
|
||||
|
||||
Uses GitHub Enterprise authentication patterns:
|
||||
1. x-access-token format for private repos (GitHub Enterprise standard)
|
||||
2. SSH for SSH key-based authentication
|
||||
3. Standard HTTPS for public repos (fallback)
|
||||
|
||||
Args:
|
||||
repo_url_base: Base repository reference (owner/repo)
|
||||
target_path: Target path for cloning
|
||||
**clone_kwargs: Additional arguments for Repo.clone_from
|
||||
|
||||
Returns:
|
||||
Repo: Successfully cloned repository
|
||||
|
||||
Raises:
|
||||
RuntimeError: If all authentication methods fail
|
||||
"""
|
||||
last_error = None
|
||||
|
||||
# Method 1: Try x-access-token format if token is available (GitHub Enterprise)
|
||||
if self.github_token:
|
||||
try:
|
||||
auth_url = self._build_repo_url(repo_url_base, use_ssh=False)
|
||||
return Repo.clone_from(auth_url, target_path, env=self.git_env, **clone_kwargs)
|
||||
except GitCommandError as e:
|
||||
last_error = e
|
||||
# Continue to next method
|
||||
|
||||
# Method 2: Try SSH if it might work (for SSH key-based authentication)
|
||||
try:
|
||||
ssh_url = self._build_repo_url(repo_url_base, use_ssh=True)
|
||||
return Repo.clone_from(ssh_url, target_path, env=self.git_env, **clone_kwargs)
|
||||
except GitCommandError as e:
|
||||
last_error = e
|
||||
# Continue to next method
|
||||
|
||||
# Method 3: Try standard HTTPS as fallback for public repos
|
||||
try:
|
||||
public_url = f"https://github.com/{repo_url_base}"
|
||||
return Repo.clone_from(public_url, target_path, env=self.git_env, **clone_kwargs)
|
||||
except GitCommandError as e:
|
||||
last_error = e
|
||||
|
||||
# All methods failed
|
||||
error_msg = f"Failed to clone repository {repo_url_base} using all available methods. "
|
||||
if not self.has_github_token:
|
||||
error_msg += "For private repositories, set GITHUB_APM_PAT or GITHUB_TOKEN environment variable, " \
|
||||
"or ensure SSH keys are configured."
|
||||
else:
|
||||
error_msg += "Please check repository access permissions and authentication setup."
|
||||
|
||||
if last_error:
|
||||
sanitized_error = self._sanitize_git_error(str(last_error))
|
||||
error_msg += f" Last error: {sanitized_error}"
|
||||
|
||||
raise RuntimeError(error_msg)
|
||||
|
||||
def resolve_git_reference(self, repo_ref: str) -> ResolvedReference:
|
||||
"""Resolve a Git reference (branch/tag/commit) to a specific commit SHA.
|
||||
|
||||
Args:
|
||||
repo_ref: Repository reference string (e.g., "user/repo#branch")
|
||||
|
||||
Returns:
|
||||
ResolvedReference: Resolved reference with commit SHA
|
||||
|
||||
Raises:
|
||||
ValueError: If the reference format is invalid
|
||||
RuntimeError: If Git operations fail
|
||||
"""
|
||||
# Parse the repository reference
|
||||
try:
|
||||
dep_ref = DependencyReference.parse(repo_ref)
|
||||
except ValueError as e:
|
||||
raise ValueError(f"Invalid repository reference '{repo_ref}': {e}")
|
||||
|
||||
# Default to main branch if no reference specified
|
||||
ref = dep_ref.reference or "main"
|
||||
|
||||
# Pre-analyze the reference type to determine the best approach
|
||||
is_likely_commit = re.match(r'^[a-f0-9]{7,40}$', ref.lower()) is not None
|
||||
|
||||
# Create a temporary directory for Git operations
|
||||
temp_dir = None
|
||||
try:
|
||||
import tempfile
|
||||
temp_dir = Path(tempfile.mkdtemp())
|
||||
|
||||
if is_likely_commit:
|
||||
# For commit SHAs, clone full repository first, then checkout the commit
|
||||
try:
|
||||
repo = self._clone_with_fallback(dep_ref.repo_url, temp_dir)
|
||||
commit = repo.commit(ref)
|
||||
ref_type = GitReferenceType.COMMIT
|
||||
resolved_commit = commit.hexsha
|
||||
ref_name = ref
|
||||
except Exception as e:
|
||||
sanitized_error = self._sanitize_git_error(str(e))
|
||||
raise ValueError(f"Could not resolve commit '{ref}' in repository {dep_ref.repo_url}: {sanitized_error}")
|
||||
else:
|
||||
# For branches and tags, try shallow clone first
|
||||
try:
|
||||
# Try to clone with specific branch/tag first
|
||||
repo = self._clone_with_fallback(
|
||||
dep_ref.repo_url,
|
||||
temp_dir,
|
||||
depth=1,
|
||||
branch=ref
|
||||
)
|
||||
ref_type = GitReferenceType.BRANCH # Could be branch or tag
|
||||
resolved_commit = repo.head.commit.hexsha
|
||||
ref_name = ref
|
||||
|
||||
except GitCommandError:
|
||||
# If branch/tag clone fails, try full clone and resolve reference
|
||||
try:
|
||||
repo = self._clone_with_fallback(dep_ref.repo_url, temp_dir)
|
||||
|
||||
# Try to resolve the reference
|
||||
try:
|
||||
# Try as branch first
|
||||
try:
|
||||
branch = repo.refs[f"origin/{ref}"]
|
||||
ref_type = GitReferenceType.BRANCH
|
||||
resolved_commit = branch.commit.hexsha
|
||||
ref_name = ref
|
||||
except IndexError:
|
||||
# Try as tag
|
||||
try:
|
||||
tag = repo.tags[ref]
|
||||
ref_type = GitReferenceType.TAG
|
||||
resolved_commit = tag.commit.hexsha
|
||||
ref_name = ref
|
||||
except IndexError:
|
||||
raise ValueError(f"Reference '{ref}' not found in repository {dep_ref.repo_url}")
|
||||
|
||||
except Exception as e:
|
||||
sanitized_error = self._sanitize_git_error(str(e))
|
||||
raise ValueError(f"Could not resolve reference '{ref}' in repository {dep_ref.repo_url}: {sanitized_error}")
|
||||
|
||||
except GitCommandError as e:
|
||||
# Check if this might be a private repository access issue
|
||||
if "Authentication failed" in str(e) or "remote: Repository not found" in str(e):
|
||||
error_msg = f"Failed to clone repository {dep_ref.repo_url}. "
|
||||
if not self.has_github_token:
|
||||
error_msg += "This might be a private repository that requires authentication. " \
|
||||
"Please set GITHUB_APM_PAT or GITHUB_TOKEN environment variable."
|
||||
else:
|
||||
error_msg += "Authentication failed. Please check your GitHub token permissions."
|
||||
raise RuntimeError(error_msg)
|
||||
else:
|
||||
sanitized_error = self._sanitize_git_error(str(e))
|
||||
raise RuntimeError(f"Failed to clone repository {dep_ref.repo_url}: {sanitized_error}")
|
||||
|
||||
finally:
|
||||
# Clean up temporary directory
|
||||
if temp_dir and temp_dir.exists():
|
||||
shutil.rmtree(temp_dir, ignore_errors=True)
|
||||
|
||||
return ResolvedReference(
|
||||
original_ref=repo_ref,
|
||||
ref_type=ref_type,
|
||||
resolved_commit=resolved_commit,
|
||||
ref_name=ref_name
|
||||
)
|
||||
|
||||
def download_package(self, repo_ref: str, target_path: Path) -> PackageInfo:
|
||||
"""Download a GitHub repository and validate it as an APM package.
|
||||
|
||||
Args:
|
||||
repo_ref: Repository reference string (e.g., "user/repo#branch")
|
||||
target_path: Local path where package should be downloaded
|
||||
|
||||
Returns:
|
||||
PackageInfo: Information about the downloaded package
|
||||
|
||||
Raises:
|
||||
ValueError: If the repository reference is invalid
|
||||
RuntimeError: If download or validation fails
|
||||
"""
|
||||
# Parse the repository reference
|
||||
try:
|
||||
dep_ref = DependencyReference.parse(repo_ref)
|
||||
except ValueError as e:
|
||||
raise ValueError(f"Invalid repository reference '{repo_ref}': {e}")
|
||||
|
||||
# Resolve the Git reference to get specific commit
|
||||
resolved_ref = self.resolve_git_reference(repo_ref)
|
||||
|
||||
# Create target directory if it doesn't exist
|
||||
target_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# If directory already exists and has content, remove it
|
||||
if target_path.exists() and any(target_path.iterdir()):
|
||||
shutil.rmtree(target_path)
|
||||
target_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
try:
|
||||
# Clone the repository using fallback authentication methods
|
||||
# Use shallow clone for performance if we have a specific commit
|
||||
if resolved_ref.ref_type == GitReferenceType.COMMIT:
|
||||
# For commits, we need to clone and checkout the specific commit
|
||||
repo = self._clone_with_fallback(dep_ref.repo_url, target_path)
|
||||
repo.git.checkout(resolved_ref.resolved_commit)
|
||||
else:
|
||||
# For branches and tags, we can use shallow clone
|
||||
repo = self._clone_with_fallback(
|
||||
dep_ref.repo_url,
|
||||
target_path,
|
||||
depth=1,
|
||||
branch=resolved_ref.ref_name
|
||||
)
|
||||
|
||||
# Remove .git directory to save space and prevent treating as a Git repository
|
||||
git_dir = target_path / ".git"
|
||||
if git_dir.exists():
|
||||
shutil.rmtree(git_dir, ignore_errors=True)
|
||||
|
||||
except GitCommandError as e:
|
||||
# Check if this might be a private repository access issue
|
||||
if "Authentication failed" in str(e) or "remote: Repository not found" in str(e):
|
||||
error_msg = f"Failed to clone repository {dep_ref.repo_url}. "
|
||||
if not self.has_github_token:
|
||||
error_msg += "This might be a private repository that requires authentication. " \
|
||||
"Please set GITHUB_APM_PAT or GITHUB_TOKEN environment variable."
|
||||
else:
|
||||
error_msg += "Authentication failed. Please check your GitHub token permissions."
|
||||
raise RuntimeError(error_msg)
|
||||
else:
|
||||
sanitized_error = self._sanitize_git_error(str(e))
|
||||
raise RuntimeError(f"Failed to clone repository {dep_ref.repo_url}: {sanitized_error}")
|
||||
except RuntimeError:
|
||||
# Re-raise RuntimeError from _clone_with_fallback
|
||||
raise
|
||||
|
||||
# Validate the downloaded package
|
||||
validation_result = validate_apm_package(target_path)
|
||||
if not validation_result.is_valid:
|
||||
# Clean up on validation failure
|
||||
if target_path.exists():
|
||||
shutil.rmtree(target_path, ignore_errors=True)
|
||||
|
||||
error_msg = f"Invalid APM package {dep_ref.repo_url}:\n"
|
||||
for error in validation_result.errors:
|
||||
error_msg += f" - {error}\n"
|
||||
raise RuntimeError(error_msg.strip())
|
||||
|
||||
# Load the APM package metadata
|
||||
if not validation_result.package:
|
||||
raise RuntimeError(f"Package validation succeeded but no package metadata found for {dep_ref.repo_url}")
|
||||
|
||||
package = validation_result.package
|
||||
package.source = dep_ref.to_github_url()
|
||||
package.resolved_commit = resolved_ref.resolved_commit
|
||||
|
||||
# Create and return PackageInfo
|
||||
return PackageInfo(
|
||||
package=package,
|
||||
install_path=target_path,
|
||||
resolved_reference=resolved_ref,
|
||||
installed_at=datetime.now().isoformat()
|
||||
)
|
||||
|
||||
def _get_clone_progress_callback(self):
|
||||
"""Get a progress callback for Git clone operations.
|
||||
|
||||
Returns:
|
||||
Callable that can be used as progress callback for GitPython
|
||||
"""
|
||||
def progress_callback(op_code, cur_count, max_count=None, message=''):
|
||||
"""Progress callback for Git operations."""
|
||||
if max_count:
|
||||
percentage = int((cur_count / max_count) * 100)
|
||||
print(f"\r🚀 Cloning: {percentage}% ({cur_count}/{max_count}) {message}", end='', flush=True)
|
||||
else:
|
||||
print(f"\r🚀 Cloning: {message} ({cur_count})", end='', flush=True)
|
||||
|
||||
return progress_callback
|
||||
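# --- Illustrative usage sketch (not part of the diff). The module path and target
# directory are assumptions; set GITHUB_APM_PAT or GITHUB_TOKEN for private repositories.
from pathlib import Path
from apm_cli.deps.github_downloader import GitHubPackageDownloader  # path assumed

downloader = GitHubPackageDownloader()

# Branches, tags and commit SHAs are first resolved to a concrete commit...
resolved = downloader.resolve_git_reference("acme/design-guidelines#main")
print(resolved)  # e.g. "main (1a2b3c4d)"

# ...then the repository is cloned, its .git directory removed, and the result
# validated as an APM package before PackageInfo is returned.
info = downloader.download_package(
    "acme/design-guidelines#main",
    Path("apm_modules/acme/design-guidelines"),  # target directory is an example
)
print(info.package.name, info.package.version, info.resolved_reference)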
216
src/apm_cli/deps/package_validator.py
Normal file
@@ -0,0 +1,216 @@
|
||||
"""APM package structure validation."""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import List, Optional
|
||||
import os
|
||||
|
||||
from ..models.apm_package import (
|
||||
ValidationResult,
|
||||
APMPackage,
|
||||
validate_apm_package as base_validate_apm_package
|
||||
)
|
||||
|
||||
|
||||
class PackageValidator:
|
||||
"""Validates APM package structure and content."""
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize the package validator."""
|
||||
pass
|
||||
|
||||
def validate_package(self, package_path: Path) -> ValidationResult:
|
||||
"""Validate that a directory contains a valid APM package.
|
||||
|
||||
Args:
|
||||
package_path: Path to the directory to validate
|
||||
|
||||
Returns:
|
||||
ValidationResult: Validation results with any errors/warnings
|
||||
"""
|
||||
return base_validate_apm_package(package_path)
|
||||
|
||||
def validate_package_structure(self, package_path: Path) -> ValidationResult:
|
||||
"""Validate APM package directory structure.
|
||||
|
||||
Checks for required files and directories:
|
||||
- apm.yml at root
|
||||
- .apm/ directory with primitives
|
||||
|
||||
Args:
|
||||
package_path: Path to the package directory
|
||||
|
||||
Returns:
|
||||
ValidationResult: Detailed validation results
|
||||
"""
|
||||
result = ValidationResult()
|
||||
|
||||
if not package_path.exists():
|
||||
result.add_error(f"Package directory does not exist: {package_path}")
|
||||
return result
|
||||
|
||||
if not package_path.is_dir():
|
||||
result.add_error(f"Package path is not a directory: {package_path}")
|
||||
return result
|
||||
|
||||
# Check for apm.yml
|
||||
apm_yml = package_path / "apm.yml"
|
||||
if not apm_yml.exists():
|
||||
result.add_error("Missing required file: apm.yml")
|
||||
return result
|
||||
|
||||
# Try to parse apm.yml
|
||||
try:
|
||||
package = APMPackage.from_apm_yml(apm_yml)
|
||||
result.package = package
|
||||
except (ValueError, FileNotFoundError) as e:
|
||||
result.add_error(f"Invalid apm.yml: {e}")
|
||||
return result
|
||||
|
||||
# Check for .apm directory
|
||||
apm_dir = package_path / ".apm"
|
||||
if not apm_dir.exists():
|
||||
result.add_error("Missing required directory: .apm/")
|
||||
return result
|
||||
|
||||
if not apm_dir.is_dir():
|
||||
result.add_error(".apm must be a directory")
|
||||
return result
|
||||
|
||||
# Check for primitive content
|
||||
primitive_types = ['instructions', 'chatmodes', 'contexts', 'prompts']
|
||||
has_primitives = False
|
||||
|
||||
for primitive_type in primitive_types:
|
||||
primitive_dir = apm_dir / primitive_type
|
||||
if primitive_dir.exists() and primitive_dir.is_dir():
|
||||
md_files = list(primitive_dir.glob("*.md"))
|
||||
if md_files:
|
||||
has_primitives = True
|
||||
# Validate each primitive file
|
||||
for md_file in md_files:
|
||||
self._validate_primitive_file(md_file, result)
|
||||
|
||||
if not has_primitives:
|
||||
result.add_warning("No primitive files found in .apm/ directory")
|
||||
|
||||
return result
|
||||
|
||||
def _validate_primitive_file(self, file_path: Path, result: ValidationResult) -> None:
|
||||
"""Validate a single primitive file.
|
||||
|
||||
Args:
|
||||
file_path: Path to the primitive markdown file
|
||||
result: ValidationResult to add warnings/errors to
|
||||
"""
|
||||
try:
|
||||
content = file_path.read_text(encoding='utf-8')
|
||||
if not content.strip():
|
||||
result.add_warning(f"Empty primitive file: {file_path.name}")
|
||||
except Exception as e:
|
||||
result.add_warning(f"Could not read primitive file {file_path.name}: {e}")
|
||||
|
||||
def validate_primitive_structure(self, apm_dir: Path) -> List[str]:
|
||||
"""Validate the structure of primitives in .apm directory.
|
||||
|
||||
Args:
|
||||
apm_dir: Path to the .apm directory
|
||||
|
||||
Returns:
|
||||
List[str]: List of validation warnings/issues found
|
||||
"""
|
||||
issues = []
|
||||
|
||||
if not apm_dir.exists():
|
||||
issues.append("Missing .apm directory")
|
||||
return issues
|
||||
|
||||
primitive_types = ['instructions', 'chatmodes', 'contexts', 'prompts']
|
||||
found_primitives = False
|
||||
|
||||
for primitive_type in primitive_types:
|
||||
primitive_dir = apm_dir / primitive_type
|
||||
if primitive_dir.exists():
|
||||
if not primitive_dir.is_dir():
|
||||
issues.append(f"{primitive_type} should be a directory")
|
||||
continue
|
||||
|
||||
# Check for markdown files
|
||||
md_files = list(primitive_dir.glob("*.md"))
|
||||
if md_files:
|
||||
found_primitives = True
|
||||
|
||||
# Validate naming convention
|
||||
for md_file in md_files:
|
||||
if not self._is_valid_primitive_name(md_file.name, primitive_type):
|
||||
issues.append(f"Invalid primitive file name: {md_file.name}")
|
||||
|
||||
if not found_primitives:
|
||||
issues.append("No primitive files found in .apm directory")
|
||||
|
||||
return issues
|
||||
|
||||
def _is_valid_primitive_name(self, filename: str, primitive_type: str) -> bool:
|
||||
"""Check if a primitive filename follows naming conventions.
|
||||
|
||||
Args:
|
||||
filename: The filename to validate
|
||||
primitive_type: Type of primitive (instructions, chatmodes, etc.)
|
||||
|
||||
Returns:
|
||||
bool: True if filename is valid
|
||||
"""
|
||||
# Basic validation - should end with .md
|
||||
if not filename.endswith('.md'):
|
||||
return False
|
||||
|
||||
# Should not contain spaces (prefer hyphens or underscores)
|
||||
if ' ' in filename:
|
||||
return False
|
||||
|
||||
# For specific types, check expected suffixes using a mapping
|
||||
name_without_ext = filename[:-3] # Remove .md
|
||||
suffix_map = {
|
||||
'instructions': '.instructions',
|
||||
'chatmodes': '.chatmode',
|
||||
'contexts': '.context',
|
||||
'prompts': '.prompt',
|
||||
}
|
||||
expected_suffix = suffix_map.get(primitive_type)
|
||||
if expected_suffix and not name_without_ext.endswith(expected_suffix):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def get_package_info_summary(self, package_path: Path) -> Optional[str]:
|
||||
"""Get a summary of package information for display.
|
||||
|
||||
Args:
|
||||
package_path: Path to the package directory
|
||||
|
||||
Returns:
|
||||
Optional[str]: Summary string or None if package is invalid
|
||||
"""
|
||||
validation_result = self.validate_package(package_path)
|
||||
|
||||
if not validation_result.is_valid or not validation_result.package:
|
||||
return None
|
||||
|
||||
package = validation_result.package
|
||||
summary = f"{package.name} v{package.version}"
|
||||
|
||||
if package.description:
|
||||
summary += f" - {package.description}"
|
||||
|
||||
# Count primitives
|
||||
apm_dir = package_path / ".apm"
|
||||
if apm_dir.exists():
|
||||
primitive_count = 0
|
||||
for primitive_type in ['instructions', 'chatmodes', 'contexts', 'prompts']:
|
||||
primitive_dir = apm_dir / primitive_type
|
||||
if primitive_dir.exists():
|
||||
primitive_count += len(list(primitive_dir.glob("*.md")))
|
||||
|
||||
if primitive_count > 0:
|
||||
summary += f" ({primitive_count} primitives)"
|
||||
|
||||
return summary
|
||||
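# --- Illustrative usage sketch (not part of the diff); the package path is a placeholder
# and the import path is assumed from the file layout.
from pathlib import Path
from apm_cli.deps.package_validator import PackageValidator  # path assumed

validator = PackageValidator()
result = validator.validate_package_structure(Path("apm_modules/acme/design-guidelines"))

print(result.summary())  # "✅ Package is valid", "⚠️ ... warning(s)" or "❌ ... error(s)"
for issue in result.errors + result.warnings:
    print(" -", issue)

# Primitive files are expected to end in .md with a type suffix, e.g.
# .apm/prompts/review.prompt.md or .apm/instructions/python.instructions.md
print(validator.get_package_info_summary(Path("apm_modules/acme/design-guidelines")))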
102
src/apm_cli/deps/verifier.py
Normal file
@@ -0,0 +1,102 @@
"""Dependency verification for APM-CLI."""

import os
from pathlib import Path
import yaml
from ..factory import PackageManagerFactory, ClientFactory


def load_apm_config(config_file="apm.yml"):
    """Load the APM configuration file.

    Args:
        config_file (str, optional): Path to the configuration file. Defaults to "apm.yml".

    Returns:
        dict: The configuration, or None if loading failed.
    """
    try:
        config_path = Path(config_file)
        if not config_path.exists():
            print(f"Configuration file {config_file} not found.")
            return None

        with open(config_path, 'r', encoding='utf-8') as f:
            config = yaml.safe_load(f)

        return config
    except Exception as e:
        print(f"Error loading {config_file}: {e}")
        return None


def verify_dependencies(config_file="apm.yml"):
    """Check if apm.yml servers are installed.

    Args:
        config_file (str, optional): Path to the configuration file. Defaults to "apm.yml".

    Returns:
        tuple: (bool, list, list) - All installed status, list of installed, list of missing
    """
    config = load_apm_config(config_file)
    if not config or 'servers' not in config:
        return False, [], []

    try:
        package_manager = PackageManagerFactory.create_package_manager()
        installed = package_manager.list_installed()

        # Check which servers are missing
        required_servers = config['servers']
        missing = [server for server in required_servers if server not in installed]
        installed_servers = [server for server in required_servers if server in installed]

        all_installed = len(missing) == 0

        return all_installed, installed_servers, missing
    except Exception as e:
        print(f"Error verifying dependencies: {e}")
        return False, [], []


def install_missing_dependencies(config_file="apm.yml", client_type="vscode"):
    """Install missing dependencies from apm.yml for specified client.

    Args:
        config_file (str, optional): Path to the configuration file. Defaults to "apm.yml".
        client_type (str, optional): Type of client to configure. Defaults to "vscode".

    Returns:
        tuple: (bool, list) - Success status and list of installed packages
    """
    _, _, missing = verify_dependencies(config_file)

    if not missing:
        return True, []

    installed = []

    # Get client adapter and package manager
    client = ClientFactory.create_client(client_type)
    package_manager = PackageManagerFactory.create_package_manager()

    for server in missing:
        try:
            # Install the package using the package manager
            install_result = package_manager.install(server)

            if install_result:
                # Configure the client to use the server
                # For VSCode this updates the .vscode/mcp.json file in the project root
                client_result = client.configure_mcp_server(server, server_name=server)

                if client_result:
                    installed.append(server)
                else:
                    print(f"Warning: Package {server} installed but client configuration failed")

        except Exception as e:
            print(f"Error installing {server}: {e}")

    return len(installed) == len(missing), installed
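# --- Illustrative usage sketch (not part of the diff): checking the MCP servers listed
# under `servers:` in apm.yml and installing the missing ones for the VS Code client.
# The import path is assumed from the file layout.
from apm_cli.deps.verifier import verify_dependencies, install_missing_dependencies  # path assumed

all_installed, installed, missing = verify_dependencies("apm.yml")
if not all_installed:
    print(f"Missing MCP servers: {', '.join(missing)}")
    success, newly_installed = install_missing_dependencies("apm.yml", client_type="vscode")
    print(f"Installed {len(newly_installed)} server(s), success={success}")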
61
src/apm_cli/factory.py
Normal file
@@ -0,0 +1,61 @@
"""Factory classes for creating adapters."""

from .adapters.client.vscode import VSCodeClientAdapter
from .adapters.client.codex import CodexClientAdapter
from .adapters.package_manager.default_manager import DefaultMCPPackageManager


class ClientFactory:
    """Factory for creating MCP client adapters."""

    @staticmethod
    def create_client(client_type):
        """Create a client adapter based on the specified type.

        Args:
            client_type (str): Type of client adapter to create.

        Returns:
            MCPClientAdapter: An instance of the specified client adapter.

        Raises:
            ValueError: If the client type is not supported.
        """
        clients = {
            "vscode": VSCodeClientAdapter,
            "codex": CodexClientAdapter,
            # Add more clients as needed
        }

        if client_type.lower() not in clients:
            raise ValueError(f"Unsupported client type: {client_type}")

        return clients[client_type.lower()]()


class PackageManagerFactory:
    """Factory for creating MCP package manager adapters."""

    @staticmethod
    def create_package_manager(manager_type="default"):
        """Create a package manager adapter based on the specified type.

        Args:
            manager_type (str, optional): Type of package manager adapter to create.
                Defaults to "default".

        Returns:
            MCPPackageManagerAdapter: An instance of the specified package manager adapter.

        Raises:
            ValueError: If the package manager type is not supported.
        """
        managers = {
            "default": DefaultMCPPackageManager,
            # Add more package managers as they emerge
        }

        if manager_type.lower() not in managers:
            raise ValueError(f"Unsupported package manager type: {manager_type}")

        return managers[manager_type.lower()]()
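# --- Illustrative usage sketch (not part of the diff): the factories keep callers
# decoupled from the concrete adapter classes. Import path assumed from the file layout.
from apm_cli.factory import ClientFactory, PackageManagerFactory  # path assumed

client = ClientFactory.create_client("vscode")              # VSCodeClientAdapter instance
manager = PackageManagerFactory.create_package_manager()    # DefaultMCPPackageManager instance

try:
    ClientFactory.create_client("emacs")
except ValueError as err:
    print(err)  # Unsupported client type: emacs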
21
src/apm_cli/models/__init__.py
Normal file
@@ -0,0 +1,21 @@
"""Models for APM CLI data structures."""

from .apm_package import (
    APMPackage,
    DependencyReference,
    ValidationResult,
    ValidationError,
    ResolvedReference,
    PackageInfo,
    GitReferenceType,
)

__all__ = [
    "APMPackage",
    "DependencyReference",
    "ValidationResult",
    "ValidationError",
    "ResolvedReference",
    "PackageInfo",
    "GitReferenceType",
]
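# --- Illustrative sketch (not part of the diff): the re-exports above let callers import
# the data models from the package root rather than from apm_package directly.
# Import path and repository name are assumptions.
from apm_cli.models import DependencyReference, GitReferenceType  # path assumed

dep = DependencyReference.parse("user/repo#v1.2.3")
print(dep.repo_url, dep.reference, dep.to_github_url())
print(GitReferenceType.TAG.value)  # "tag"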
483
src/apm_cli/models/apm_package.py
Normal file
@@ -0,0 +1,483 @@
|
||||
"""APM Package data models and validation logic."""
|
||||
|
||||
import re
|
||||
import urllib.parse
|
||||
import yaml
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Optional, List, Dict, Any, Union
|
||||
|
||||
|
||||
class GitReferenceType(Enum):
|
||||
"""Types of Git references supported."""
|
||||
BRANCH = "branch"
|
||||
TAG = "tag"
|
||||
COMMIT = "commit"
|
||||
|
||||
|
||||
class ValidationError(Enum):
|
||||
"""Types of validation errors for APM packages."""
|
||||
MISSING_APM_YML = "missing_apm_yml"
|
||||
MISSING_APM_DIR = "missing_apm_dir"
|
||||
INVALID_YML_FORMAT = "invalid_yml_format"
|
||||
MISSING_REQUIRED_FIELD = "missing_required_field"
|
||||
INVALID_VERSION_FORMAT = "invalid_version_format"
|
||||
INVALID_DEPENDENCY_FORMAT = "invalid_dependency_format"
|
||||
EMPTY_APM_DIR = "empty_apm_dir"
|
||||
INVALID_PRIMITIVE_STRUCTURE = "invalid_primitive_structure"
|
||||
|
||||
|
||||
@dataclass
|
||||
class ResolvedReference:
|
||||
"""Represents a resolved Git reference."""
|
||||
original_ref: str
|
||||
ref_type: GitReferenceType
|
||||
resolved_commit: str
|
||||
ref_name: str # The actual branch/tag/commit name
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""String representation of resolved reference."""
|
||||
if self.ref_type == GitReferenceType.COMMIT:
|
||||
return f"{self.resolved_commit[:8]}"
|
||||
return f"{self.ref_name} ({self.resolved_commit[:8]})"
|
||||
|
||||
|
||||
@dataclass
|
||||
class DependencyReference:
|
||||
"""Represents a reference to an APM dependency."""
|
||||
repo_url: str # e.g., "user/repo" or "github.com/user/repo"
|
||||
reference: Optional[str] = None # e.g., "main", "v1.0.0", "abc123"
|
||||
alias: Optional[str] = None # Optional alias for the dependency
|
||||
|
||||
@classmethod
|
||||
def parse(cls, dependency_str: str) -> "DependencyReference":
|
||||
"""Parse a dependency string into a DependencyReference.
|
||||
|
||||
Supports formats:
|
||||
- user/repo
|
||||
- user/repo#branch
|
||||
- user/repo#v1.0.0
|
||||
- user/repo#commit_sha
|
||||
- github.com/user/repo#ref
|
||||
- user/repo@alias
|
||||
- user/repo#ref@alias
|
||||
|
||||
Args:
|
||||
dependency_str: The dependency string to parse
|
||||
|
||||
Returns:
|
||||
DependencyReference: Parsed dependency reference
|
||||
|
||||
Raises:
|
||||
ValueError: If the dependency string format is invalid
|
||||
"""
|
||||
if not dependency_str.strip():
|
||||
raise ValueError("Empty dependency string")
|
||||
|
||||
# Check for control characters (newlines, tabs, etc.)
|
||||
if any(ord(c) < 32 for c in dependency_str):
|
||||
raise ValueError("Dependency string contains invalid control characters")
|
||||
|
||||
# Handle SSH URLs first (before @ processing) to avoid conflict with alias separator
|
||||
original_str = dependency_str
|
||||
if dependency_str.startswith("git@github.com:"):
|
||||
# For SSH URLs, extract repo part before @ processing
|
||||
ssh_repo_part = dependency_str[len("git@github.com:"):]
|
||||
if ssh_repo_part.endswith(".git"):
|
||||
ssh_repo_part = ssh_repo_part[:-4]
|
||||
|
||||
# Handle reference and alias in SSH URL
|
||||
reference = None
|
||||
alias = None
|
||||
|
||||
if "@" in ssh_repo_part:
|
||||
ssh_repo_part, alias = ssh_repo_part.rsplit("@", 1)
|
||||
alias = alias.strip()
|
||||
|
||||
if "#" in ssh_repo_part:
|
||||
repo_part, reference = ssh_repo_part.rsplit("#", 1)
|
||||
reference = reference.strip()
|
||||
else:
|
||||
repo_part = ssh_repo_part
|
||||
|
||||
repo_url = repo_part.strip()
|
||||
else:
|
||||
# Handle alias (@alias) for non-SSH URLs
|
||||
alias = None
|
||||
if "@" in dependency_str:
|
||||
dependency_str, alias = dependency_str.rsplit("@", 1)
|
||||
alias = alias.strip()
|
||||
|
||||
# Handle reference (#ref)
|
||||
reference = None
|
||||
if "#" in dependency_str:
|
||||
repo_part, reference = dependency_str.rsplit("#", 1)
|
||||
reference = reference.strip()
|
||||
else:
|
||||
repo_part = dependency_str
|
||||
|
||||
# SECURITY: Use urllib.parse for all URL validation to avoid substring vulnerabilities
|
||||
|
||||
repo_url = repo_part.strip()
|
||||
|
||||
# Normalize to URL format for secure parsing - always use urllib.parse, never substring checks
|
||||
if repo_url.startswith(("https://", "http://")):
|
||||
# Already a full URL - parse directly
|
||||
parsed_url = urllib.parse.urlparse(repo_url)
|
||||
else:
|
||||
# Safely construct GitHub URL from various input formats
|
||||
parts = repo_url.split("/")
|
||||
if len(parts) >= 3 and parts[0] == "github.com":
|
||||
# Format: github.com/user/repo (must be precisely so)
|
||||
user_repo = "/".join(parts[1:3])
|
||||
elif len(parts) >= 2 and "." not in parts[0]:
|
||||
# Format: user/repo (no dot in user part, so not a domain)
|
||||
user_repo = "/".join(parts[:2])
|
||||
else:
|
||||
raise ValueError(f"Only GitHub repositories are supported. Use 'user/repo' or 'github.com/user/repo' format")
|
||||
|
||||
# Validate format before URL construction (security critical)
|
||||
if not user_repo or "/" not in user_repo:
|
||||
raise ValueError(f"Invalid repository format: {repo_url}. Expected 'user/repo' or 'github.com/user/repo'")
|
||||
|
||||
parts = user_repo.split("/")
|
||||
if len(parts) < 2 or not parts[0] or not parts[1]:
|
||||
raise ValueError(f"Invalid repository format: {repo_url}. Expected 'user/repo' or 'github.com/user/repo'")
|
||||
|
||||
user, repo = parts[0], parts[1]
|
||||
|
||||
# Security: validate characters to prevent injection
|
||||
if not re.match(r'^[a-zA-Z0-9._-]+$', user):
|
||||
raise ValueError(f"Invalid user name: {user}")
|
||||
if not re.match(r'^[a-zA-Z0-9._-]+$', repo[:-4] if repo.endswith('.git') else repo):
|
||||
raise ValueError(f"Invalid repository name: {repo}")
|
||||
|
||||
# Safely construct URL - this is now secure
|
||||
github_url = urllib.parse.urljoin("https://github.com/", f"{user}/{repo}")
|
||||
parsed_url = urllib.parse.urlparse(github_url)
|
||||
|
||||
# SECURITY: Validate that this is actually a GitHub URL with exact hostname match
|
||||
if parsed_url.netloc != "github.com":
|
||||
raise ValueError(f"Only GitHub repositories are supported, got hostname: {parsed_url.netloc}")
|
||||
|
||||
# Extract and validate the path
|
||||
path = parsed_url.path.strip("/")
|
||||
if not path:
|
||||
raise ValueError("Repository path cannot be empty")
|
||||
|
||||
# Remove .git suffix if present
|
||||
if path.endswith(".git"):
|
||||
path = path[:-4]
|
||||
|
||||
# Validate path is exactly user/repo format
|
||||
path_parts = path.split("/")
|
||||
if len(path_parts) != 2:
|
||||
raise ValueError(f"Invalid repository path: expected 'user/repo', got '{path}'")
|
||||
|
||||
user, repo = path_parts
|
||||
if not user or not repo:
|
||||
raise ValueError(f"Invalid repository format: user and repo names cannot be empty")
|
||||
|
||||
# Validate user and repo names contain only allowed characters
|
||||
if not re.match(r'^[a-zA-Z0-9._-]+$', user):
|
||||
raise ValueError(f"Invalid user name: {user}")
|
||||
if not re.match(r'^[a-zA-Z0-9._-]+$', repo):
|
||||
raise ValueError(f"Invalid repository name: {repo}")
|
||||
|
||||
repo_url = f"{user}/{repo}"
|
||||
|
||||
# Remove trailing .git if present after normalization
|
||||
if repo_url.endswith(".git"):
|
||||
repo_url = repo_url[:-4]
|
||||
|
||||
|
||||
# Validate repo format (should be user/repo)
|
||||
if not re.match(r'^[a-zA-Z0-9._-]+/[a-zA-Z0-9._-]+$', repo_url):
|
||||
raise ValueError(f"Invalid repository format: {repo_url}. Expected 'user/repo'")
|
||||
|
||||
# Validate alias characters if present
|
||||
if alias and not re.match(r'^[a-zA-Z0-9._-]+$', alias):
|
||||
raise ValueError(f"Invalid alias: {alias}. Aliases can only contain letters, numbers, dots, underscores, and hyphens")
|
||||
|
||||
return cls(repo_url=repo_url, reference=reference, alias=alias)
|
||||
|
||||
def to_github_url(self) -> str:
|
||||
"""Convert to full GitHub URL."""
|
||||
return f"https://github.com/{self.repo_url}"
|
||||
|
||||
def get_display_name(self) -> str:
|
||||
"""Get display name for this dependency (alias or repo name)."""
|
||||
if self.alias:
|
||||
return self.alias
|
||||
return self.repo_url # Full repo URL for disambiguation
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""String representation of the dependency reference."""
|
||||
result = self.repo_url
|
||||
if self.reference:
|
||||
result += f"#{self.reference}"
|
||||
if self.alias:
|
||||
result += f"@{self.alias}"
|
||||
return result
|
||||
|
||||
|
||||
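# Illustrative examples (not part of the diff) of the reference formats parse() accepts;
# the repository names are made up:
#
#   DependencyReference.parse("user/repo")                     # repo_url="user/repo"
#   DependencyReference.parse("github.com/user/repo#v1.0.0")   # reference="v1.0.0"
#   DependencyReference.parse("git@github.com:user/repo.git")  # SSH form -> repo_url="user/repo"
#   DependencyReference.parse("user/repo#main@docs")           # reference="main", alias="docs"
#
# str() round-trips the parsed pieces:
#   str(DependencyReference.parse("user/repo#main@docs")) == "user/repo#main@docs"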
@dataclass
|
||||
class APMPackage:
|
||||
"""Represents an APM package with metadata."""
|
||||
name: str
|
||||
version: str
|
||||
description: Optional[str] = None
|
||||
author: Optional[str] = None
|
||||
license: Optional[str] = None
|
||||
source: Optional[str] = None # Source location (for dependencies)
|
||||
resolved_commit: Optional[str] = None # Resolved commit SHA (for dependencies)
|
||||
dependencies: Optional[Dict[str, List[Union[DependencyReference, str]]]] = None # Mixed types for APM/MCP
|
||||
scripts: Optional[Dict[str, str]] = None
|
||||
package_path: Optional[Path] = None # Local path to package
|
||||
|
||||
@classmethod
|
||||
def from_apm_yml(cls, apm_yml_path: Path) -> "APMPackage":
|
||||
"""Load APM package from apm.yml file.
|
||||
|
||||
Args:
|
||||
apm_yml_path: Path to the apm.yml file
|
||||
|
||||
Returns:
|
||||
APMPackage: Loaded package instance
|
||||
|
||||
Raises:
|
||||
ValueError: If the file is invalid or missing required fields
|
||||
FileNotFoundError: If the file doesn't exist
|
||||
"""
|
||||
if not apm_yml_path.exists():
|
||||
raise FileNotFoundError(f"apm.yml not found: {apm_yml_path}")
|
||||
|
||||
try:
|
||||
with open(apm_yml_path, 'r', encoding='utf-8') as f:
|
||||
data = yaml.safe_load(f)
|
||||
except yaml.YAMLError as e:
|
||||
raise ValueError(f"Invalid YAML format in {apm_yml_path}: {e}")
|
||||
|
||||
if not isinstance(data, dict):
|
||||
raise ValueError(f"apm.yml must contain a YAML object, got {type(data)}")
|
||||
|
||||
# Required fields
|
||||
if 'name' not in data:
|
||||
raise ValueError("Missing required field 'name' in apm.yml")
|
||||
if 'version' not in data:
|
||||
raise ValueError("Missing required field 'version' in apm.yml")
|
||||
|
||||
# Parse dependencies
|
||||
dependencies = None
|
||||
if 'dependencies' in data and isinstance(data['dependencies'], dict):
|
||||
dependencies = {}
|
||||
for dep_type, dep_list in data['dependencies'].items():
|
||||
if isinstance(dep_list, list):
|
||||
if dep_type == 'apm':
|
||||
# APM dependencies need to be parsed as DependencyReference objects
|
||||
parsed_deps = []
|
||||
for dep_str in dep_list:
|
||||
if isinstance(dep_str, str):
|
||||
try:
|
||||
parsed_deps.append(DependencyReference.parse(dep_str))
|
||||
except ValueError as e:
|
||||
raise ValueError(f"Invalid APM dependency '{dep_str}': {e}")
|
||||
dependencies[dep_type] = parsed_deps
|
||||
else:
|
||||
# Other dependencies (like MCP) remain as strings
|
||||
dependencies[dep_type] = [str(dep) for dep in dep_list if isinstance(dep, str)]
|
||||
|
||||
return cls(
|
||||
name=data['name'],
|
||||
version=data['version'],
|
||||
description=data.get('description'),
|
||||
author=data.get('author'),
|
||||
license=data.get('license'),
|
||||
dependencies=dependencies,
|
||||
scripts=data.get('scripts'),
|
||||
package_path=apm_yml_path.parent
|
||||
)
|
||||
|
||||
def get_apm_dependencies(self) -> List[DependencyReference]:
|
||||
"""Get list of APM dependencies."""
|
||||
if not self.dependencies or 'apm' not in self.dependencies:
|
||||
return []
|
||||
# Filter to only return DependencyReference objects
|
||||
return [dep for dep in self.dependencies['apm'] if isinstance(dep, DependencyReference)]
|
||||
|
||||
def get_mcp_dependencies(self) -> List[str]:
|
||||
"""Get list of MCP dependencies (as strings for compatibility)."""
|
||||
if not self.dependencies or 'mcp' not in self.dependencies:
|
||||
return []
|
||||
# MCP deps are stored as strings, not DependencyReference objects
|
||||
return [str(dep) if isinstance(dep, DependencyReference) else dep
|
||||
for dep in self.dependencies.get('mcp', [])]
|
||||
|
||||
def has_apm_dependencies(self) -> bool:
|
||||
"""Check if this package has APM dependencies."""
|
||||
return bool(self.get_apm_dependencies())
|
||||
|
||||
|
||||
@dataclass
|
||||
class ValidationResult:
|
||||
"""Result of APM package validation."""
|
||||
is_valid: bool
|
||||
errors: List[str]
|
||||
warnings: List[str]
|
||||
package: Optional[APMPackage] = None
|
||||
|
||||
def __init__(self):
|
||||
self.is_valid = True
|
||||
self.errors = []
|
||||
self.warnings = []
|
||||
self.package = None
|
||||
|
||||
def add_error(self, error: str) -> None:
|
||||
"""Add a validation error."""
|
||||
self.errors.append(error)
|
||||
self.is_valid = False
|
||||
|
||||
def add_warning(self, warning: str) -> None:
|
||||
"""Add a validation warning."""
|
||||
self.warnings.append(warning)
|
||||
|
||||
def has_issues(self) -> bool:
|
||||
"""Check if there are any errors or warnings."""
|
||||
return bool(self.errors or self.warnings)
|
||||
|
||||
def summary(self) -> str:
|
||||
"""Get a summary of validation results."""
|
||||
if self.is_valid and not self.warnings:
|
||||
return "✅ Package is valid"
|
||||
elif self.is_valid and self.warnings:
|
||||
return f"⚠️ Package is valid with {len(self.warnings)} warning(s)"
|
||||
else:
|
||||
return f"❌ Package is invalid with {len(self.errors)} error(s)"
|
||||
|
||||
|
||||
@dataclass
|
||||
class PackageInfo:
|
||||
"""Information about a downloaded/installed package."""
|
||||
package: APMPackage
|
||||
install_path: Path
|
||||
resolved_reference: Optional[ResolvedReference] = None
|
||||
installed_at: Optional[str] = None # ISO timestamp
|
||||
|
||||
def get_primitives_path(self) -> Path:
|
||||
"""Get path to the .apm directory for this package."""
|
||||
return self.install_path / ".apm"
|
||||
|
||||
def has_primitives(self) -> bool:
|
||||
"""Check if the package has any primitives."""
|
||||
apm_dir = self.get_primitives_path()
|
||||
if not apm_dir.exists():
|
||||
return False
|
||||
|
||||
# Check for any primitive files in subdirectories
|
||||
for primitive_type in ['instructions', 'chatmodes', 'contexts', 'prompts']:
|
||||
primitive_dir = apm_dir / primitive_type
|
||||
if primitive_dir.exists() and any(primitive_dir.iterdir()):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def validate_apm_package(package_path: Path) -> ValidationResult:
|
||||
"""Validate that a directory contains a valid APM package.
|
||||
|
||||
Args:
|
||||
package_path: Path to the directory to validate
|
||||
|
||||
Returns:
|
||||
ValidationResult: Validation results with any errors/warnings
|
||||
"""
|
||||
result = ValidationResult()
|
||||
|
||||
# Check if directory exists
|
||||
if not package_path.exists():
|
||||
result.add_error(f"Package directory does not exist: {package_path}")
|
||||
return result
|
||||
|
||||
if not package_path.is_dir():
|
||||
result.add_error(f"Package path is not a directory: {package_path}")
|
||||
return result
|
||||
|
||||
# Check for apm.yml
|
||||
apm_yml_path = package_path / "apm.yml"
|
||||
if not apm_yml_path.exists():
|
||||
result.add_error("Missing required file: apm.yml")
|
||||
return result
|
||||
|
||||
# Try to parse apm.yml
|
||||
try:
|
||||
package = APMPackage.from_apm_yml(apm_yml_path)
|
||||
result.package = package
|
||||
except (ValueError, FileNotFoundError) as e:
|
||||
result.add_error(f"Invalid apm.yml: {e}")
|
||||
return result
|
||||
|
||||
# Check for .apm directory
|
||||
apm_dir = package_path / ".apm"
|
||||
if not apm_dir.exists():
|
||||
result.add_error("Missing required directory: .apm/")
|
||||
return result
|
||||
|
||||
if not apm_dir.is_dir():
|
||||
result.add_error(".apm must be a directory")
|
||||
return result
|
||||
|
||||
# Check if .apm directory has any content
|
||||
primitive_types = ['instructions', 'chatmodes', 'contexts', 'prompts']
|
||||
has_primitives = False
|
||||
|
||||
for primitive_type in primitive_types:
|
||||
primitive_dir = apm_dir / primitive_type
|
||||
if primitive_dir.exists() and primitive_dir.is_dir():
|
||||
# Check if directory has any markdown files
|
||||
md_files = list(primitive_dir.glob("*.md"))
|
||||
if md_files:
|
||||
has_primitives = True
|
||||
# Validate each primitive file has basic structure
|
||||
for md_file in md_files:
|
||||
try:
|
||||
content = md_file.read_text(encoding='utf-8')
|
||||
if not content.strip():
|
||||
result.add_warning(f"Empty primitive file: {md_file.relative_to(package_path)}")
|
||||
except Exception as e:
|
||||
result.add_warning(f"Could not read primitive file {md_file.relative_to(package_path)}: {e}")
|
||||
|
||||
if not has_primitives:
|
||||
result.add_warning("No primitive files found in .apm/ directory")
|
||||
|
||||
# Version format validation (basic semver check)
|
||||
if package and package.version:
|
||||
if not re.match(r'^\d+\.\d+\.\d+', package.version):
|
||||
result.add_warning(f"Version '{package.version}' doesn't follow semantic versioning (x.y.z)")
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def parse_git_reference(ref_string: str) -> tuple[GitReferenceType, str]:
|
||||
"""Parse a git reference string to determine its type.
|
||||
|
||||
Args:
|
||||
ref_string: Git reference (branch, tag, or commit)
|
||||
|
||||
Returns:
|
||||
tuple: (GitReferenceType, cleaned_reference)
|
||||
"""
|
||||
if not ref_string:
|
||||
return GitReferenceType.BRANCH, "main" # Default to main branch
|
||||
|
||||
ref = ref_string.strip()
|
||||
|
||||
# Check if it looks like a commit SHA (40 hex chars or 7+ hex chars)
|
||||
if re.match(r'^[a-f0-9]{7,40}$', ref.lower()):
|
||||
return GitReferenceType.COMMIT, ref
|
||||
|
||||
# Check if it looks like a semantic version tag
|
||||
if re.match(r'^v?\d+\.\d+\.\d+', ref):
|
||||
return GitReferenceType.TAG, ref
|
||||
|
||||
# Otherwise assume it's a branch
|
||||
return GitReferenceType.BRANCH, ref
|
||||
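# --- Illustrative usage sketch (not part of the diff); paths and references are
# placeholders, and the import path is assumed from the file layout.
from pathlib import Path
from apm_cli.models.apm_package import parse_git_reference, validate_apm_package  # path assumed

print(parse_git_reference("v1.2.0"))     # semver-looking ref -> (GitReferenceType.TAG, 'v1.2.0')
print(parse_git_reference("a1b2c3d"))    # 7-40 hex chars -> (GitReferenceType.COMMIT, 'a1b2c3d')
print(parse_git_reference(""))           # empty -> defaults to (GitReferenceType.BRANCH, 'main')
print(parse_git_reference("feature/x"))  # anything else -> (GitReferenceType.BRANCH, 'feature/x')

result = validate_apm_package(Path("."))
print(result.summary())
for warning in result.warnings:
    print("warning:", warning)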
12
src/apm_cli/output/__init__.py
Normal file
@@ -0,0 +1,12 @@
"""Output formatting and presentation layer for APM CLI."""

from .formatters import CompilationFormatter
from .models import CompilationResults, ProjectAnalysis, OptimizationDecision, OptimizationStats

__all__ = [
    'CompilationFormatter',
    'CompilationResults',
    'ProjectAnalysis',
    'OptimizationDecision',
    'OptimizationStats'
]
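# --- Illustrative sketch (not part of the diff): choosing a formatter mode. Building a
# CompilationResults instance is out of scope here, so it is taken as a parameter;
# the import path is assumed from the file layout.
from apm_cli.output import CompilationFormatter  # path assumed


def render_compilation(results, plain_output: bool = False) -> str:
    """Render compilation results, falling back to plain text when colors are unwanted."""
    formatter = CompilationFormatter(use_color=not plain_output)
    if results.is_dry_run:
        return formatter.format_dry_run(results)
    return formatter.format_default(results)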
911
src/apm_cli/output/formatters.py
Normal file
@@ -0,0 +1,911 @@
|
||||
"""Professional CLI output formatters for APM compilation."""
|
||||
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import List, Optional
|
||||
|
||||
try:
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
from rich.tree import Tree
|
||||
from rich.text import Text
|
||||
from rich.panel import Panel
|
||||
from rich import box
|
||||
from io import StringIO
|
||||
RICH_AVAILABLE = True
|
||||
except ImportError:
|
||||
RICH_AVAILABLE = False
|
||||
|
||||
from .models import CompilationResults, OptimizationDecision, PlacementStrategy
|
||||
|
||||
|
||||
class CompilationFormatter:
|
||||
"""Professional formatter for compilation output with fallback for no-rich environments."""
|
||||
|
||||
def __init__(self, use_color: bool = True):
|
||||
"""Initialize formatter.
|
||||
|
||||
Args:
|
||||
use_color: Whether to use colors and rich formatting.
|
||||
"""
|
||||
self.use_color = use_color and RICH_AVAILABLE
|
||||
self.console = Console() if self.use_color else None
|
||||
|
||||
def format_default(self, results: CompilationResults) -> str:
|
||||
"""Format default compilation output.
|
||||
|
||||
Args:
|
||||
results: Compilation results to format.
|
||||
|
||||
Returns:
|
||||
Formatted output string.
|
||||
"""
|
||||
lines = []
|
||||
|
||||
# Phase 1: Project Discovery
|
||||
lines.extend(self._format_project_discovery(results.project_analysis))
|
||||
lines.append("")
|
||||
|
||||
# Phase 2: Optimization Progress
|
||||
lines.extend(self._format_optimization_progress(results.optimization_decisions, results.project_analysis))
|
||||
lines.append("")
|
||||
|
||||
# Phase 3: Results Summary
|
||||
lines.extend(self._format_results_summary(results))
|
||||
|
||||
# Issues (warnings/errors)
|
||||
if results.has_issues:
|
||||
lines.append("")
|
||||
lines.extend(self._format_issues(results.warnings, results.errors))
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
def format_verbose(self, results: CompilationResults) -> str:
|
||||
"""Format verbose compilation output with mathematical details.
|
||||
|
||||
Args:
|
||||
results: Compilation results to format.
|
||||
|
||||
Returns:
|
||||
Formatted verbose output string.
|
||||
"""
|
||||
lines = []
|
||||
|
||||
# Phase 1: Project Discovery
|
||||
lines.extend(self._format_project_discovery(results.project_analysis))
|
||||
lines.append("")
|
||||
|
||||
# Phase 2: Optimization Progress
|
||||
lines.extend(self._format_optimization_progress(results.optimization_decisions, results.project_analysis))
|
||||
lines.append("")
|
||||
|
||||
# Phase 3: Mathematical Analysis Section (verbose only)
|
||||
lines.extend(self._format_mathematical_analysis(results.optimization_decisions))
|
||||
lines.append("")
|
||||
|
||||
# Phase 4: Coverage vs. Efficiency Explanation (verbose only)
|
||||
lines.extend(self._format_coverage_explanation(results.optimization_stats))
|
||||
lines.append("")
|
||||
|
||||
# Phase 5: Detailed Performance Metrics (verbose only)
|
||||
lines.extend(self._format_detailed_metrics(results.optimization_stats))
|
||||
lines.append("")
|
||||
|
||||
# Phase 6: Final Summary (Generated X files + placement distribution)
|
||||
lines.extend(self._format_final_summary(results))
|
||||
|
||||
# Issues (warnings/errors)
|
||||
if results.has_issues:
|
||||
lines.append("")
|
||||
lines.extend(self._format_issues(results.warnings, results.errors))
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
def _format_final_summary(self, results: CompilationResults) -> List[str]:
|
||||
"""Format final summary for verbose mode: Generated files + placement distribution."""
|
||||
lines = []
|
||||
|
||||
# Main result
|
||||
file_count = len(results.placement_summaries)
|
||||
summary_line = f"Generated {file_count} AGENTS.md file{'s' if file_count != 1 else ''}"
|
||||
|
||||
if results.is_dry_run:
|
||||
summary_line = f"[DRY RUN] Would generate {file_count} AGENTS.md file{'s' if file_count != 1 else ''}"
|
||||
|
||||
if self.use_color:
|
||||
color = "yellow" if results.is_dry_run else "green"
|
||||
lines.append(self._styled(summary_line, f"{color} bold"))
|
||||
else:
|
||||
lines.append(summary_line)
|
||||
|
||||
# Efficiency metrics with improved formatting
|
||||
stats = results.optimization_stats
|
||||
efficiency_pct = f"{stats.efficiency_percentage:.1f}%"
|
||||
|
||||
# Build metrics with baselines and improvements when available
|
||||
metrics_lines = [
|
||||
f"┌─ Context efficiency: {efficiency_pct}"
|
||||
]
|
||||
|
||||
if stats.efficiency_improvement is not None:
|
||||
improvement = f"(baseline: {stats.baseline_efficiency * 100:.1f}%, improvement: +{stats.efficiency_improvement:.0f}%)" if stats.efficiency_improvement > 0 else f"(baseline: {stats.baseline_efficiency * 100:.1f}%, change: {stats.efficiency_improvement:.0f}%)"
|
||||
metrics_lines[0] += f" {improvement}"
|
||||
|
||||
if stats.pollution_improvement is not None:
|
||||
pollution_pct = f"{(1.0 - stats.pollution_improvement) * 100:.1f}%"
|
||||
improvement_pct = f"-{stats.pollution_improvement * 100:.0f}%" if stats.pollution_improvement > 0 else f"+{abs(stats.pollution_improvement) * 100:.0f}%"
|
||||
metrics_lines.append(f"├─ Average pollution: {pollution_pct} (improvement: {improvement_pct})")
|
||||
|
||||
if stats.placement_accuracy is not None:
|
||||
accuracy_pct = f"{stats.placement_accuracy * 100:.1f}%"
|
||||
metrics_lines.append(f"├─ Placement accuracy: {accuracy_pct} (mathematical optimum)")
|
||||
|
||||
if stats.generation_time_ms is not None:
|
||||
metrics_lines.append(f"└─ Generation time: {stats.generation_time_ms}ms")
|
||||
else:
|
||||
# Change last ├─ to └─
|
||||
if len(metrics_lines) > 1:
|
||||
metrics_lines[-1] = metrics_lines[-1].replace("├─", "└─")
|
||||
|
||||
for line in metrics_lines:
|
||||
if self.use_color:
|
||||
lines.append(self._styled(line, "dim"))
|
||||
else:
|
||||
lines.append(line)
|
||||
|
||||
# Add placement distribution summary
|
||||
lines.append("")
|
||||
if self.use_color:
|
||||
lines.append(self._styled("Placement Distribution", "cyan bold"))
|
||||
else:
|
||||
lines.append("Placement Distribution")
|
||||
|
||||
# Show distribution of AGENTS.md files
|
||||
for summary in results.placement_summaries:
|
||||
rel_path = str(summary.get_relative_path(Path.cwd()))
|
||||
content_text = self._get_placement_description(summary)
|
||||
source_text = f"{summary.source_count} source{'s' if summary.source_count != 1 else ''}"
|
||||
|
||||
# Use proper tree formatting
|
||||
prefix = "├─" if summary != results.placement_summaries[-1] else "└─"
|
||||
line = f"{prefix} {rel_path:<30} {content_text} from {source_text}"
|
||||
|
||||
if self.use_color:
|
||||
lines.append(self._styled(line, "dim"))
|
||||
else:
|
||||
lines.append(line)
|
||||
|
||||
return lines
|
||||
|
||||
def format_dry_run(self, results: CompilationResults) -> str:
|
||||
"""Format dry run output.
|
||||
|
||||
Args:
|
||||
results: Compilation results to format.
|
||||
|
||||
Returns:
|
||||
Formatted dry run output string.
|
||||
"""
|
||||
lines = []
|
||||
|
||||
# Standard analysis
|
||||
lines.extend(self._format_project_discovery(results.project_analysis))
|
||||
lines.append("")
|
||||
lines.extend(self._format_optimization_progress(results.optimization_decisions, results.project_analysis))
|
||||
lines.append("")
|
||||
|
||||
# Dry run specific output
|
||||
lines.extend(self._format_dry_run_summary(results))
|
||||
|
||||
# Issues (warnings/errors) - important for dry run too!
|
||||
if results.has_issues:
|
||||
lines.append("")
|
||||
lines.extend(self._format_issues(results.warnings, results.errors))
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
def _format_project_discovery(self, analysis) -> List[str]:
|
||||
"""Format project discovery phase output."""
|
||||
lines = []
|
||||
|
||||
if self.use_color:
|
||||
lines.append(self._styled("Analyzing project structure...", "cyan bold"))
|
||||
else:
|
||||
lines.append("Analyzing project structure...")
|
||||
|
||||
# Constitution detection (first priority)
|
||||
if analysis.constitution_detected:
|
||||
constitution_line = f"├─ Constitution detected: {analysis.constitution_path}"
|
||||
if self.use_color:
|
||||
lines.append(self._styled(constitution_line, "dim"))
|
||||
else:
|
||||
lines.append(constitution_line)
|
||||
|
||||
# Structure tree with more detailed information
|
||||
file_types_summary = analysis.get_file_types_summary() if hasattr(analysis, 'get_file_types_summary') else "various"
|
||||
tree_lines = [
|
||||
f"├─ {analysis.directories_scanned} directories scanned (max depth: {analysis.max_depth})",
|
||||
f"├─ {analysis.files_analyzed} files analyzed across {len(analysis.file_types_detected)} file types ({file_types_summary})",
|
||||
f"└─ {analysis.instruction_patterns_detected} instruction patterns detected"
|
||||
]
|
||||
|
||||
for line in tree_lines:
|
||||
if self.use_color:
|
||||
lines.append(self._styled(line, "dim"))
|
||||
else:
|
||||
lines.append(line)
|
||||
|
||||
return lines
|
||||
|
||||
def _format_optimization_progress(self, decisions: List[OptimizationDecision], analysis=None) -> List[str]:
|
||||
"""Format optimization progress display using Rich table for better readability."""
|
||||
lines = []
|
||||
|
||||
if self.use_color:
|
||||
lines.append(self._styled("Optimizing placements...", "cyan bold"))
|
||||
else:
|
||||
lines.append("Optimizing placements...")
|
||||
|
||||
if self.use_color and RICH_AVAILABLE:
|
||||
# Create a Rich table for professional display
|
||||
table = Table(show_header=True, header_style="bold cyan", box=box.SIMPLE_HEAD)
|
||||
table.add_column("Pattern", style="white", width=25)
|
||||
table.add_column("Source", style="yellow", width=20)
|
||||
table.add_column("Coverage", style="dim", width=10)
|
||||
table.add_column("Placement", style="green", width=25)
|
||||
table.add_column("Metrics", style="dim", width=20)
|
||||
|
||||
# Add constitution row first if detected
|
||||
if analysis and analysis.constitution_detected:
|
||||
table.add_row(
|
||||
"**",
|
||||
"constitution.md",
|
||||
"ALL",
|
||||
"./AGENTS.md",
|
||||
"rel: 100%"
|
||||
)
|
||||
|
||||
for decision in decisions:
|
||||
pattern_display = decision.pattern if decision.pattern else "(global)"
|
||||
|
||||
# Extract source information from the instruction
|
||||
source_display = "unknown"
|
||||
if decision.instruction and hasattr(decision.instruction, 'file_path'):
|
||||
try:
|
||||
# Get relative path from base directory if possible
|
||||
rel_path = decision.instruction.file_path.name # Just filename for brevity
|
||||
source_display = rel_path
|
||||
except Exception:
|
||||
source_display = str(decision.instruction.file_path)[-20:] # Last 20 chars
|
||||
|
||||
ratio_display = f"{decision.matching_directories}/{decision.total_directories}"
|
||||
|
||||
if len(decision.placement_directories) == 1:
|
||||
placement = self._get_relative_display_path(decision.placement_directories[0])
|
||||
# Add efficiency details for single placement
|
||||
relevance = getattr(decision, 'relevance_score', 1.0)
|
||||
pollution = getattr(decision, 'pollution_score', 0.0)
|
||||
metrics = f"rel: {relevance*100:.0f}%"
|
||||
else:
|
||||
placement_count = len(decision.placement_directories)
|
||||
placement = f"{placement_count} locations"
|
||||
metrics = "distributed"
|
||||
|
||||
# Color code the placement by strategy
|
||||
placement_style = self._get_strategy_color(decision.strategy)
|
||||
placement_text = Text(placement, style=placement_style)
|
||||
|
||||
table.add_row(pattern_display, source_display, ratio_display, placement_text, metrics)
|
||||
|
||||
# Render table to lines
|
||||
if self.console:
|
||||
with self.console.capture() as capture:
|
||||
self.console.print(table)
|
||||
table_output = capture.get()
|
||||
if table_output.strip():
|
||||
lines.extend(table_output.split('\n'))
|
||||
else:
|
||||
# Fallback to simplified text display for non-Rich environments
|
||||
# Add constitution first if detected
|
||||
if analysis and analysis.constitution_detected:
|
||||
lines.append("** constitution.md ALL → ./AGENTS.md (rel: 100%)")
|
||||
|
||||
for decision in decisions:
|
||||
pattern_display = decision.pattern if decision.pattern else "(global)"
|
||||
|
||||
# Extract source information
|
||||
source_display = "unknown"
|
||||
if decision.instruction and hasattr(decision.instruction, 'file_path'):
|
||||
try:
|
||||
source_display = decision.instruction.file_path.name
|
||||
except Exception:
|
||||
source_display = "unknown"
|
||||
|
||||
ratio_display = f"{decision.matching_directories}/{decision.total_directories} dirs"
|
||||
|
||||
if len(decision.placement_directories) == 1:
|
||||
placement = self._get_relative_display_path(decision.placement_directories[0])
|
||||
relevance = getattr(decision, 'relevance_score', 1.0)
|
||||
pollution = getattr(decision, 'pollution_score', 0.0)
|
||||
line = f"{pattern_display:<25} {source_display:<15} {ratio_display:<10} → {placement:<25} (rel: {relevance*100:.0f}%)"
|
||||
else:
|
||||
placement_count = len(decision.placement_directories)
|
||||
line = f"{pattern_display:<25} {source_display:<15} {ratio_display:<10} → {placement_count} locations"
|
||||
|
||||
lines.append(line)
|
||||
|
||||
return lines
|
||||
|
||||
def _format_results_summary(self, results: CompilationResults) -> List[str]:
|
||||
"""Format final results summary."""
|
||||
lines = []
|
||||
|
||||
# Main result
|
||||
file_count = len(results.placement_summaries)
|
||||
summary_line = f"Generated {file_count} AGENTS.md file{'s' if file_count != 1 else ''}"
|
||||
|
||||
if results.is_dry_run:
|
||||
summary_line = f"[DRY RUN] Would generate {file_count} AGENTS.md file{'s' if file_count != 1 else ''}"
|
||||
|
||||
if self.use_color:
|
||||
color = "yellow" if results.is_dry_run else "green"
|
||||
lines.append(self._styled(summary_line, f"{color} bold"))
|
||||
else:
|
||||
lines.append(summary_line)
|
||||
|
||||
# Efficiency metrics with improved formatting
|
||||
stats = results.optimization_stats
|
||||
efficiency_pct = f"{stats.efficiency_percentage:.1f}%"
|
||||
|
||||
# Build metrics with baselines and improvements when available
|
||||
metrics_lines = [
|
||||
f"┌─ Context efficiency: {efficiency_pct}"
|
||||
]
|
||||
|
||||
if stats.efficiency_improvement is not None:
|
||||
improvement = f"(baseline: {stats.baseline_efficiency * 100:.1f}%, improvement: +{stats.efficiency_improvement:.0f}%)" if stats.efficiency_improvement > 0 else f"(baseline: {stats.baseline_efficiency * 100:.1f}%, change: {stats.efficiency_improvement:.0f}%)"
|
||||
metrics_lines[0] += f" {improvement}"
|
||||
|
||||
if stats.pollution_improvement is not None:
|
||||
pollution_pct = f"{(1.0 - stats.pollution_improvement) * 100:.1f}%"
|
||||
improvement_pct = f"-{stats.pollution_improvement * 100:.0f}%" if stats.pollution_improvement > 0 else f"+{abs(stats.pollution_improvement) * 100:.0f}%"
|
||||
metrics_lines.append(f"├─ Average pollution: {pollution_pct} (improvement: {improvement_pct})")
|
||||
|
||||
if stats.placement_accuracy is not None:
|
||||
accuracy_pct = f"{stats.placement_accuracy * 100:.1f}%"
|
||||
metrics_lines.append(f"├─ Placement accuracy: {accuracy_pct} (mathematical optimum)")
|
||||
|
||||
if stats.generation_time_ms is not None:
|
||||
metrics_lines.append(f"└─ Generation time: {stats.generation_time_ms}ms")
|
||||
else:
|
||||
# Change last ├─ to └─
|
||||
if len(metrics_lines) > 1:
|
||||
metrics_lines[-1] = metrics_lines[-1].replace("├─", "└─")
|
||||
|
||||
for line in metrics_lines:
|
||||
if self.use_color:
|
||||
lines.append(self._styled(line, "dim"))
|
||||
else:
|
||||
lines.append(line)
|
||||
|
||||
# Add placement distribution summary
|
||||
lines.append("")
|
||||
if self.use_color:
|
||||
lines.append(self._styled("Placement Distribution", "cyan bold"))
|
||||
else:
|
||||
lines.append("Placement Distribution")
|
||||
|
||||
# Show distribution of AGENTS.md files
|
||||
for summary in results.placement_summaries:
|
||||
rel_path = str(summary.get_relative_path(Path.cwd()))
|
||||
content_text = self._get_placement_description(summary)
|
||||
source_text = f"{summary.source_count} source{'s' if summary.source_count != 1 else ''}"
|
||||
|
||||
# Use proper tree formatting
|
||||
prefix = "├─" if summary != results.placement_summaries[-1] else "└─"
|
||||
line = f"{prefix} {rel_path:<30} {content_text} from {source_text}"
|
||||
|
||||
if self.use_color:
|
||||
lines.append(self._styled(line, "dim"))
|
||||
else:
|
||||
lines.append(line)
|
||||
|
||||
return lines
|
||||
|
||||
def _format_dry_run_summary(self, results: CompilationResults) -> List[str]:
|
||||
"""Format dry run specific summary."""
|
||||
lines = []
|
||||
|
||||
if self.use_color:
|
||||
lines.append(self._styled("[DRY RUN] File generation preview:", "yellow bold"))
|
||||
else:
|
||||
lines.append("[DRY RUN] File generation preview:")
|
||||
|
||||
# List files that would be generated
|
||||
for summary in results.placement_summaries:
|
||||
rel_path = str(summary.get_relative_path(Path.cwd()))
|
||||
instruction_text = f"{summary.instruction_count} instruction{'s' if summary.instruction_count != 1 else ''}"
|
||||
source_text = f"{summary.source_count} source{'s' if summary.source_count != 1 else ''}"
|
||||
|
||||
line = f"├─ {rel_path:<30} {instruction_text}, {source_text}"
|
||||
|
||||
if self.use_color:
|
||||
lines.append(self._styled(line, "dim"))
|
||||
else:
|
||||
lines.append(line)
|
||||
|
||||
# Change last ├─ to └─
|
||||
if lines and len(lines) > 1:
|
||||
lines[-1] = lines[-1].replace("├─", "└─")
|
||||
|
||||
lines.append("")
|
||||
|
||||
# Call to action
|
||||
if self.use_color:
|
||||
lines.append(self._styled("[DRY RUN] No files written. Run 'apm compile' to apply changes.", "yellow"))
|
||||
else:
|
||||
lines.append("[DRY RUN] No files written. Run 'apm compile' to apply changes.")
|
||||
|
||||
return lines
|
||||
|
||||
def _format_mathematical_analysis(self, decisions: List[OptimizationDecision]) -> List[str]:
|
||||
"""Format mathematical analysis for verbose mode with coverage-first principles."""
|
||||
lines = []
|
||||
|
||||
if self.use_color:
|
||||
lines.append(self._styled("Mathematical Optimization Analysis", "cyan bold"))
|
||||
else:
|
||||
lines.append("Mathematical Optimization Analysis")
|
||||
|
||||
lines.append("")
|
||||
|
||||
if self.use_color and RICH_AVAILABLE:
|
||||
# Coverage-First Strategy Table
|
||||
strategy_table = Table(title="Three-Tier Coverage-First Strategy", show_header=True, header_style="bold cyan", box=box.SIMPLE_HEAD)
|
||||
strategy_table.add_column("Pattern", style="white", width=25)
|
||||
strategy_table.add_column("Source", style="yellow", width=15)
|
||||
strategy_table.add_column("Distribution", style="yellow", width=12)
|
||||
strategy_table.add_column("Strategy", style="green", width=15)
|
||||
strategy_table.add_column("Coverage Guarantee", style="blue", width=20)
|
||||
|
||||
for decision in decisions:
|
||||
pattern = decision.pattern if decision.pattern else "(global)"
|
||||
|
||||
# Extract source information
|
||||
source_display = "unknown"
|
||||
if decision.instruction and hasattr(decision.instruction, 'file_path'):
|
||||
try:
|
||||
source_display = decision.instruction.file_path.name
|
||||
except Exception:
|
||||
source_display = "unknown"
|
||||
|
||||
# Distribution score with threshold classification
|
||||
score = decision.distribution_score
|
||||
if score < 0.3:
|
||||
dist_display = f"{score:.3f} (Low)"
|
||||
strategy_name = "Single Point"
|
||||
coverage_status = "✅ Perfect"
|
||||
elif score > 0.7:
|
||||
dist_display = f"{score:.3f} (High)"
|
||||
strategy_name = "Distributed"
|
||||
coverage_status = "✅ Universal"
|
||||
else:
|
||||
dist_display = f"{score:.3f} (Medium)"
|
||||
strategy_name = "Selective Multi"
|
||||
# Check if root placement was used (indicates coverage fallback)
|
||||
if any("." == str(p) or p.name == "" for p in decision.placement_directories):
|
||||
coverage_status = "⚠️ Root Fallback"
|
||||
else:
|
||||
coverage_status = "✅ Verified"
|
||||
|
||||
strategy_table.add_row(pattern, source_display, dist_display, strategy_name, coverage_status)
|
||||
|
||||
# Render strategy table
|
||||
if self.console:
|
||||
with self.console.capture() as capture:
|
||||
self.console.print(strategy_table)
|
||||
table_output = capture.get()
|
||||
if table_output.strip():
|
||||
lines.extend(table_output.split('\n'))
|
||||
|
||||
lines.append("")
|
||||
|
||||
# Hierarchical Coverage Analysis Table
|
||||
coverage_table = Table(title="Hierarchical Coverage Analysis", show_header=True, header_style="bold cyan", box=box.SIMPLE_HEAD)
|
||||
coverage_table.add_column("Pattern", style="white", width=25)
|
||||
coverage_table.add_column("Matching Files", style="yellow", width=15)
|
||||
coverage_table.add_column("Placement", style="green", width=20)
|
||||
coverage_table.add_column("Coverage Result", style="blue", width=25)
|
||||
|
||||
for decision in decisions:
|
||||
pattern = decision.pattern if decision.pattern else "(global)"
|
||||
matching_files = f"{decision.matching_directories} dirs"
|
||||
|
||||
if len(decision.placement_directories) == 1:
|
||||
placement = self._get_relative_display_path(decision.placement_directories[0])
|
||||
|
||||
# Analyze coverage outcome
|
||||
if str(decision.placement_directories[0]).endswith('.'):
|
||||
coverage_result = "Root → All files inherit"
|
||||
elif decision.distribution_score < 0.3:
|
||||
coverage_result = "Local → Perfect efficiency"
|
||||
else:
|
||||
coverage_result = "Selective → Coverage verified"
|
||||
else:
|
||||
placement = f"{len(decision.placement_directories)} locations"
|
||||
coverage_result = "Multi-point → Full coverage"
|
||||
|
||||
coverage_table.add_row(pattern, matching_files, placement, coverage_result)
|
||||
|
||||
# Render coverage table
|
||||
if self.console:
|
||||
with self.console.capture() as capture:
|
||||
self.console.print(coverage_table)
|
||||
table_output = capture.get()
|
||||
if table_output.strip():
|
||||
lines.extend(table_output.split('\n'))
|
||||
|
||||
lines.append("")
|
||||
|
||||
# Updated Mathematical Foundation Panel
|
||||
foundation_text = """Objective: minimize Σ(context_pollution × directory_weight)
|
||||
Constraints: ∀file_matching_pattern → can_inherit_instruction
|
||||
Variables: placement_matrix ∈ {0,1}
|
||||
Algorithm: Three-tier strategy with hierarchical coverage verification
|
||||
|
||||
Coverage Guarantee: Every file can access applicable instructions through
|
||||
hierarchical inheritance. Coverage takes priority over efficiency."""
|
||||
|
||||
if self.console:
|
||||
from rich.panel import Panel
|
||||
try:
|
||||
panel = Panel(foundation_text, title="Coverage-Constrained Optimization", border_style="cyan")
|
||||
with self.console.capture() as capture:
|
||||
self.console.print(panel)
|
||||
panel_output = capture.get()
|
||||
if panel_output.strip():
|
||||
lines.extend(panel_output.split('\n'))
|
||||
except Exception:
|
||||
# Fallback to simple text
|
||||
lines.append("Coverage-Constrained Optimization:")
|
||||
for line in foundation_text.split('\n'):
|
||||
lines.append(f" {line}")
|
||||
|
||||
else:
|
||||
# Fallback for non-Rich environments
|
||||
lines.append("Coverage-First Strategy Analysis:")
|
||||
for decision in decisions:
|
||||
pattern = decision.pattern if decision.pattern else "(global)"
|
||||
score = f"{decision.distribution_score:.3f}"
|
||||
strategy = decision.strategy.value
|
||||
coverage = "✅ Verified" if decision.distribution_score < 0.7 else "⚠️ Root Fallback"
|
||||
lines.append(f" {pattern:<30} {score:<8} {strategy:<15} {coverage}")
|
||||
|
||||
lines.append("")
|
||||
lines.append("Mathematical Foundation:")
|
||||
lines.append(" Objective: minimize Σ(context_pollution × directory_weight)")
|
||||
lines.append(" Constraints: ∀file_matching_pattern → can_inherit_instruction")
|
||||
lines.append(" Algorithm: Three-tier strategy with coverage verification")
|
||||
lines.append(" Principle: Coverage guarantee takes priority over efficiency")
|
||||
|
||||
return lines
|
||||
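The objective in the panel above can be made concrete with a small sketch. The helper below is illustrative only and not part of this change; it assumes a binary placement matrix keyed by (instruction, directory) pairs and per-directory weights, and simply evaluates Σ(context_pollution × directory_weight) for the chosen placements.

# Illustrative sketch (assumed names, not part of the diff): evaluate the
# objective "minimize Σ(context_pollution × directory_weight)" for a placement.
def placement_cost(placement, pollution, weight):
    # placement: {(instruction, directory): 0 or 1}
    # pollution: {(instruction, directory): float in [0, 1]}
    # weight:    {directory: float}
    return sum(
        pollution[key] * weight[key[1]]
        for key, placed in placement.items()
        if placed
    )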
|
||||
def _format_detailed_metrics(self, stats) -> List[str]:
|
||||
"""Format detailed performance metrics table with interpretations."""
|
||||
lines = []
|
||||
|
||||
if self.use_color:
|
||||
lines.append(self._styled("Performance Metrics", "cyan bold"))
|
||||
else:
|
||||
lines.append("Performance Metrics")
|
||||
|
||||
# Create metrics table
|
||||
if self.use_color and RICH_AVAILABLE:
|
||||
table = Table(box=box.SIMPLE)
|
||||
table.add_column("Metric", style="white", width=20)
|
||||
table.add_column("Value", style="white", width=12)
|
||||
table.add_column("Assessment", style="blue", width=35)
|
||||
|
||||
# Context Efficiency with coverage-first interpretation
|
||||
efficiency = stats.efficiency_percentage
|
||||
if efficiency >= 80:
|
||||
assessment = "Excellent - perfect pattern locality"
|
||||
assessment_color = "bright_green"
|
||||
value_color = "bright_green"
|
||||
elif efficiency >= 60:
|
||||
assessment = "Good - well-optimized with minimal coverage conflicts"
|
||||
assessment_color = "green"
|
||||
value_color = "green"
|
||||
elif efficiency >= 40:
|
||||
assessment = "Fair - moderate coverage-driven pollution"
|
||||
assessment_color = "yellow"
|
||||
value_color = "yellow"
|
||||
elif efficiency >= 20:
|
||||
assessment = "Poor - significant coverage constraints"
|
||||
assessment_color = "orange1"
|
||||
value_color = "orange1"
|
||||
else:
|
||||
assessment = "Very Poor - may be mathematically optimal given coverage"
|
||||
assessment_color = "red"
|
||||
value_color = "red"
|
||||
|
||||
table.add_row(
|
||||
"Context Efficiency",
|
||||
Text(f"{efficiency:.1f}%", style=value_color),
|
||||
Text(assessment, style=assessment_color)
|
||||
)
|
||||
|
||||
# Calculate pollution level with coverage-aware interpretation
|
||||
pollution_level = 100 - efficiency
|
||||
if pollution_level <= 20:
|
||||
pollution_assessment = "Excellent - perfect pattern locality"
|
||||
pollution_color = "bright_green"
|
||||
elif pollution_level <= 40:
|
||||
pollution_assessment = "Good - minimal coverage conflicts"
|
||||
pollution_color = "green"
|
||||
elif pollution_level <= 60:
|
||||
pollution_assessment = "Fair - acceptable coverage-driven pollution"
|
||||
pollution_color = "yellow"
|
||||
elif pollution_level <= 80:
|
||||
pollution_assessment = "Poor - high coverage constraints"
|
||||
pollution_color = "orange1"
|
||||
else:
|
||||
pollution_assessment = "Very Poor - but may guarantee coverage"
|
||||
pollution_color = "red"
|
||||
|
||||
table.add_row(
|
||||
"Pollution Level",
|
||||
Text(f"{pollution_level:.1f}%", style=pollution_color),
|
||||
Text(pollution_assessment, style=pollution_color)
|
||||
)
|
||||
|
||||
if stats.placement_accuracy is not None:
|
||||
accuracy = stats.placement_accuracy * 100
|
||||
if accuracy >= 95:
|
||||
accuracy_assessment = "Excellent - mathematically optimal"
|
||||
accuracy_color = "bright_green"
|
||||
elif accuracy >= 85:
|
||||
accuracy_assessment = "Good - near optimal"
|
||||
accuracy_color = "green"
|
||||
elif accuracy >= 70:
|
||||
accuracy_assessment = "Fair - reasonably placed"
|
||||
accuracy_color = "yellow"
|
||||
else:
|
||||
accuracy_assessment = "Poor - suboptimal placement"
|
||||
accuracy_color = "orange1"
|
||||
|
||||
table.add_row(
|
||||
"Placement Accuracy",
|
||||
Text(f"{accuracy:.1f}%", style=accuracy_color),
|
||||
Text(accuracy_assessment, style=accuracy_color)
|
||||
)
|
||||
|
||||
# Render table
|
||||
if self.console:
|
||||
with self.console.capture() as capture:
|
||||
self.console.print(table)
|
||||
table_output = capture.get()
|
||||
if table_output.strip():
|
||||
lines.extend(table_output.split('\n'))
|
||||
|
||||
lines.append("")
|
||||
|
||||
# Add interpretation guide
|
||||
if self.console:
|
||||
try:
|
||||
interpretation_text = """📊 How These Metrics Are Calculated
|
||||
|
||||
Context Efficiency = Average across all directories of (Relevant Instructions / Total Instructions)
|
||||
• For each directory, APM analyzes what instructions agents would inherit from AGENTS.md files
|
||||
• Calculates ratio of instructions that apply to files in that directory vs total instructions loaded
|
||||
• Takes weighted average across all project directories with files
|
||||
|
||||
Pollution Level = 100% - Context Efficiency (inverse relationship)
|
||||
• High pollution = agents load many irrelevant instructions when working in specific directories
|
||||
• Low pollution = agents see mostly relevant instructions for their current context
|
||||
|
||||
🎯 Interpretation Benchmarks
|
||||
|
||||
Context Efficiency:
|
||||
• 80-100%: Excellent - Instructions perfectly targeted to usage context
|
||||
• 60-80%: Good - Well-optimized with minimal wasted context
|
||||
• 40-60%: Fair - Some optimization opportunities exist
|
||||
• 20-40%: Poor - Significant context pollution, consider restructuring
|
||||
• 0-20%: Very Poor - High pollution, instructions poorly distributed
|
||||
|
||||
Pollution Level:
|
||||
• 0-10%: Excellent - Agents see highly relevant instructions only
|
||||
• 10-25%: Good - Low noise, mostly relevant context
|
||||
• 25-50%: Fair - Moderate noise, some irrelevant instructions
|
||||
• 50%+: Poor - High noise, agents see many irrelevant instructions
|
||||
|
||||
💡 Example: 36.7% efficiency means agents working in specific directories see only 36.7% relevant instructions and 63.3% irrelevant context pollution."""
|
||||
|
||||
panel = Panel(interpretation_text, title="Metrics Guide", border_style="dim", title_align="left")
|
||||
with self.console.capture() as capture:
|
||||
self.console.print(panel)
|
||||
panel_output = capture.get()
|
||||
if panel_output.strip():
|
||||
lines.extend(panel_output.split('\n'))
|
||||
except Exception:
|
||||
# Fallback to simple text
|
||||
lines.extend([
|
||||
"Metrics Guide:",
|
||||
"• Context Efficiency 80-100%: Excellent | 60-80%: Good | 40-60%: Fair | <40%: Poor",
|
||||
"• Pollution 0-10%: Excellent | 10-25%: Good | 25-50%: Fair | >50%: Poor"
|
||||
])
|
||||
else:
|
||||
# Fallback for non-Rich environments
|
||||
efficiency = stats.efficiency_percentage
|
||||
pollution = 100 - efficiency
|
||||
|
||||
if efficiency >= 80:
|
||||
efficiency_assessment = "Excellent"
|
||||
elif efficiency >= 60:
|
||||
efficiency_assessment = "Good"
|
||||
elif efficiency >= 40:
|
||||
efficiency_assessment = "Fair"
|
||||
elif efficiency >= 20:
|
||||
efficiency_assessment = "Poor"
|
||||
else:
|
||||
efficiency_assessment = "Very Poor"
|
||||
|
||||
if pollution <= 10:
|
||||
pollution_assessment = "Excellent"
|
||||
elif pollution <= 25:
|
||||
pollution_assessment = "Good"
|
||||
elif pollution <= 50:
|
||||
pollution_assessment = "Fair"
|
||||
else:
|
||||
pollution_assessment = "Poor"
|
||||
|
||||
lines.extend([
|
||||
f"Context Efficiency: {efficiency:.1f}% ({efficiency_assessment})",
|
||||
f"Pollution Level: {pollution:.1f}% ({pollution_assessment})",
|
||||
"Guide: 80-100% Excellent | 60-80% Good | 40-60% Fair | 20-40% Poor | <20% Very Poor"
|
||||
])
|
||||
|
||||
return lines
|
||||
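As a worked example of the definitions in the metrics guide above (directory names and counts are invented for illustration): context efficiency is the file-weighted average of relevant/total instructions per directory, and pollution is simply its complement.

# Hypothetical numbers, for illustration only.
dirs = {
    "src/api": {"relevant": 3, "total": 5, "files": 40},  # 60% relevant
    "src/web": {"relevant": 1, "total": 5, "files": 10},  # 20% relevant
    "docs":    {"relevant": 2, "total": 4, "files": 10},  # 50% relevant
}
total_files = sum(d["files"] for d in dirs.values())
efficiency = sum(d["relevant"] / d["total"] * d["files"] for d in dirs.values()) / total_files
pollution = 1.0 - efficiency
print(f"{efficiency:.1%} efficient, {pollution:.1%} pollution")  # 51.7% efficient, 48.3% pollution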
|
||||
def _format_issues(self, warnings: List[str], errors: List[str]) -> List[str]:
|
||||
"""Format warnings and errors as professional blocks."""
|
||||
lines = []
|
||||
|
||||
# Errors first
|
||||
for error in errors:
|
||||
if self.use_color:
|
||||
lines.append(self._styled(f"✗ Error: {error}", "red"))
|
||||
else:
|
||||
lines.append(f"✗ Error: {error}")
|
||||
|
||||
# Then warnings - handle multi-line warnings as cohesive blocks
|
||||
for warning in warnings:
|
||||
if '\n' in warning:
|
||||
# Multi-line warning - format as a professional block
|
||||
warning_lines = warning.split('\n')
|
||||
# First line gets the warning symbol and styling
|
||||
if self.use_color:
|
||||
lines.append(self._styled(f"⚠ Warning: {warning_lines[0]}", "yellow"))
|
||||
else:
|
||||
lines.append(f"⚠ Warning: {warning_lines[0]}")
|
||||
|
||||
# Subsequent lines are indented and styled consistently
|
||||
for line in warning_lines[1:]:
|
||||
if line.strip(): # Skip empty lines
|
||||
if self.use_color:
|
||||
lines.append(self._styled(f" {line}", "yellow"))
|
||||
else:
|
||||
lines.append(f" {line}")
|
||||
else:
|
||||
# Single-line warning - standard format
|
||||
if self.use_color:
|
||||
lines.append(self._styled(f"⚠ Warning: {warning}", "yellow"))
|
||||
else:
|
||||
lines.append(f"⚠ Warning: {warning}")
|
||||
|
||||
return lines
|
||||
|
||||
def _get_strategy_symbol(self, strategy: PlacementStrategy) -> str:
|
||||
"""Get symbol for placement strategy."""
|
||||
symbols = {
|
||||
PlacementStrategy.SINGLE_POINT: "●",
|
||||
PlacementStrategy.SELECTIVE_MULTI: "◆",
|
||||
PlacementStrategy.DISTRIBUTED: "◇"
|
||||
}
|
||||
return symbols.get(strategy, "•")
|
||||
|
||||
def _get_strategy_color(self, strategy: PlacementStrategy) -> str:
|
||||
"""Get color for placement strategy."""
|
||||
colors = {
|
||||
PlacementStrategy.SINGLE_POINT: "green",
|
||||
PlacementStrategy.SELECTIVE_MULTI: "yellow",
|
||||
PlacementStrategy.DISTRIBUTED: "blue"
|
||||
}
|
||||
return colors.get(strategy, "white")
|
||||
|
||||
def _get_relative_display_path(self, path: Path) -> str:
|
||||
"""Get display-friendly relative path."""
|
||||
try:
|
||||
rel_path = path.relative_to(Path.cwd())
|
||||
if rel_path == Path('.'):
|
||||
return "./AGENTS.md"
|
||||
return str(rel_path / "AGENTS.md")
|
||||
except ValueError:
|
||||
return str(path / "AGENTS.md")
|
||||
|
||||
def _format_coverage_explanation(self, stats) -> List[str]:
|
||||
"""Explain the coverage vs. efficiency trade-off."""
|
||||
lines = []
|
||||
|
||||
if self.use_color:
|
||||
lines.append(self._styled("Coverage vs. Efficiency Analysis", "cyan bold"))
|
||||
else:
|
||||
lines.append("Coverage vs. Efficiency Analysis")
|
||||
|
||||
lines.append("")
|
||||
|
||||
efficiency = stats.efficiency_percentage
|
||||
|
||||
if efficiency < 30:
|
||||
lines.append("⚠️ Low Efficiency Detected:")
|
||||
lines.append(" • Coverage guarantee requires some instructions at root level")
|
||||
lines.append(" • This creates pollution for specialized directories")
|
||||
lines.append(" • Trade-off: Guaranteed coverage vs. optimal efficiency")
|
||||
lines.append(" • Alternative: Higher efficiency with coverage violations (data loss)")
|
||||
lines.append("")
|
||||
lines.append("💡 This may be mathematically optimal given coverage constraints")
|
||||
elif efficiency < 60:
|
||||
lines.append("✅ Moderate Efficiency:")
|
||||
lines.append(" • Good balance between coverage and efficiency")
|
||||
lines.append(" • Some coverage-driven pollution is acceptable")
|
||||
lines.append(" • Most patterns are well-localized")
|
||||
else:
|
||||
lines.append("🎯 High Efficiency:")
|
||||
lines.append(" • Excellent pattern locality achieved")
|
||||
lines.append(" • Minimal coverage conflicts")
|
||||
lines.append(" • Instructions are optimally placed")
|
||||
|
||||
lines.append("")
|
||||
lines.append("📚 Why Coverage Takes Priority:")
|
||||
lines.append(" • Every file must access applicable instructions")
|
||||
lines.append(" • Hierarchical inheritance prevents data loss")
|
||||
lines.append(" • Better low efficiency than missing instructions")
|
||||
|
||||
return lines
|
||||
|
||||
def _get_placement_description(self, summary) -> str:
|
||||
"""Get description of what's included in a placement summary.
|
||||
|
||||
Args:
|
||||
summary: PlacementSummary object
|
||||
|
||||
Returns:
|
||||
str: Description like "Constitution and 1 instruction" or "Constitution"
|
||||
"""
|
||||
# Check if constitution is included
|
||||
has_constitution = any("constitution.md" in source for source in summary.sources)
|
||||
|
||||
# Build the description based on what's included
|
||||
parts = []
|
||||
if has_constitution:
|
||||
parts.append("Constitution")
|
||||
|
||||
if summary.instruction_count > 0:
|
||||
instruction_text = f"{summary.instruction_count} instruction{'s' if summary.instruction_count != 1 else ''}"
|
||||
parts.append(instruction_text)
|
||||
|
||||
if parts:
|
||||
return " and ".join(parts)
|
||||
else:
|
||||
return "content"
|
||||
|
||||
def _styled(self, text: str, style: str) -> str:
|
||||
"""Apply styling to text with rich fallback."""
|
||||
if self.use_color and RICH_AVAILABLE:
|
||||
styled_text = Text(text)
|
||||
styled_text.style = style
|
||||
with self.console.capture() as capture:
|
||||
self.console.print(styled_text, end="")
|
||||
return capture.get()
|
||||
else:
|
||||
return text
|
||||
122
src/apm_cli/output/models.py
Normal file
@@ -0,0 +1,122 @@
|
||||
"""Data models for compilation output and results."""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Set
|
||||
from enum import Enum
|
||||
|
||||
from ..primitives.models import Instruction
|
||||
|
||||
|
||||
class PlacementStrategy(Enum):
|
||||
"""Placement strategy types for optimization decisions."""
|
||||
SINGLE_POINT = "Single Point"
|
||||
SELECTIVE_MULTI = "Selective Multi"
|
||||
DISTRIBUTED = "Distributed"
|
||||
|
||||
|
||||
@dataclass
|
||||
class ProjectAnalysis:
|
||||
"""Analysis of the project structure and file distribution."""
|
||||
directories_scanned: int
|
||||
files_analyzed: int
|
||||
file_types_detected: Set[str]
|
||||
instruction_patterns_detected: int
|
||||
max_depth: int
|
||||
constitution_detected: bool = False
|
||||
constitution_path: Optional[str] = None
|
||||
|
||||
def get_file_types_summary(self) -> str:
|
||||
"""Get a concise summary of detected file types."""
|
||||
if not self.file_types_detected:
|
||||
return "none"
|
||||
|
||||
# Remove leading dots and sort
|
||||
types = sorted([t.lstrip('.') for t in self.file_types_detected if t])
|
||||
if len(types) <= 3:
|
||||
return ', '.join(types)
|
||||
else:
|
||||
return f"{', '.join(types[:3])} and {len(types) - 3} more"
|
||||
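For instance (hypothetical input), the summary above behaves like:

# e.g. {'.py', '.md', '.ts', '.yml', '.json'} → "json, md, py and 2 more"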
|
||||
|
||||
@dataclass
|
||||
class OptimizationDecision:
|
||||
"""Details about a specific optimization decision for an instruction."""
|
||||
instruction: Instruction
|
||||
pattern: str
|
||||
matching_directories: int
|
||||
total_directories: int
|
||||
distribution_score: float
|
||||
strategy: PlacementStrategy
|
||||
placement_directories: List[Path]
|
||||
reasoning: str
|
||||
relevance_score: float = 0.0 # Coverage efficiency for primary placement directory
|
||||
|
||||
@property
|
||||
def distribution_ratio(self) -> float:
|
||||
"""Get the distribution ratio (matching/total)."""
|
||||
return self.matching_directories / self.total_directories if self.total_directories > 0 else 0.0
|
||||
|
||||
|
||||
@dataclass
|
||||
class PlacementSummary:
|
||||
"""Summary of a single AGENTS.md file placement."""
|
||||
path: Path
|
||||
instruction_count: int
|
||||
source_count: int
|
||||
sources: List[str] = field(default_factory=list)
|
||||
|
||||
def get_relative_path(self, base_dir: Path) -> Path:
|
||||
"""Get path relative to base directory."""
|
||||
try:
|
||||
rel_path = self.path.relative_to(base_dir)
|
||||
return Path('.') if rel_path == Path('.') else rel_path
|
||||
except ValueError:
|
||||
return self.path
|
||||
|
||||
|
||||
@dataclass
|
||||
class OptimizationStats:
|
||||
"""Performance and efficiency statistics from optimization."""
|
||||
average_context_efficiency: float
|
||||
pollution_improvement: Optional[float] = None
|
||||
baseline_efficiency: Optional[float] = None
|
||||
placement_accuracy: Optional[float] = None
|
||||
generation_time_ms: Optional[int] = None
|
||||
total_agents_files: int = 0
|
||||
directories_analyzed: int = 0
|
||||
|
||||
@property
|
||||
def efficiency_improvement(self) -> Optional[float]:
|
||||
"""Calculate efficiency improvement percentage."""
|
||||
if self.baseline_efficiency is not None:
|
||||
return ((self.average_context_efficiency - self.baseline_efficiency)
|
||||
/ self.baseline_efficiency * 100)
|
||||
return None
|
||||
|
||||
@property
|
||||
def efficiency_percentage(self) -> float:
|
||||
"""Get efficiency as percentage."""
|
||||
return self.average_context_efficiency * 100
|
||||
|
||||
|
||||
@dataclass
|
||||
class CompilationResults:
|
||||
"""Complete results from compilation process."""
|
||||
project_analysis: ProjectAnalysis
|
||||
optimization_decisions: List[OptimizationDecision]
|
||||
placement_summaries: List[PlacementSummary]
|
||||
optimization_stats: OptimizationStats
|
||||
warnings: List[str] = field(default_factory=list)
|
||||
errors: List[str] = field(default_factory=list)
|
||||
is_dry_run: bool = False
|
||||
|
||||
@property
|
||||
def total_instructions(self) -> int:
|
||||
"""Get total number of instructions processed."""
|
||||
return sum(summary.instruction_count for summary in self.placement_summaries)
|
||||
|
||||
@property
|
||||
def has_issues(self) -> bool:
|
||||
"""Check if there are any warnings or errors."""
|
||||
return len(self.warnings) > 0 or len(self.errors) > 0
|
||||
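A minimal sketch of how these dataclasses fit together (the numbers are invented; only fields and properties defined above are used):

# Illustration only.
stats = OptimizationStats(
    average_context_efficiency=0.55,  # 55% efficiency after optimization
    baseline_efficiency=0.40,         # 40% efficiency before optimization
    generation_time_ms=12,
)
print(stats.efficiency_percentage)   # ≈ 55.0
print(stats.efficiency_improvement)  # ≈ 37.5 (% improvement over baseline)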
320
src/apm_cli/output/script_formatters.py
Normal file
@@ -0,0 +1,320 @@
|
||||
"""Professional CLI output formatters for APM script execution."""
|
||||
|
||||
from typing import Dict, List, Optional
|
||||
from pathlib import Path
|
||||
|
||||
try:
|
||||
from rich.console import Console
|
||||
from rich.text import Text
|
||||
from rich.panel import Panel
|
||||
from rich.tree import Tree
|
||||
from rich import box
|
||||
RICH_AVAILABLE = True
|
||||
except ImportError:
|
||||
RICH_AVAILABLE = False
|
||||
|
||||
|
||||
class ScriptExecutionFormatter:
|
||||
"""Professional formatter for script execution output following CLI UX design plan."""
|
||||
|
||||
def __init__(self, use_color: bool = True):
|
||||
"""Initialize formatter.
|
||||
|
||||
Args:
|
||||
use_color: Whether to use colors and rich formatting.
|
||||
"""
|
||||
self.use_color = use_color and RICH_AVAILABLE
|
||||
self.console = Console() if self.use_color else None
|
||||
|
||||
def format_script_header(self, script_name: str, params: Dict[str, str]) -> List[str]:
|
||||
"""Format the script execution header with parameters.
|
||||
|
||||
Args:
|
||||
script_name: Name of the script being executed
|
||||
params: Parameters passed to the script
|
||||
|
||||
Returns:
|
||||
List of formatted lines
|
||||
"""
|
||||
lines = []
|
||||
|
||||
# Main header
|
||||
if self.use_color:
|
||||
lines.append(self._styled(f"🚀 Running script: {script_name}", "cyan bold"))
|
||||
else:
|
||||
lines.append(f"🚀 Running script: {script_name}")
|
||||
|
||||
# Parameters tree if any exist
|
||||
if params:
|
||||
for param_name, param_value in params.items():
|
||||
param_line = f" - {param_name}: {param_value}"
|
||||
if self.use_color:
|
||||
lines.append(self._styled(param_line, "dim"))
|
||||
else:
|
||||
lines.append(param_line)
|
||||
|
||||
return lines
|
||||
|
||||
def format_compilation_progress(self, prompt_files: List[str]) -> List[str]:
|
||||
"""Format prompt compilation progress.
|
||||
|
||||
Args:
|
||||
prompt_files: List of prompt files being compiled
|
||||
|
||||
Returns:
|
||||
List of formatted lines
|
||||
"""
|
||||
if not prompt_files:
|
||||
return []
|
||||
|
||||
lines = []
|
||||
|
||||
if len(prompt_files) == 1:
|
||||
if self.use_color:
|
||||
lines.append(self._styled("Compiling prompt...", "cyan"))
|
||||
else:
|
||||
lines.append("Compiling prompt...")
|
||||
else:
|
||||
if self.use_color:
|
||||
lines.append(self._styled(f"Compiling {len(prompt_files)} prompts...", "cyan"))
|
||||
else:
|
||||
lines.append(f"Compiling {len(prompt_files)} prompts...")
|
||||
|
||||
# Show each file being compiled
|
||||
for prompt_file in prompt_files:
|
||||
file_line = f"├─ {prompt_file}"
|
||||
if self.use_color:
|
||||
lines.append(self._styled(file_line, "dim"))
|
||||
else:
|
||||
lines.append(file_line)
|
||||
|
||||
# Change last ├─ to └─
|
||||
if lines and len(lines) > 1:
|
||||
lines[-1] = lines[-1].replace("├─", "└─")
|
||||
|
||||
return lines
|
||||
|
||||
def format_runtime_execution(self, runtime: str, command: str, content_length: int) -> List[str]:
|
||||
"""Format runtime command execution with content preview.
|
||||
|
||||
Args:
|
||||
runtime: Name of the runtime (copilot, codex, llm)
|
||||
command: The command being executed
|
||||
content_length: Length of the content being passed
|
||||
|
||||
Returns:
|
||||
List of formatted lines
|
||||
"""
|
||||
lines = []
|
||||
|
||||
# Runtime detection and styling
|
||||
runtime_colors = {
|
||||
'copilot': 'blue',
|
||||
'codex': 'green',
|
||||
'llm': 'magenta',
|
||||
'unknown': 'white'
|
||||
}
|
||||
|
||||
runtime_color = runtime_colors.get(runtime, 'white')
|
||||
|
||||
# Execution header
|
||||
if self.use_color:
|
||||
lines.append(self._styled(f"Executing {runtime} runtime...", f"{runtime_color} bold"))
|
||||
else:
|
||||
lines.append(f"Executing {runtime} runtime...")
|
||||
|
||||
# Command structure
|
||||
command_line = f"├─ Command: {command}"
|
||||
if self.use_color:
|
||||
lines.append(self._styled(command_line, "dim"))
|
||||
else:
|
||||
lines.append(command_line)
|
||||
|
||||
# Content size
|
||||
content_line = f"└─ Prompt content: {content_length:,} characters"
|
||||
if self.use_color:
|
||||
lines.append(self._styled(content_line, "dim"))
|
||||
else:
|
||||
lines.append(content_line)
|
||||
|
||||
return lines
|
||||
|
||||
def format_content_preview(self, content: str, max_preview: int = 200) -> List[str]:
|
||||
"""Format content preview with professional styling.
|
||||
|
||||
Args:
|
||||
content: The full content to preview
|
||||
max_preview: Maximum characters to show in preview
|
||||
|
||||
Returns:
|
||||
List of formatted lines
|
||||
"""
|
||||
lines = []
|
||||
|
||||
# Content preview
|
||||
content_preview = content[:max_preview] + "..." if len(content) > max_preview else content
|
||||
|
||||
if self.use_color:
|
||||
lines.append(self._styled("Prompt preview:", "cyan"))
|
||||
else:
|
||||
lines.append("Prompt preview:")
|
||||
|
||||
# Content in a box for better readability
|
||||
if self.use_color and RICH_AVAILABLE and self.console:
|
||||
try:
|
||||
panel = Panel(
|
||||
content_preview,
|
||||
title=f"Content ({len(content):,} characters)",
|
||||
border_style="dim",
|
||||
title_align="left"
|
||||
)
|
||||
with self.console.capture() as capture:
|
||||
self.console.print(panel)
|
||||
panel_output = capture.get()
|
||||
if panel_output.strip():
|
||||
lines.extend(panel_output.split('\n'))
|
||||
except Exception:
|
||||
# Fallback to simple formatting
|
||||
lines.append("─" * 50)
|
||||
lines.append(content_preview)
|
||||
lines.append("─" * 50)
|
||||
else:
|
||||
# Simple text fallback
|
||||
lines.append("─" * 50)
|
||||
lines.append(content_preview)
|
||||
lines.append("─" * 50)
|
||||
|
||||
return lines
|
||||
|
||||
def format_environment_setup(self, runtime: str, env_vars_set: List[str]) -> List[str]:
|
||||
"""Format environment setup information.
|
||||
|
||||
Args:
|
||||
runtime: Name of the runtime
|
||||
env_vars_set: List of environment variables that were set
|
||||
|
||||
Returns:
|
||||
List of formatted lines
|
||||
"""
|
||||
if not env_vars_set:
|
||||
return []
|
||||
|
||||
lines = []
|
||||
|
||||
if self.use_color:
|
||||
lines.append(self._styled("Environment setup:", "cyan"))
|
||||
else:
|
||||
lines.append("Environment setup:")
|
||||
|
||||
for env_var in env_vars_set:
|
||||
env_line = f"├─ {env_var}: configured"
|
||||
if self.use_color:
|
||||
lines.append(self._styled(env_line, "dim"))
|
||||
else:
|
||||
lines.append(env_line)
|
||||
|
||||
# Change last ├─ to └─
|
||||
if lines and len(lines) > 1:
|
||||
lines[-1] = lines[-1].replace("├─", "└─")
|
||||
|
||||
return lines
|
||||
|
||||
def format_execution_success(self, runtime: str, execution_time: Optional[float] = None) -> List[str]:
|
||||
"""Format successful execution result.
|
||||
|
||||
Args:
|
||||
runtime: Name of the runtime that executed
|
||||
execution_time: Optional execution time in seconds
|
||||
|
||||
Returns:
|
||||
List of formatted lines
|
||||
"""
|
||||
lines = []
|
||||
|
||||
success_msg = f"✅ {runtime.title()} execution completed successfully"
|
||||
if execution_time is not None:
|
||||
success_msg += f" ({execution_time:.2f}s)"
|
||||
|
||||
if self.use_color:
|
||||
lines.append(self._styled(success_msg, "green bold"))
|
||||
else:
|
||||
lines.append(success_msg)
|
||||
|
||||
return lines
|
||||
|
||||
def format_execution_error(self, runtime: str, error_code: int, error_msg: Optional[str] = None) -> List[str]:
|
||||
"""Format execution error result.
|
||||
|
||||
Args:
|
||||
runtime: Name of the runtime that failed
|
||||
error_code: Exit code from the failed execution
|
||||
error_msg: Optional error message
|
||||
|
||||
Returns:
|
||||
List of formatted lines
|
||||
"""
|
||||
lines = []
|
||||
|
||||
error_header = f"✗ {runtime.title()} execution failed (exit code: {error_code})"
|
||||
if self.use_color:
|
||||
lines.append(self._styled(error_header, "red bold"))
|
||||
else:
|
||||
lines.append(error_header)
|
||||
|
||||
if error_msg:
|
||||
# Format error message with proper indentation
|
||||
error_lines = error_msg.split('\n')
|
||||
for line in error_lines:
|
||||
if line.strip():
|
||||
formatted_line = f" {line}"
|
||||
if self.use_color:
|
||||
lines.append(self._styled(formatted_line, "red"))
|
||||
else:
|
||||
lines.append(formatted_line)
|
||||
|
||||
return lines
|
||||
|
||||
def format_subprocess_details(self, args: List[str], content_length: int) -> List[str]:
|
||||
"""Format subprocess execution details for debugging.
|
||||
|
||||
Args:
|
||||
args: The subprocess arguments (without content)
|
||||
content_length: Length of content being passed
|
||||
|
||||
Returns:
|
||||
List of formatted lines
|
||||
"""
|
||||
lines = []
|
||||
|
||||
if self.use_color:
|
||||
lines.append(self._styled("Subprocess execution:", "cyan"))
|
||||
else:
|
||||
lines.append("Subprocess execution:")
|
||||
|
||||
# Show command structure
|
||||
args_display = " ".join(f'"{arg}"' if " " in arg else arg for arg in args)
|
||||
command_line = f"├─ Args: {args_display}"
|
||||
if self.use_color:
|
||||
lines.append(self._styled(command_line, "dim"))
|
||||
else:
|
||||
lines.append(command_line)
|
||||
|
||||
# Show content info
|
||||
content_line = f"└─ Content: +{content_length:,} chars appended"
|
||||
if self.use_color:
|
||||
lines.append(self._styled(content_line, "dim"))
|
||||
else:
|
||||
lines.append(content_line)
|
||||
|
||||
return lines
|
||||
|
||||
def _styled(self, text: str, style: str) -> str:
|
||||
"""Apply styling to text with rich fallback."""
|
||||
if self.use_color and RICH_AVAILABLE and self.console:
|
||||
styled_text = Text(text)
|
||||
styled_text.style = style
|
||||
with self.console.capture() as capture:
|
||||
self.console.print(styled_text, end="")
|
||||
return capture.get()
|
||||
else:
|
||||
return text
|
||||
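A short usage sketch for the formatter above (the script name and parameters are made up; only methods defined in this file are called):

# Illustration only.
formatter = ScriptExecutionFormatter(use_color=False)
out = []
out += formatter.format_script_header("deploy", {"env": "staging"})
out += formatter.format_compilation_progress(["prompts/deploy.prompt.md"])
out += formatter.format_execution_success("copilot", execution_time=2.31)
print("\n".join(out))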
18
src/apm_cli/primitives/__init__.py
Normal file
@@ -0,0 +1,18 @@
|
||||
"""Primitives package for APM CLI - discovery and parsing of APM context."""
|
||||
|
||||
from .models import Chatmode, Instruction, Context, PrimitiveCollection, PrimitiveConflict
|
||||
from .discovery import discover_primitives, find_primitive_files, discover_primitives_with_dependencies
|
||||
from .parser import parse_primitive_file, validate_primitive
|
||||
|
||||
__all__ = [
|
||||
'Chatmode',
|
||||
'Instruction',
|
||||
'Context',
|
||||
'PrimitiveCollection',
|
||||
'PrimitiveConflict',
|
||||
'discover_primitives',
|
||||
'discover_primitives_with_dependencies',
|
||||
'find_primitive_files',
|
||||
'parse_primitive_file',
|
||||
'validate_primitive'
|
||||
]
|
||||
316
src/apm_cli/primitives/discovery.py
Normal file
@@ -0,0 +1,316 @@
|
||||
"""Discovery functionality for primitive files."""
|
||||
|
||||
import os
|
||||
import glob
|
||||
from pathlib import Path
|
||||
from typing import List, Dict
|
||||
|
||||
from .models import PrimitiveCollection
|
||||
from .parser import parse_primitive_file
|
||||
from ..models.apm_package import APMPackage
|
||||
|
||||
|
||||
# Common primitive patterns for local discovery (with recursive search)
|
||||
LOCAL_PRIMITIVE_PATTERNS: Dict[str, List[str]] = {
|
||||
'chatmode': [
|
||||
"**/.apm/chatmodes/*.chatmode.md",
|
||||
"**/.github/chatmodes/*.chatmode.md",
|
||||
"**/*.chatmode.md" # Generic .chatmode.md files
|
||||
],
|
||||
'instruction': [
|
||||
"**/.apm/instructions/*.instructions.md",
|
||||
"**/.github/instructions/*.instructions.md",
|
||||
"**/*.instructions.md" # Generic .instructions.md files
|
||||
],
|
||||
'context': [
|
||||
"**/.apm/context/*.context.md",
|
||||
"**/.apm/memory/*.memory.md", # APM memory convention
|
||||
"**/.github/context/*.context.md",
|
||||
"**/.github/memory/*.memory.md", # VSCode compatibility
|
||||
"**/*.context.md", # Generic .context.md files
|
||||
"**/*.memory.md" # Generic .memory.md files
|
||||
]
|
||||
}
|
||||
|
||||
# Dependency primitive patterns (for .apm directory within dependencies)
|
||||
DEPENDENCY_PRIMITIVE_PATTERNS: Dict[str, List[str]] = {
|
||||
'chatmode': ["chatmodes/*.chatmode.md"],
|
||||
'instruction': ["instructions/*.instructions.md"],
|
||||
'context': [
|
||||
"context/*.context.md",
|
||||
"memory/*.memory.md"
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
def discover_primitives(base_dir: str = ".") -> PrimitiveCollection:
|
||||
"""Find all APM primitive files in the project.
|
||||
|
||||
Searches for .chatmode.md, .instructions.md, .context.md, and .memory.md files
|
||||
in both .apm/ and .github/ directory structures.
|
||||
|
||||
Args:
|
||||
base_dir (str): Base directory to search in. Defaults to current directory.
|
||||
|
||||
Returns:
|
||||
PrimitiveCollection: Collection of discovered and parsed primitives.
|
||||
"""
|
||||
collection = PrimitiveCollection()
|
||||
|
||||
# Find and parse files for each primitive type
|
||||
for primitive_type, patterns in LOCAL_PRIMITIVE_PATTERNS.items():
|
||||
files = find_primitive_files(base_dir, patterns)
|
||||
|
||||
for file_path in files:
|
||||
try:
|
||||
primitive = parse_primitive_file(file_path, source="local")
|
||||
collection.add_primitive(primitive)
|
||||
except Exception as e:
|
||||
print(f"Warning: Failed to parse {file_path}: {e}")
|
||||
|
||||
return collection
|
||||
|
||||
|
||||
def discover_primitives_with_dependencies(base_dir: str = ".") -> PrimitiveCollection:
|
||||
"""Enhanced primitive discovery including dependency sources.
|
||||
|
||||
Priority Order:
|
||||
1. Local .apm/ (highest priority - always wins)
|
||||
2. Dependencies in declaration order (first declared wins)
|
||||
|
||||
Args:
|
||||
base_dir (str): Base directory to search in. Defaults to current directory.
|
||||
|
||||
Returns:
|
||||
PrimitiveCollection: Collection of discovered and parsed primitives with source tracking.
|
||||
"""
|
||||
collection = PrimitiveCollection()
|
||||
|
||||
# Phase 1: Local primitives (highest priority)
|
||||
scan_local_primitives(base_dir, collection)
|
||||
|
||||
# Phase 2: Dependency primitives (lower priority, with conflict detection)
|
||||
scan_dependency_primitives(base_dir, collection)
|
||||
|
||||
return collection
|
||||
|
||||
|
||||
def scan_local_primitives(base_dir: str, collection: PrimitiveCollection) -> None:
|
||||
"""Scan local .apm/ directory for primitives.
|
||||
|
||||
Args:
|
||||
base_dir (str): Base directory to search in.
|
||||
collection (PrimitiveCollection): Collection to add primitives to.
|
||||
"""
|
||||
# Find and parse files for each primitive type
|
||||
for primitive_type, patterns in LOCAL_PRIMITIVE_PATTERNS.items():
|
||||
files = find_primitive_files(base_dir, patterns)
|
||||
|
||||
# Filter out files from apm_modules to avoid conflicts with dependency scanning
|
||||
local_files = []
|
||||
base_path = Path(base_dir)
|
||||
apm_modules_path = base_path / "apm_modules"
|
||||
|
||||
for file_path in files:
|
||||
# Only include files that are NOT in apm_modules directory
|
||||
if not _is_under_directory(file_path, apm_modules_path):
|
||||
local_files.append(file_path)
|
||||
|
||||
for file_path in local_files:
|
||||
try:
|
||||
primitive = parse_primitive_file(file_path, source="local")
|
||||
collection.add_primitive(primitive)
|
||||
except Exception as e:
|
||||
print(f"Warning: Failed to parse local primitive {file_path}: {e}")
|
||||
|
||||
|
||||
def _is_under_directory(file_path: Path, directory: Path) -> bool:
|
||||
"""Check if a file path is under a specific directory.
|
||||
|
||||
Args:
|
||||
file_path (Path): Path to check.
|
||||
directory (Path): Directory to check against.
|
||||
|
||||
Returns:
|
||||
bool: True if file_path is under directory, False otherwise.
|
||||
"""
|
||||
try:
|
||||
file_path.resolve().relative_to(directory.resolve())
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def scan_dependency_primitives(base_dir: str, collection: PrimitiveCollection) -> None:
|
||||
"""Scan all dependencies in apm_modules/ with priority handling.
|
||||
|
||||
Args:
|
||||
base_dir (str): Base directory to search in.
|
||||
collection (PrimitiveCollection): Collection to add primitives to.
|
||||
"""
|
||||
apm_modules_path = Path(base_dir) / "apm_modules"
|
||||
if not apm_modules_path.exists():
|
||||
return
|
||||
|
||||
# Get dependency declaration order from apm.yml
|
||||
dependency_order = get_dependency_declaration_order(base_dir)
|
||||
|
||||
# Process dependencies in declaration order
|
||||
for dep_name in dependency_order:
|
||||
# Handle org-namespaced structure (e.g., "github/design-guidelines")
|
||||
if "/" in dep_name:
|
||||
org_name, repo_name = dep_name.split("/", 1)
|
||||
dep_path = apm_modules_path / org_name / repo_name
|
||||
else:
|
||||
# Fallback for non-namespaced dependencies
|
||||
dep_path = apm_modules_path / dep_name
|
||||
|
||||
if dep_path.exists() and dep_path.is_dir():
|
||||
scan_directory_with_source(dep_path, collection, source=f"dependency:{dep_name}")
|
||||
|
||||
|
||||
def get_dependency_declaration_order(base_dir: str) -> List[str]:
|
||||
"""Get APM dependency names in their declaration order from apm.yml.
|
||||
|
||||
Args:
|
||||
base_dir (str): Base directory containing apm.yml.
|
||||
|
||||
Returns:
|
||||
List[str]: List of dependency names in declaration order.
|
||||
"""
|
||||
try:
|
||||
apm_yml_path = Path(base_dir) / "apm.yml"
|
||||
if not apm_yml_path.exists():
|
||||
return []
|
||||
|
||||
package = APMPackage.from_apm_yml(apm_yml_path)
|
||||
apm_dependencies = package.get_apm_dependencies()
|
||||
|
||||
# Extract package names from dependency references
|
||||
# Use alias if provided, otherwise use full org/repo path for org-namespaced structure
|
||||
dependency_names = []
|
||||
for dep in apm_dependencies:
|
||||
if dep.alias:
|
||||
dependency_names.append(dep.alias)
|
||||
else:
|
||||
# Use full org/repo path (e.g., "github/design-guidelines")
|
||||
# This matches our org-namespaced directory structure
|
||||
dependency_names.append(dep.repo_url)
|
||||
|
||||
return dependency_names
|
||||
|
||||
except Exception as e:
|
||||
print(f"Warning: Failed to parse dependency order from apm.yml: {e}")
|
||||
return []
|
||||
|
||||
|
||||
def scan_directory_with_source(directory: Path, collection: PrimitiveCollection, source: str) -> None:
|
||||
"""Scan a directory for primitives with a specific source tag.
|
||||
|
||||
Args:
|
||||
directory (Path): Directory to scan (e.g., apm_modules/package_name).
|
||||
collection (PrimitiveCollection): Collection to add primitives to.
|
||||
source (str): Source identifier for discovered primitives.
|
||||
"""
|
||||
# Look for .apm directory within the dependency
|
||||
apm_dir = directory / ".apm"
|
||||
if not apm_dir.exists():
|
||||
return
|
||||
|
||||
# Find and parse files for each primitive type
|
||||
for primitive_type, patterns in DEPENDENCY_PRIMITIVE_PATTERNS.items():
|
||||
for pattern in patterns:
|
||||
full_pattern = str(apm_dir / pattern)
|
||||
matching_files = glob.glob(full_pattern, recursive=True)
|
||||
|
||||
for file_path_str in matching_files:
|
||||
file_path = Path(file_path_str)
|
||||
if file_path.is_file() and _is_readable(file_path):
|
||||
try:
|
||||
primitive = parse_primitive_file(file_path, source=source)
|
||||
collection.add_primitive(primitive)
|
||||
except Exception as e:
|
||||
print(f"Warning: Failed to parse dependency primitive {file_path}: {e}")
|
||||
|
||||
|
||||
def find_primitive_files(base_dir: str, patterns: List[str]) -> List[Path]:
|
||||
"""Find primitive files matching the given patterns.
|
||||
|
||||
Args:
|
||||
base_dir (str): Base directory to search in.
|
||||
patterns (List[str]): List of glob patterns to match.
|
||||
|
||||
Returns:
|
||||
List[Path]: List of unique file paths found.
|
||||
"""
|
||||
if not os.path.isdir(base_dir):
|
||||
return []
|
||||
|
||||
all_files = []
|
||||
|
||||
for pattern in patterns:
|
||||
# Use glob to find files matching the pattern
|
||||
matching_files = glob.glob(os.path.join(base_dir, pattern), recursive=True)
|
||||
all_files.extend(matching_files)
|
||||
|
||||
# Remove duplicates while preserving order and convert to Path objects
|
||||
seen = set()
|
||||
unique_files = []
|
||||
|
||||
for file_path in all_files:
|
||||
abs_path = os.path.abspath(file_path)
|
||||
if abs_path not in seen:
|
||||
seen.add(abs_path)
|
||||
unique_files.append(Path(abs_path))
|
||||
|
||||
# Filter out directories and ensure files are readable
|
||||
valid_files = []
|
||||
for file_path in unique_files:
|
||||
if file_path.is_file() and _is_readable(file_path):
|
||||
valid_files.append(file_path)
|
||||
|
||||
return valid_files
|
||||
|
||||
|
||||
def _is_readable(file_path: Path) -> bool:
|
||||
"""Check if a file is readable.
|
||||
|
||||
Args:
|
||||
file_path (Path): Path to check.
|
||||
|
||||
Returns:
|
||||
bool: True if file is readable, False otherwise.
|
||||
"""
|
||||
try:
|
||||
with open(file_path, 'r', encoding='utf-8') as f:
|
||||
# Try to read first few bytes to verify it's readable
|
||||
f.read(1)
|
||||
return True
|
||||
except (PermissionError, UnicodeDecodeError, OSError):
|
||||
return False
|
||||
|
||||
|
||||
def _should_skip_directory(dir_path: str) -> bool:
|
||||
"""Check if a directory should be skipped during scanning.
|
||||
|
||||
Args:
|
||||
dir_path (str): Directory path to check.
|
||||
|
||||
Returns:
|
||||
bool: True if directory should be skipped, False otherwise.
|
||||
"""
|
||||
skip_patterns = {
|
||||
'.git',
|
||||
'node_modules',
|
||||
'__pycache__',
|
||||
'.pytest_cache',
|
||||
'.venv',
|
||||
'venv',
|
||||
'.tox',
|
||||
'build',
|
||||
'dist',
|
||||
'.mypy_cache'
|
||||
}
|
||||
|
||||
dir_name = os.path.basename(dir_path)
|
||||
return dir_name in skip_patterns
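Taken together, these discovery helpers can be driven from a short script. A minimal usage sketch, assuming the functions above are importable as apm_cli.primitives.discovery and that a project keeps its primitives under .apm/ (patterns and paths here are illustrative, not the canonical set):

# Hypothetical usage; module path and glob patterns are assumptions.
from apm_cli.primitives.discovery import find_primitive_files

patterns = [
    ".apm/chatmodes/*.chatmode.md",
    ".apm/instructions/*.instructions.md",
    ".apm/memory/*.memory.md",
]

for path in find_primitive_files(".", patterns):
    print(path)  # each entry is an absolute, readable Path, deduplicated in discovery order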
|
||||
src/apm_cli/primitives/models.py (Normal file, 212 lines)
@@ -0,0 +1,212 @@
|
||||
"""Data models for APM context."""
|
||||
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Optional, List, Union, Dict
|
||||
|
||||
|
||||
@dataclass
|
||||
class Chatmode:
|
||||
"""Represents a chatmode primitive."""
|
||||
name: str
|
||||
file_path: Path
|
||||
description: str
|
||||
apply_to: Optional[str] # Glob pattern for file targeting (optional for chatmodes)
|
||||
content: str
|
||||
author: Optional[str] = None
|
||||
version: Optional[str] = None
|
||||
source: Optional[str] = None # Source of primitive: "local" or "dependency:{package_name}"
|
||||
|
||||
def validate(self) -> List[str]:
|
||||
"""Validate chatmode structure.
|
||||
|
||||
Returns:
|
||||
List[str]: List of validation errors.
|
||||
"""
|
||||
errors = []
|
||||
if not self.description:
|
||||
errors.append("Missing 'description' in frontmatter")
|
||||
if not self.content.strip():
|
||||
errors.append("Empty content")
|
||||
return errors
|
||||
|
||||
|
||||
@dataclass
|
||||
class Instruction:
|
||||
"""Represents an instruction primitive."""
|
||||
name: str
|
||||
file_path: Path
|
||||
description: str
|
||||
apply_to: str # Glob pattern for file targeting (required for instructions)
|
||||
content: str
|
||||
author: Optional[str] = None
|
||||
version: Optional[str] = None
|
||||
source: Optional[str] = None # Source of primitive: "local" or "dependency:{package_name}"
|
||||
|
||||
def validate(self) -> List[str]:
|
||||
"""Validate instruction structure.
|
||||
|
||||
Returns:
|
||||
List[str]: List of validation errors.
|
||||
"""
|
||||
errors = []
|
||||
if not self.description:
|
||||
errors.append("Missing 'description' in frontmatter")
|
||||
if not self.apply_to:
|
||||
errors.append("Missing 'applyTo' in frontmatter (required for instructions)")
|
||||
if not self.content.strip():
|
||||
errors.append("Empty content")
|
||||
return errors
|
||||
|
||||
|
||||
@dataclass
|
||||
class Context:
|
||||
"""Represents a context primitive."""
|
||||
name: str
|
||||
file_path: Path
|
||||
content: str
|
||||
description: Optional[str] = None
|
||||
author: Optional[str] = None
|
||||
version: Optional[str] = None
|
||||
source: Optional[str] = None # Source of primitive: "local" or "dependency:{package_name}"
|
||||
|
||||
def validate(self) -> List[str]:
|
||||
"""Validate context structure.
|
||||
|
||||
Returns:
|
||||
List[str]: List of validation errors.
|
||||
"""
|
||||
errors = []
|
||||
if not self.content.strip():
|
||||
errors.append("Empty content")
|
||||
return errors
|
||||
|
||||
|
||||
# Union type for all primitive types
|
||||
Primitive = Union[Chatmode, Instruction, Context]
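A quick illustration of how these dataclasses are used and validated; field values below are invented for the example:

from pathlib import Path
from apm_cli.primitives.models import Instruction  # import path assumed from the src/ layout

inst = Instruction(
    name="python-style",
    file_path=Path(".apm/instructions/python-style.instructions.md"),
    description="House style for Python code",
    apply_to="**/*.py",
    content="Use type hints and docstrings.",
    source="local",
)
print(inst.validate())  # [] -> no errors; drop apply_to and it reports the missing 'applyTo'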
|
||||
|
||||
|
||||
@dataclass
|
||||
class PrimitiveConflict:
|
||||
"""Represents a conflict between primitives from different sources."""
|
||||
primitive_name: str
|
||||
primitive_type: str # 'chatmode', 'instruction', 'context'
|
||||
winning_source: str # Source that won the conflict
|
||||
losing_sources: List[str] # Sources that lost the conflict
|
||||
file_path: Path # Path of the winning primitive
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""String representation of the conflict."""
|
||||
losing_list = ", ".join(self.losing_sources)
|
||||
return f"{self.primitive_type} '{self.primitive_name}': {self.winning_source} overrides {losing_list}"
|
||||
|
||||
|
||||
@dataclass
|
||||
class PrimitiveCollection:
|
||||
"""Collection of discovered primitives."""
|
||||
chatmodes: List[Chatmode]
|
||||
instructions: List[Instruction]
|
||||
contexts: List[Context]
|
||||
conflicts: List[PrimitiveConflict] # Track conflicts during discovery
|
||||
|
||||
def __init__(self):
|
||||
self.chatmodes = []
|
||||
self.instructions = []
|
||||
self.contexts = []
|
||||
self.conflicts = []
|
||||
|
||||
def add_primitive(self, primitive: Primitive) -> None:
|
||||
"""Add a primitive to the appropriate collection.
|
||||
|
||||
If a primitive with the same name already exists, the new primitive
|
||||
will only be added if it has higher priority (lower priority primitives
|
||||
are tracked as conflicts).
|
||||
"""
|
||||
if isinstance(primitive, Chatmode):
|
||||
self._add_with_conflict_detection(primitive, self.chatmodes, "chatmode")
|
||||
elif isinstance(primitive, Instruction):
|
||||
self._add_with_conflict_detection(primitive, self.instructions, "instruction")
|
||||
elif isinstance(primitive, Context):
|
||||
self._add_with_conflict_detection(primitive, self.contexts, "context")
|
||||
else:
|
||||
raise ValueError(f"Unknown primitive type: {type(primitive)}")
|
||||
|
||||
def _add_with_conflict_detection(self, new_primitive: Primitive, collection: List[Primitive], primitive_type: str) -> None:
|
||||
"""Add primitive with conflict detection."""
|
||||
# Find existing primitive with same name
|
||||
existing_index = None
|
||||
for i, existing in enumerate(collection):
|
||||
if existing.name == new_primitive.name:
|
||||
existing_index = i
|
||||
break
|
||||
|
||||
if existing_index is None:
|
||||
# No conflict, just add the primitive
|
||||
collection.append(new_primitive)
|
||||
else:
|
||||
# Conflict detected - apply priority rules
|
||||
existing = collection[existing_index]
|
||||
|
||||
# Priority rules:
|
||||
# 1. Local always wins over dependency
|
||||
# 2. Earlier dependency wins over later dependency
|
||||
should_replace = self._should_replace_primitive(existing, new_primitive)
|
||||
|
||||
if should_replace:
|
||||
# Replace existing with new primitive and record conflict
|
||||
conflict = PrimitiveConflict(
|
||||
primitive_name=new_primitive.name,
|
||||
primitive_type=primitive_type,
|
||||
winning_source=new_primitive.source or "unknown",
|
||||
losing_sources=[existing.source or "unknown"],
|
||||
file_path=new_primitive.file_path
|
||||
)
|
||||
self.conflicts.append(conflict)
|
||||
collection[existing_index] = new_primitive
|
||||
else:
|
||||
# Keep existing and record that new primitive was ignored
|
||||
conflict = PrimitiveConflict(
|
||||
primitive_name=existing.name,
|
||||
primitive_type=primitive_type,
|
||||
winning_source=existing.source or "unknown",
|
||||
losing_sources=[new_primitive.source or "unknown"],
|
||||
file_path=existing.file_path
|
||||
)
|
||||
self.conflicts.append(conflict)
|
||||
# Don't add new_primitive to collection
|
||||
|
||||
def _should_replace_primitive(self, existing: Primitive, new: Primitive) -> bool:
|
||||
"""Determine if new primitive should replace existing based on priority."""
|
||||
existing_source = existing.source or "unknown"
|
||||
new_source = new.source or "unknown"
|
||||
|
||||
# Local always wins
|
||||
if existing_source == "local":
|
||||
return False # Never replace local
|
||||
if new_source == "local":
|
||||
return True # Always replace with local
|
||||
|
||||
# Both are dependencies - this shouldn't happen in correct usage
|
||||
# since dependencies should be processed in order, but handle gracefully
|
||||
return False # Keep first dependency (existing)
|
||||
|
||||
def all_primitives(self) -> List[Primitive]:
|
||||
"""Get all primitives as a single list."""
|
||||
return self.chatmodes + self.instructions + self.contexts
|
||||
|
||||
def count(self) -> int:
|
||||
"""Get total count of all primitives."""
|
||||
return len(self.chatmodes) + len(self.instructions) + len(self.contexts)
|
||||
|
||||
def has_conflicts(self) -> bool:
|
||||
"""Check if any conflicts were detected during discovery."""
|
||||
return len(self.conflicts) > 0
|
||||
|
||||
def get_conflicts_by_type(self, primitive_type: str) -> List[PrimitiveConflict]:
|
||||
"""Get conflicts for a specific primitive type."""
|
||||
return [c for c in self.conflicts if c.primitive_type == primitive_type]
|
||||
|
||||
def get_primitives_by_source(self, source: str) -> List[Primitive]:
|
||||
"""Get all primitives from a specific source."""
|
||||
all_primitives = self.all_primitives()
|
||||
return [p for p in all_primitives if p.source == source]
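The priority rules are easiest to see end to end. A minimal sketch of the "local wins over dependency" case (field values invented for illustration):

from pathlib import Path
from apm_cli.primitives.models import Chatmode, PrimitiveCollection

col = PrimitiveCollection()
dep = Chatmode(name="reviewer",
               file_path=Path("apm_modules/pkg/.apm/chatmodes/reviewer.chatmode.md"),
               description="From a dependency", apply_to=None, content="...",
               source="dependency:pkg")
local = Chatmode(name="reviewer",
                 file_path=Path(".apm/chatmodes/reviewer.chatmode.md"),
                 description="Local override", apply_to=None, content="...",
                 source="local")

col.add_primitive(dep)
col.add_primitive(local)     # same name -> conflict; the local primitive replaces the dependency one

print(col.count())           # 1
print(col.has_conflicts())   # True
print(col.conflicts[0])      # chatmode 'reviewer': local overrides dependency:pkg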
|
||||
src/apm_cli/primitives/parser.py (Normal file, 204 lines)
@@ -0,0 +1,204 @@
|
||||
"""Parser for primitive definition files."""
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Union, List
|
||||
import frontmatter
|
||||
|
||||
from .models import Chatmode, Instruction, Context, Primitive
|
||||
|
||||
|
||||
def parse_primitive_file(file_path: Union[str, Path], source: str = None) -> Primitive:
|
||||
"""Parse a primitive file.
|
||||
|
||||
Determines the primitive type based on file extension and parses accordingly.
|
||||
|
||||
Args:
|
||||
file_path (Union[str, Path]): Path to the primitive file.
|
||||
source (str, optional): Source identifier for the primitive (e.g., "local", "dependency:package_name").
|
||||
|
||||
Returns:
|
||||
Primitive: Parsed primitive (Chatmode, Instruction, or Context).
|
||||
|
||||
Raises:
|
||||
ValueError: If file cannot be parsed or has invalid format.
|
||||
"""
|
||||
file_path = Path(file_path)
|
||||
|
||||
try:
|
||||
with open(file_path, 'r', encoding='utf-8') as f:
|
||||
post = frontmatter.load(f)
|
||||
|
||||
# Extract name based on file structure
|
||||
name = _extract_primitive_name(file_path)
|
||||
metadata = post.metadata
|
||||
content = post.content
|
||||
|
||||
# Determine primitive type based on file extension
|
||||
if file_path.name.endswith('.chatmode.md'):
|
||||
return _parse_chatmode(name, file_path, metadata, content, source)
|
||||
elif file_path.name.endswith('.instructions.md'):
|
||||
return _parse_instruction(name, file_path, metadata, content, source)
|
||||
elif file_path.name.endswith('.context.md') or file_path.name.endswith('.memory.md') or _is_context_file(file_path):
|
||||
return _parse_context(name, file_path, metadata, content, source)
|
||||
else:
|
||||
raise ValueError(f"Unknown primitive file type: {file_path}")
|
||||
|
||||
except Exception as e:
|
||||
raise ValueError(f"Failed to parse primitive file {file_path}: {e}")
|
||||
|
||||
|
||||
def _parse_chatmode(name: str, file_path: Path, metadata: dict, content: str, source: str = None) -> Chatmode:
|
||||
"""Parse a chatmode primitive.
|
||||
|
||||
Args:
|
||||
name (str): Name of the chatmode.
|
||||
file_path (Path): Path to the file.
|
||||
metadata (dict): Metadata from frontmatter.
|
||||
content (str): Content of the file.
|
||||
source (str, optional): Source identifier for the primitive.
|
||||
|
||||
Returns:
|
||||
Chatmode: Parsed chatmode primitive.
|
||||
"""
|
||||
return Chatmode(
|
||||
name=name,
|
||||
file_path=file_path,
|
||||
description=metadata.get('description', ''),
|
||||
apply_to=metadata.get('applyTo'), # Optional for chatmodes
|
||||
content=content,
|
||||
author=metadata.get('author'),
|
||||
version=metadata.get('version'),
|
||||
source=source
|
||||
)
|
||||
|
||||
|
||||
def _parse_instruction(name: str, file_path: Path, metadata: dict, content: str, source: str = None) -> Instruction:
|
||||
"""Parse an instruction primitive.
|
||||
|
||||
Args:
|
||||
name (str): Name of the instruction.
|
||||
file_path (Path): Path to the file.
|
||||
metadata (dict): Metadata from frontmatter.
|
||||
content (str): Content of the file.
|
||||
source (str, optional): Source identifier for the primitive.
|
||||
|
||||
Returns:
|
||||
Instruction: Parsed instruction primitive.
|
||||
"""
|
||||
return Instruction(
|
||||
name=name,
|
||||
file_path=file_path,
|
||||
description=metadata.get('description', ''),
|
||||
apply_to=metadata.get('applyTo', ''), # Required for instructions
|
||||
content=content,
|
||||
author=metadata.get('author'),
|
||||
version=metadata.get('version'),
|
||||
source=source
|
||||
)
|
||||
|
||||
|
||||
def _parse_context(name: str, file_path: Path, metadata: dict, content: str, source: str = None) -> Context:
|
||||
"""Parse a context primitive.
|
||||
|
||||
Args:
|
||||
name (str): Name of the context.
|
||||
file_path (Path): Path to the file.
|
||||
metadata (dict): Metadata from frontmatter.
|
||||
content (str): Content of the file.
|
||||
source (str, optional): Source identifier for the primitive.
|
||||
|
||||
Returns:
|
||||
Context: Parsed context primitive.
|
||||
"""
|
||||
return Context(
|
||||
name=name,
|
||||
file_path=file_path,
|
||||
content=content,
|
||||
description=metadata.get('description'), # Optional for contexts
|
||||
author=metadata.get('author'),
|
||||
version=metadata.get('version'),
|
||||
source=source
|
||||
)
|
||||
|
||||
|
||||
def _extract_primitive_name(file_path: Path) -> str:
|
||||
"""Extract primitive name from file path based on naming conventions.
|
||||
|
||||
Args:
|
||||
file_path (Path): Path to the primitive file.
|
||||
|
||||
Returns:
|
||||
str: Extracted primitive name.
|
||||
"""
|
||||
# Normalize path
|
||||
path_parts = file_path.parts
|
||||
|
||||
# Check if it's in a structured directory (.apm/ or .github/)
|
||||
if '.apm' in path_parts or '.github' in path_parts:
|
||||
try:
|
||||
# Find the base directory index
|
||||
if '.apm' in path_parts:
|
||||
base_idx = path_parts.index('.apm')
|
||||
else:
|
||||
base_idx = path_parts.index('.github')
|
||||
|
||||
# For structured directories like .apm/chatmodes/name.chatmode.md
|
||||
if (base_idx + 2 < len(path_parts) and
|
||||
path_parts[base_idx + 1] in ['chatmodes', 'instructions', 'context', 'memory']):
|
||||
basename = file_path.name
|
||||
# Remove the double extension (.chatmode.md, .instructions.md, etc.)
|
||||
if basename.endswith('.chatmode.md'):
|
||||
return basename.replace('.chatmode.md', '')
|
||||
elif basename.endswith('.instructions.md'):
|
||||
return basename.replace('.instructions.md', '')
|
||||
elif basename.endswith('.context.md'):
|
||||
return basename.replace('.context.md', '')
|
||||
elif basename.endswith('.memory.md'):
|
||||
return basename.replace('.memory.md', '')
|
||||
elif basename.endswith('.md'):
|
||||
return basename.replace('.md', '')
|
||||
except (ValueError, IndexError):
|
||||
pass
|
||||
|
||||
# Fallback: extract from filename
|
||||
basename = file_path.name
|
||||
if basename.endswith('.chatmode.md'):
|
||||
return basename.replace('.chatmode.md', '')
|
||||
elif basename.endswith('.instructions.md'):
|
||||
return basename.replace('.instructions.md', '')
|
||||
elif basename.endswith('.context.md'):
|
||||
return basename.replace('.context.md', '')
|
||||
elif basename.endswith('.memory.md'):
|
||||
return basename.replace('.memory.md', '')
|
||||
elif basename.endswith('.md'):
|
||||
return basename.replace('.md', '')
|
||||
|
||||
# Final fallback: use filename without extension
|
||||
return file_path.stem
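Calling the helper directly (for illustration only; it is a private function and the paths are hypothetical) shows the naming rules in practice:

from pathlib import Path

print(_extract_primitive_name(Path(".apm/chatmodes/planner.chatmode.md")))        # planner
print(_extract_primitive_name(Path(".github/instructions/api.instructions.md")))  # api
print(_extract_primitive_name(Path("docs/notes.md")))                             # notes (filename fallback)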
|
||||
|
||||
|
||||
def _is_context_file(file_path: Path) -> bool:
|
||||
"""Check if a file should be treated as a context file based on its directory.
|
||||
|
||||
Args:
|
||||
file_path (Path): Path to check.
|
||||
|
||||
Returns:
|
||||
bool: True if file is in .apm/memory/ or .github/memory/ directory.
|
||||
"""
|
||||
# Only files directly under .apm/memory/ or .github/memory/ are considered context files here
|
||||
parent_parts = file_path.parent.parts[-2:] # Get last two parts of parent path
|
||||
return parent_parts in [('.apm', 'memory'), ('.github', 'memory')]
|
||||
|
||||
|
||||
def validate_primitive(primitive: Primitive) -> List[str]:
|
||||
"""Validate a primitive and return any errors.
|
||||
|
||||
Args:
|
||||
primitive (Primitive): Primitive to validate.
|
||||
|
||||
Returns:
|
||||
List[str]: List of validation errors.
|
||||
"""
|
||||
return primitive.validate()
|
||||
src/apm_cli/registry/__init__.py (Normal file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
"""MCP Registry module for APM-CLI."""
|
||||
|
||||
from .client import SimpleRegistryClient
|
||||
from .integration import RegistryIntegration
|
||||
from .operations import MCPServerOperations
|
||||
|
||||
__all__ = ["SimpleRegistryClient", "RegistryIntegration", "MCPServerOperations"]
|
||||
src/apm_cli/registry/client.py (Normal file, 253 lines)
@@ -0,0 +1,253 @@
|
||||
"""Simple MCP Registry client for server discovery."""
|
||||
|
||||
import os
|
||||
import requests
|
||||
from typing import Dict, List, Optional, Any, Tuple
|
||||
|
||||
|
||||
class SimpleRegistryClient:
|
||||
"""Simple client for querying MCP registries for server discovery."""
|
||||
|
||||
def __init__(self, registry_url: Optional[str] = None):
|
||||
"""Initialize the registry client.
|
||||
|
||||
Args:
|
||||
registry_url (str, optional): URL of the MCP registry.
|
||||
If not provided, uses the MCP_REGISTRY_URL environment variable
|
||||
or falls back to the default registry at api.mcp.github.com.
|
||||
"""
|
||||
self.registry_url = registry_url or os.environ.get(
|
||||
"MCP_REGISTRY_URL", "https://api.mcp.github.com"
|
||||
)
|
||||
self.session = requests.Session()
|
||||
|
||||
def list_servers(self, limit: int = 100, cursor: Optional[str] = None) -> Tuple[List[Dict[str, Any]], Optional[str]]:
|
||||
"""List all available servers in the registry.
|
||||
|
||||
Args:
|
||||
limit (int, optional): Maximum number of entries to return. Defaults to 100.
|
||||
cursor (str, optional): Pagination cursor for retrieving next set of results.
|
||||
|
||||
Returns:
|
||||
Tuple[List[Dict[str, Any]], Optional[str]]: List of server metadata dictionaries and the next cursor if available.
|
||||
|
||||
Raises:
|
||||
requests.RequestException: If the request fails.
|
||||
"""
|
||||
url = f"{self.registry_url}/v0/servers"
|
||||
params = {}
|
||||
|
||||
if limit is not None:
|
||||
params['limit'] = limit
|
||||
if cursor is not None:
|
||||
params['cursor'] = cursor
|
||||
|
||||
response = self.session.get(url, params=params)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
|
||||
# Extract servers - they're nested under "server" key in each item
|
||||
raw_servers = data.get("servers", [])
|
||||
servers = []
|
||||
for item in raw_servers:
|
||||
if "server" in item:
|
||||
servers.append(item["server"])
|
||||
else:
|
||||
servers.append(item) # Fallback for different structure
|
||||
|
||||
metadata = data.get("metadata", {})
|
||||
next_cursor = metadata.get("next_cursor")
|
||||
|
||||
return servers, next_cursor
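A pagination sketch using the cursor returned above; it assumes the default registry URL is reachable from the caller's environment:

from apm_cli.registry.client import SimpleRegistryClient

client = SimpleRegistryClient()
cursor = None
while True:
    servers, cursor = client.list_servers(limit=50, cursor=cursor)
    for server in servers:
        print(server.get("name"))
    if not cursor:          # no next_cursor -> last page
        break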
|
||||
|
||||
def search_servers(self, query: str) -> List[Dict[str, Any]]:
|
||||
"""Search for servers in the registry using the API search endpoint.
|
||||
|
||||
Args:
|
||||
query (str): Search query string.
|
||||
|
||||
Returns:
|
||||
List[Dict[str, Any]]: List of matching server metadata dictionaries.
|
||||
|
||||
Raises:
|
||||
requests.RequestException: If the request fails.
|
||||
"""
|
||||
# The MCP Registry API now only accepts repository names (e.g., "github-mcp-server")
|
||||
# If the query looks like a full identifier (e.g., "io.github.github/github-mcp-server"),
|
||||
# extract the repository name for the search
|
||||
search_query = self._extract_repository_name(query)
|
||||
|
||||
url = f"{self.registry_url}/v0/servers/search"
|
||||
params = {'q': search_query}
|
||||
|
||||
response = self.session.get(url, params=params)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
|
||||
# Extract servers - they're nested under "server" key in each item
|
||||
raw_servers = data.get("servers", [])
|
||||
servers = []
|
||||
for item in raw_servers:
|
||||
if "server" in item:
|
||||
servers.append(item["server"])
|
||||
else:
|
||||
servers.append(item) # Fallback for different structure
|
||||
|
||||
return servers
|
||||
|
||||
def get_server_info(self, server_id: str) -> Dict[str, Any]:
|
||||
"""Get detailed information about a specific server.
|
||||
|
||||
Args:
|
||||
server_id (str): ID of the server.
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: Server metadata dictionary.
|
||||
|
||||
Raises:
|
||||
requests.RequestException: If the request fails.
|
||||
ValueError: If the server is not found.
|
||||
"""
|
||||
url = f"{self.registry_url}/v0/servers/{server_id}"
|
||||
response = self.session.get(url)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
|
||||
# Return the complete response including x-github and other metadata
|
||||
# but ensure the main server info is accessible at the top level
|
||||
if "server" in data:
|
||||
# Merge server info to top level while preserving x-github and other sections
|
||||
result = data["server"].copy()
|
||||
for key, value in data.items():
|
||||
if key != "server":
|
||||
result[key] = value
|
||||
|
||||
if not result:
|
||||
raise ValueError(f"Server '{server_id}' not found in registry")
|
||||
|
||||
return result
|
||||
else:
|
||||
if not data:
|
||||
raise ValueError(f"Server '{server_id}' not found in registry")
|
||||
return data
|
||||
|
||||
def get_server_by_name(self, name: str) -> Optional[Dict[str, Any]]:
|
||||
"""Find a server by its name using the search API.
|
||||
|
||||
Args:
|
||||
name (str): Name of the server to find.
|
||||
|
||||
Returns:
|
||||
Optional[Dict[str, Any]]: Server metadata dictionary or None if not found.
|
||||
|
||||
Raises:
|
||||
requests.RequestException: If the request fails.
|
||||
"""
|
||||
# Use search API to find by name - more efficient than listing all servers
|
||||
try:
|
||||
search_results = self.search_servers(name)
|
||||
|
||||
# Look for an exact match in search results
|
||||
for server in search_results:
|
||||
if server.get("name") == name:
|
||||
return self.get_server_info(server["id"])
|
||||
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
|
||||
|
||||
def find_server_by_reference(self, reference: str) -> Optional[Dict[str, Any]]:
|
||||
"""Find a server by exact name match or server ID.
|
||||
|
||||
This is an efficient lookup that uses the search API:
|
||||
1. Server ID (UUID format) - direct API call
|
||||
2. Server name - search API for exact match (automatically handles identifier extraction)
|
||||
|
||||
Args:
|
||||
reference (str): Server reference (ID or exact name).
|
||||
|
||||
Returns:
|
||||
Optional[Dict[str, Any]]: Server metadata dictionary or None if not found.
|
||||
|
||||
Raises:
|
||||
requests.RequestException: If the request fails.
|
||||
"""
|
||||
# Strategy 1: Try as server ID first (direct lookup)
|
||||
try:
|
||||
# Check if it looks like a UUID (contains hyphens and is 36 chars)
|
||||
if len(reference) == 36 and reference.count('-') == 4:
|
||||
return self.get_server_info(reference)
|
||||
except Exception:  # ValueError is already covered by Exception
|
||||
pass
|
||||
|
||||
# Strategy 2: Use search API to find by name
|
||||
# search_servers now handles extracting repository names internally
|
||||
try:
|
||||
search_results = self.search_servers(reference)
|
||||
|
||||
# Look for matches in search results - check both exact reference match
|
||||
# and the server name from the registry
|
||||
for server in search_results:
|
||||
server_name = server.get("name", "")
|
||||
# Check exact match with original reference
|
||||
if server_name == reference:
|
||||
return self.get_server_info(server["id"])
|
||||
# Check match with common identifier patterns
|
||||
if self._is_server_match(reference, server_name):
|
||||
return self.get_server_info(server["id"])
|
||||
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# If not found by ID or exact name, server is not in registry
|
||||
return None
|
||||
|
||||
def _extract_repository_name(self, reference: str) -> str:
|
||||
"""Extract the repository name from various identifier formats.
|
||||
|
||||
This method handles various naming patterns by extracting the part after
|
||||
the last slash, which typically represents the actual server/repository name.
|
||||
|
||||
Examples:
|
||||
- "io.github.github/github-mcp-server" -> "github-mcp-server"
|
||||
- "abc.dllde.io/some-server" -> "some-server"
|
||||
- "adb.ok/another-server" -> "another-server"
|
||||
- "github/github-mcp-server" -> "github-mcp-server"
|
||||
- "github-mcp-server" -> "github-mcp-server"
|
||||
|
||||
Args:
|
||||
reference (str): Server reference in various formats.
|
||||
|
||||
Returns:
|
||||
str: Repository name suitable for API search.
|
||||
"""
|
||||
# If there's a slash, extract the part after the last slash
|
||||
# This works for any pattern like domain.tld/server, owner/repo, etc.
|
||||
if "/" in reference:
|
||||
return reference.split("/")[-1]
|
||||
|
||||
# Already a simple repo name
|
||||
return reference
|
||||
|
||||
def _is_server_match(self, reference: str, server_name: str) -> bool:
|
||||
"""Check if a reference matches a server name using common patterns.
|
||||
|
||||
Args:
|
||||
reference (str): Original reference from user.
|
||||
server_name (str): Server name from registry.
|
||||
|
||||
Returns:
|
||||
bool: True if they represent the same server.
|
||||
"""
|
||||
# Direct match
|
||||
if reference == server_name:
|
||||
return True
|
||||
|
||||
# Extract repo names and compare
|
||||
ref_repo = self._extract_repository_name(reference)
|
||||
server_repo = self._extract_repository_name(server_name)
|
||||
|
||||
return ref_repo == server_repo
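The same server can usually be resolved from several reference styles. A sketch, assuming the registry is reachable and lists the server in question:

from apm_cli.registry.client import SimpleRegistryClient

client = SimpleRegistryClient()
for ref in ("io.github.github/github-mcp-server", "github/github-mcp-server", "github-mcp-server"):
    info = client.find_server_by_reference(ref)
    print(ref, "->", info.get("id") if info else "not found")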
|
||||
src/apm_cli/registry/integration.py (Normal file, 157 lines)
@@ -0,0 +1,157 @@
|
||||
"""Integration module for connecting registry client with package manager."""
|
||||
|
||||
import requests
|
||||
from typing import Dict, List, Any, Optional
|
||||
from .client import SimpleRegistryClient
|
||||
|
||||
|
||||
class RegistryIntegration:
|
||||
"""Integration class for connecting registry discovery to package manager."""
|
||||
|
||||
def __init__(self, registry_url: Optional[str] = None):
|
||||
"""Initialize the registry integration.
|
||||
|
||||
Args:
|
||||
registry_url (str, optional): URL of the MCP registry.
|
||||
If not provided, uses the MCP_REGISTRY_URL environment variable
|
||||
or falls back to the default registry at api.mcp.github.com.
|
||||
"""
|
||||
self.client = SimpleRegistryClient(registry_url)
|
||||
|
||||
def list_available_packages(self) -> List[Dict[str, Any]]:
|
||||
"""List all available packages in the registry.
|
||||
|
||||
Returns:
|
||||
List[Dict[str, Any]]: List of package metadata dictionaries.
|
||||
"""
|
||||
servers, _ = self.client.list_servers()
|
||||
# Transform server data to package format for backward compatibility
|
||||
return [self._server_to_package(server) for server in servers]
|
||||
|
||||
def search_packages(self, query: str) -> List[Dict[str, Any]]:
|
||||
"""Search for packages in the registry.
|
||||
|
||||
Args:
|
||||
query (str): Search query string.
|
||||
|
||||
Returns:
|
||||
List[Dict[str, Any]]: List of matching package metadata dictionaries.
|
||||
"""
|
||||
servers = self.client.search_servers(query)
|
||||
# Transform server data to package format for backward compatibility
|
||||
return [self._server_to_package(server) for server in servers]
|
||||
|
||||
def get_package_info(self, name: str) -> Dict[str, Any]:
|
||||
"""Get detailed information about a specific package.
|
||||
|
||||
Args:
|
||||
name (str): Name of the package.
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: Package metadata dictionary.
|
||||
|
||||
Raises:
|
||||
ValueError: If the package is not found.
|
||||
"""
|
||||
# Use find_server_by_reference which handles all identifier formats:
|
||||
# - UUIDs (direct lookup)
|
||||
# - Full identifiers like "io.github.github/github-mcp-server"
|
||||
# - Registry names like "github/github-mcp-server"
|
||||
# - Simple names like "github-mcp-server"
|
||||
server_info = self.client.find_server_by_reference(name)
|
||||
if not server_info:
|
||||
raise ValueError(f"Package '{name}' not found in registry")
|
||||
return self._server_to_package_detail(server_info)
|
||||
|
||||
def get_latest_version(self, name: str) -> str:
|
||||
"""Get the latest version of a package.
|
||||
|
||||
Args:
|
||||
name (str): Name of the package.
|
||||
|
||||
Returns:
|
||||
str: Latest version string.
|
||||
|
||||
Raises:
|
||||
ValueError: If the package has no versions.
|
||||
"""
|
||||
package_info = self.get_package_info(name)
|
||||
|
||||
# Check for version_detail in server format
|
||||
if "version_detail" in package_info:
|
||||
version_detail = package_info.get("version_detail", {})
|
||||
if version_detail and "version" in version_detail:
|
||||
return version_detail["version"]
|
||||
|
||||
# Check packages list for version information
|
||||
packages = package_info.get("packages", [])
|
||||
if packages:
|
||||
for pkg in packages:
|
||||
if "version" in pkg:
|
||||
return pkg["version"]
|
||||
|
||||
# Fall back to versions list (backward compatibility)
|
||||
versions = package_info.get("versions", [])
|
||||
if versions:
|
||||
return versions[-1].get("version", "latest")
|
||||
|
||||
raise ValueError(f"Package '{name}' has no versions")
|
||||
|
||||
def _server_to_package(self, server: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Convert server data format to package format for compatibility.
|
||||
|
||||
Args:
|
||||
server (Dict[str, Any]): Server data from registry.
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: Package formatted data.
|
||||
"""
|
||||
package = {
|
||||
"id": server.get("id", ""),
|
||||
"name": server.get("name", "Unknown"),
|
||||
"description": server.get("description", "No description available"),
|
||||
}
|
||||
|
||||
# Add repository information if available
|
||||
if "repository" in server:
|
||||
package["repository"] = server["repository"]
|
||||
|
||||
# Add version information if available
|
||||
if "version_detail" in server:
|
||||
package["version_detail"] = server["version_detail"]
|
||||
|
||||
return package
|
||||
|
||||
def _server_to_package_detail(self, server: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Convert detailed server data to package detail format.
|
||||
|
||||
Args:
|
||||
server (Dict[str, Any]): Server data from registry.
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: Package detail formatted data.
|
||||
"""
|
||||
# Start with the basic package data
|
||||
package_detail = self._server_to_package(server)
|
||||
|
||||
# Add packages information
|
||||
if "packages" in server:
|
||||
package_detail["packages"] = server["packages"]
|
||||
|
||||
# Add remotes information (crucial for deployment type detection)
|
||||
if "remotes" in server:
|
||||
package_detail["remotes"] = server["remotes"]
|
||||
|
||||
if "package_canonical" in server:
|
||||
package_detail["package_canonical"] = server["package_canonical"]
|
||||
|
||||
# For backward compatibility, create a versions list
|
||||
if "version_detail" in server and server["version_detail"]:
|
||||
version_info = server["version_detail"]
|
||||
package_detail["versions"] = [{
|
||||
"version": version_info.get("version", "latest"),
|
||||
"release_date": version_info.get("release_date", ""),
|
||||
"is_latest": version_info.get("is_latest", True)
|
||||
}]
|
||||
|
||||
return package_detail
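A usage sketch for the integration layer as a whole; server names are illustrative and a live registry connection is assumed:

from apm_cli.registry.integration import RegistryIntegration

registry = RegistryIntegration()  # honours MCP_REGISTRY_URL if set
for pkg in registry.search_packages("github"):
    print(pkg["name"], "-", pkg["description"])

print(registry.get_latest_version("github-mcp-server"))  # raises ValueError if unknown or unversioned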
|
||||
src/apm_cli/registry/operations.py (Normal file, 398 lines)
@@ -0,0 +1,398 @@
|
||||
"""MCP server operations and installation logic."""
|
||||
|
||||
import os
|
||||
from typing import List, Dict, Set, Optional, Tuple
|
||||
from pathlib import Path
|
||||
|
||||
from .client import SimpleRegistryClient
|
||||
|
||||
|
||||
class MCPServerOperations:
|
||||
"""Handles MCP server operations like conflict detection and installation status."""
|
||||
|
||||
def __init__(self, registry_url: Optional[str] = None):
|
||||
"""Initialize MCP server operations.
|
||||
|
||||
Args:
|
||||
registry_url: Optional registry URL override
|
||||
"""
|
||||
self.registry_client = SimpleRegistryClient(registry_url)
|
||||
|
||||
def check_servers_needing_installation(self, target_runtimes: List[str], server_references: List[str]) -> List[str]:
|
||||
"""Check which MCP servers actually need installation across target runtimes.
|
||||
|
||||
This method checks the actual MCP configuration files to see which servers
|
||||
are already installed by comparing server IDs (UUIDs), not names.
|
||||
|
||||
Args:
|
||||
target_runtimes: List of target runtimes to check
|
||||
server_references: List of MCP server references (names or IDs)
|
||||
|
||||
Returns:
|
||||
List of server references that need installation in at least one runtime
|
||||
"""
|
||||
servers_needing_installation = set()
|
||||
|
||||
# Check each server reference
|
||||
for server_ref in server_references:
|
||||
try:
|
||||
# Get server info from registry to find the canonical ID
|
||||
server_info = self.registry_client.find_server_by_reference(server_ref)
|
||||
|
||||
if not server_info:
|
||||
# Server not found in registry, might be a local/custom server
|
||||
# Add to installation list for safety
|
||||
servers_needing_installation.add(server_ref)
|
||||
continue
|
||||
|
||||
server_id = server_info.get("id")
|
||||
if not server_id:
|
||||
# No ID available, add to installation list
|
||||
servers_needing_installation.add(server_ref)
|
||||
continue
|
||||
|
||||
# Check if this server needs installation in ANY of the target runtimes
|
||||
needs_installation = False
|
||||
for runtime in target_runtimes:
|
||||
runtime_installed_ids = self._get_installed_server_ids([runtime])
|
||||
if server_id not in runtime_installed_ids:
|
||||
needs_installation = True
|
||||
break
|
||||
|
||||
if needs_installation:
|
||||
servers_needing_installation.add(server_ref)
|
||||
|
||||
except Exception:
|
||||
# If we can't check the server, assume it needs installation
|
||||
servers_needing_installation.add(server_ref)
|
||||
|
||||
return list(servers_needing_installation)
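A sketch of the installation check across two runtimes; runtime names and the server reference are examples, not an exhaustive list:

from apm_cli.registry.operations import MCPServerOperations

ops = MCPServerOperations()
pending = ops.check_servers_needing_installation(
    target_runtimes=["copilot", "codex"],
    server_references=["github-mcp-server"],
)
print(pending)  # e.g. ['github-mcp-server'] if any target runtime lacks that server ID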
|
||||
|
||||
def _get_installed_server_ids(self, target_runtimes: List[str]) -> Set[str]:
|
||||
"""Get all installed server IDs across target runtimes.
|
||||
|
||||
Args:
|
||||
target_runtimes: List of runtimes to check
|
||||
|
||||
Returns:
|
||||
Set of server IDs that are currently installed
|
||||
"""
|
||||
installed_ids = set()
|
||||
|
||||
# Import here to avoid circular imports
|
||||
try:
|
||||
from ..factory import ClientFactory
|
||||
except ImportError:
|
||||
return installed_ids
|
||||
|
||||
for runtime in target_runtimes:
|
||||
try:
|
||||
client = ClientFactory.create_client(runtime)
|
||||
config = client.get_current_config()
|
||||
|
||||
if isinstance(config, dict):
|
||||
if runtime == 'copilot':
|
||||
# Copilot stores servers in mcpServers object in mcp-config.json
|
||||
mcp_servers = config.get("mcpServers", {})
|
||||
for server_name, server_config in mcp_servers.items():
|
||||
if isinstance(server_config, dict):
|
||||
server_id = server_config.get("id")
|
||||
if server_id:
|
||||
installed_ids.add(server_id)
|
||||
|
||||
elif runtime == 'codex':
|
||||
# Codex stores servers as mcp_servers.{name} sections in config.toml
|
||||
mcp_servers = config.get("mcp_servers", {})
|
||||
for server_name, server_config in mcp_servers.items():
|
||||
if isinstance(server_config, dict):
|
||||
server_id = server_config.get("id")
|
||||
if server_id:
|
||||
installed_ids.add(server_id)
|
||||
|
||||
elif runtime == 'vscode':
|
||||
# VS Code stores servers in settings.json with different structure
|
||||
# Check both mcpServers and any nested structure
|
||||
mcp_servers = config.get("mcpServers", {})
|
||||
for server_name, server_config in mcp_servers.items():
|
||||
if isinstance(server_config, dict):
|
||||
server_id = (
|
||||
server_config.get("id") or
|
||||
server_config.get("serverId") or
|
||||
server_config.get("server_id")
|
||||
)
|
||||
if server_id:
|
||||
installed_ids.add(server_id)
|
||||
|
||||
except Exception:
|
||||
# If we can't read a runtime's config, skip it
|
||||
continue
|
||||
|
||||
return installed_ids
|
||||
|
||||
def validate_servers_exist(self, server_references: List[str]) -> Tuple[List[str], List[str]]:
|
||||
"""Validate that all servers exist in the registry before attempting installation.
|
||||
|
||||
This implements fail-fast validation similar to npm's behavior.
|
||||
|
||||
Args:
|
||||
server_references: List of MCP server references to validate
|
||||
|
||||
Returns:
|
||||
Tuple of (valid_servers, invalid_servers)
|
||||
"""
|
||||
valid_servers = []
|
||||
invalid_servers = []
|
||||
|
||||
for server_ref in server_references:
|
||||
try:
|
||||
server_info = self.registry_client.find_server_by_reference(server_ref)
|
||||
if server_info:
|
||||
valid_servers.append(server_ref)
|
||||
else:
|
||||
invalid_servers.append(server_ref)
|
||||
except Exception:
|
||||
invalid_servers.append(server_ref)
|
||||
|
||||
return valid_servers, invalid_servers
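Fail-fast validation in the spirit of npm, sketched with invented server names:

from apm_cli.registry.operations import MCPServerOperations

valid, invalid = MCPServerOperations().validate_servers_exist(
    ["github-mcp-server", "definitely-not-a-real-server"]
)
if invalid:
    raise SystemExit(f"Unknown MCP servers: {', '.join(invalid)}")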
|
||||
|
||||
def batch_fetch_server_info(self, server_references: List[str]) -> Dict[str, Optional[Dict]]:
|
||||
"""Batch fetch server info for all servers to avoid duplicate registry calls.
|
||||
|
||||
Args:
|
||||
server_references: List of MCP server references
|
||||
|
||||
Returns:
|
||||
Dictionary mapping server reference to server info (or None if not found)
|
||||
"""
|
||||
server_info_cache = {}
|
||||
|
||||
for server_ref in server_references:
|
||||
try:
|
||||
server_info = self.registry_client.find_server_by_reference(server_ref)
|
||||
server_info_cache[server_ref] = server_info
|
||||
except Exception:
|
||||
server_info_cache[server_ref] = None
|
||||
|
||||
return server_info_cache
|
||||
|
||||
def collect_runtime_variables(self, server_references: List[str], server_info_cache: Optional[Dict[str, Optional[Dict]]] = None) -> Dict[str, str]:
|
||||
"""Collect runtime variables from runtime_arguments.variables fields.
|
||||
|
||||
These are NOT environment variables but CLI argument placeholders that need
|
||||
to be substituted directly into the command arguments (e.g., {ado_org}).
|
||||
|
||||
Args:
|
||||
server_references: List of MCP server references
|
||||
server_info_cache: Pre-fetched server info to avoid duplicate registry calls
|
||||
|
||||
Returns:
|
||||
Dictionary mapping runtime variable names to their values
|
||||
"""
|
||||
all_required_vars = {} # var_name -> {description, required, etc.}
|
||||
|
||||
# Use cached server info if available, otherwise fetch on-demand
|
||||
if server_info_cache is None:
|
||||
server_info_cache = self.batch_fetch_server_info(server_references)
|
||||
|
||||
# Collect all unique runtime variables from runtime_arguments
|
||||
for server_ref in server_references:
|
||||
try:
|
||||
server_info = server_info_cache.get(server_ref)
|
||||
if not server_info:
|
||||
continue
|
||||
|
||||
# Extract runtime variables from runtime_arguments
|
||||
packages = server_info.get("packages", [])
|
||||
for package in packages:
|
||||
if isinstance(package, dict):
|
||||
runtime_arguments = package.get("runtime_arguments", [])
|
||||
for arg in runtime_arguments:
|
||||
if isinstance(arg, dict) and "variables" in arg:
|
||||
variables = arg.get("variables", {})
|
||||
for var_name, var_info in variables.items():
|
||||
if isinstance(var_info, dict):
|
||||
all_required_vars[var_name] = {
|
||||
"description": var_info.get("description", ""),
|
||||
"required": var_info.get("is_required", True)
|
||||
}
|
||||
|
||||
except Exception:
|
||||
# Skip servers we can't analyze
|
||||
continue
|
||||
|
||||
# Prompt user for each runtime variable
|
||||
if all_required_vars:
|
||||
return self._prompt_for_environment_variables(all_required_vars)
|
||||
|
||||
return {}
|
||||
|
||||
def collect_environment_variables(self, server_references: List[str], server_info_cache: Optional[Dict[str, Optional[Dict]]] = None) -> Dict[str, str]:
|
||||
"""Collect environment variables needed by the specified servers.
|
||||
|
||||
Args:
|
||||
server_references: List of MCP server references
|
||||
server_info_cache: Pre-fetched server info to avoid duplicate registry calls
|
||||
|
||||
Returns:
|
||||
Dictionary mapping environment variable names to their values
|
||||
"""
|
||||
shared_env_vars = {}
|
||||
all_required_vars = {} # var_name -> {description, required, etc.}
|
||||
|
||||
# Use cached server info if available, otherwise fetch on-demand
|
||||
if server_info_cache is None:
|
||||
server_info_cache = self.batch_fetch_server_info(server_references)
|
||||
|
||||
# Collect all unique environment variables needed
|
||||
for server_ref in server_references:
|
||||
try:
|
||||
server_info = server_info_cache.get(server_ref)
|
||||
if not server_info:
|
||||
continue
|
||||
|
||||
# Extract environment variables from Docker args (legacy support)
|
||||
if "docker" in server_info and "args" in server_info["docker"]:
|
||||
docker_args = server_info["docker"]["args"]
|
||||
if isinstance(docker_args, list):
|
||||
for arg in docker_args:
|
||||
if isinstance(arg, str) and arg.startswith("${") and arg.endswith("}"):
|
||||
var_name = arg[2:-1] # Remove ${ and }
|
||||
if var_name not in all_required_vars:
|
||||
all_required_vars[var_name] = {
|
||||
"description": f"Environment variable for {server_info.get('name', server_ref)}",
|
||||
"required": True
|
||||
}
|
||||
|
||||
# Check packages for environment variables (preferred method)
|
||||
packages = server_info.get("packages", [])
|
||||
for package in packages:
|
||||
if isinstance(package, dict):
|
||||
# Try both camelCase and snake_case field names
|
||||
env_vars = package.get("environmentVariables", []) or package.get("environment_variables", [])
|
||||
for env_var in env_vars:
|
||||
if isinstance(env_var, dict) and "name" in env_var:
|
||||
var_name = env_var["name"]
|
||||
all_required_vars[var_name] = {
|
||||
"description": env_var.get("description", ""),
|
||||
"required": env_var.get("required", True)
|
||||
}
|
||||
|
||||
except Exception:
|
||||
# Skip servers we can't analyze
|
||||
continue
|
||||
|
||||
# Prompt user for each environment variable
|
||||
if all_required_vars:
|
||||
shared_env_vars = self._prompt_for_environment_variables(all_required_vars)
|
||||
|
||||
return shared_env_vars
|
||||
|
||||
def _prompt_for_environment_variables(self, required_vars: Dict[str, Dict]) -> Dict[str, str]:
|
||||
"""Prompt user for environment variables.
|
||||
|
||||
Args:
|
||||
required_vars: Dictionary mapping var names to their metadata
|
||||
|
||||
Returns:
|
||||
Dictionary mapping variable names to their values
|
||||
"""
|
||||
env_vars = {}
|
||||
|
||||
# Check if we're in E2E test mode or CI environment - don't prompt interactively
|
||||
is_e2e_tests = os.getenv('APM_E2E_TESTS', '').lower() in ('1', 'true', 'yes')
|
||||
is_ci_environment = any(os.getenv(var) for var in ['CI', 'GITHUB_ACTIONS', 'TRAVIS', 'JENKINS_URL', 'BUILDKITE'])
|
||||
|
||||
if is_e2e_tests or is_ci_environment:
|
||||
# In E2E tests or CI, provide reasonable defaults instead of prompting
|
||||
for var_name in sorted(required_vars.keys()):
|
||||
var_info = required_vars[var_name]
|
||||
existing_value = os.getenv(var_name)
|
||||
|
||||
if existing_value:
|
||||
env_vars[var_name] = existing_value
|
||||
else:
|
||||
# Provide sensible defaults for known variables
|
||||
if var_name == 'GITHUB_DYNAMIC_TOOLSETS':
|
||||
env_vars[var_name] = '1' # Enable dynamic toolsets for GitHub MCP server
|
||||
elif 'token' in var_name.lower() or 'key' in var_name.lower():
|
||||
# For tokens/keys, try environment defaults with fallback chain
|
||||
# Priority: GITHUB_APM_PAT (APM modules) > GITHUB_TOKEN (user tokens)
|
||||
env_vars[var_name] = os.getenv('GITHUB_APM_PAT') or os.getenv('GITHUB_TOKEN', '')
|
||||
else:
|
||||
# For other variables, use empty string or reasonable default
|
||||
env_vars[var_name] = ''
|
||||
|
||||
if is_e2e_tests:
|
||||
print("E2E test mode detected")
|
||||
else:
|
||||
print("CI environment detected")
|
||||
|
||||
return env_vars
|
||||
|
||||
try:
|
||||
# Try to use Rich for better prompts
|
||||
from rich.console import Console
|
||||
from rich.prompt import Prompt
|
||||
|
||||
console = Console()
|
||||
console.print("Environment variables needed:", style="cyan")
|
||||
|
||||
for var_name in sorted(required_vars.keys()):
|
||||
var_info = required_vars[var_name]
|
||||
description = var_info.get("description", "")
|
||||
required = var_info.get("required", True)
|
||||
|
||||
# Check if already set in environment
|
||||
existing_value = os.getenv(var_name)
|
||||
|
||||
if existing_value:
|
||||
console.print(f" ✅ {var_name}: [dim]using existing value[/dim]")
|
||||
env_vars[var_name] = existing_value
|
||||
else:
|
||||
# Determine if this looks like a password/secret
|
||||
is_sensitive = any(keyword in var_name.lower()
|
||||
for keyword in ['password', 'secret', 'key', 'token', 'api'])
|
||||
|
||||
prompt_text = f" {var_name}"
|
||||
if description:
|
||||
prompt_text += f" ({description})"
|
||||
|
||||
if required:
|
||||
value = Prompt.ask(prompt_text, password=is_sensitive)
|
||||
else:
|
||||
value = Prompt.ask(prompt_text, default="", password=is_sensitive)
|
||||
|
||||
env_vars[var_name] = value
|
||||
|
||||
console.print()
|
||||
|
||||
except ImportError:
|
||||
# Fallback to simple input
|
||||
import click
|
||||
|
||||
click.echo("Environment variables needed:")
|
||||
|
||||
for var_name in sorted(required_vars.keys()):
|
||||
var_info = required_vars[var_name]
|
||||
description = var_info.get("description", "")
|
||||
|
||||
existing_value = os.getenv(var_name)
|
||||
|
||||
if existing_value:
|
||||
click.echo(f" ✅ {var_name}: using existing value")
|
||||
env_vars[var_name] = existing_value
|
||||
else:
|
||||
prompt_text = f" {var_name}"
|
||||
if description:
|
||||
prompt_text += f" ({description})"
|
||||
|
||||
# Simple input for fallback
|
||||
is_sensitive = any(keyword in var_name.lower()
|
||||
for keyword in ['password', 'secret', 'key', 'token', 'api'])
|
||||
|
||||
value = click.prompt(prompt_text, hide_input=is_sensitive, default="", show_default=False)
|
||||
env_vars[var_name] = value
|
||||
|
||||
click.echo()
|
||||
|
||||
return env_vars
|
||||
src/apm_cli/utils/__init__.py (Normal file, 25 lines)
@@ -0,0 +1,25 @@
|
||||
"""Utility modules for APM CLI."""
|
||||
|
||||
from .console import (
|
||||
_rich_success,
|
||||
_rich_error,
|
||||
_rich_warning,
|
||||
_rich_info,
|
||||
_rich_echo,
|
||||
_rich_panel,
|
||||
_create_files_table,
|
||||
_get_console,
|
||||
STATUS_SYMBOLS
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
'_rich_success',
|
||||
'_rich_error',
|
||||
'_rich_warning',
|
||||
'_rich_info',
|
||||
'_rich_echo',
|
||||
'_rich_panel',
|
||||
'_create_files_table',
|
||||
'_get_console',
|
||||
'STATUS_SYMBOLS'
|
||||
]
|
||||
src/apm_cli/utils/console.py (Normal file, 159 lines)
@@ -0,0 +1,159 @@
|
||||
"""Console utility functions for formatting and output."""
|
||||
|
||||
import click
|
||||
import sys
|
||||
from typing import Optional, Any
|
||||
|
||||
# Rich library imports with fallbacks
|
||||
try:
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.table import Table
|
||||
from rich import print as rich_print
|
||||
RICH_AVAILABLE = True
|
||||
except ImportError:
|
||||
RICH_AVAILABLE = False
|
||||
Console = Any
|
||||
Panel = Any
|
||||
Table = Any
|
||||
rich_print = None
|
||||
|
||||
# Colorama imports for fallback
|
||||
try:
|
||||
from colorama import Fore, Style, init
|
||||
init(autoreset=True)
|
||||
COLORAMA_AVAILABLE = True
|
||||
except ImportError:
|
||||
COLORAMA_AVAILABLE = False
|
||||
Fore = None
|
||||
Style = None
|
||||
|
||||
|
||||
# Status symbols for consistent iconography
|
||||
STATUS_SYMBOLS = {
|
||||
'success': '✨',
|
||||
'sparkles': '✨',
|
||||
'running': '🚀',
|
||||
'gear': '⚙️',
|
||||
'info': '💡',
|
||||
'warning': '⚠️',
|
||||
'error': '❌',
|
||||
'check': '✅',
|
||||
'list': '📋',
|
||||
'preview': '👀',
|
||||
'robot': '🤖',
|
||||
'metrics': '📊'
|
||||
}
|
||||
|
||||
|
||||
def _get_console() -> Optional[Any]:
|
||||
"""Get Rich console instance if available."""
|
||||
if RICH_AVAILABLE:
|
||||
try:
|
||||
return Console()
|
||||
except Exception:
|
||||
pass
|
||||
return None
|
||||
|
||||
|
||||
def _rich_echo(message: str, color: str = "white", style: str = None, bold: bool = False, symbol: str = None):
|
||||
"""Echo message with Rich formatting or colorama fallback."""
|
||||
# Handle backward compatibility - if style is provided, use it as color
|
||||
if style is not None:
|
||||
color = style
|
||||
|
||||
if symbol and symbol in STATUS_SYMBOLS:
|
||||
symbol_char = STATUS_SYMBOLS[symbol]
|
||||
message = f"{symbol_char} {message}"
|
||||
|
||||
console = _get_console()
|
||||
if console:
|
||||
try:
|
||||
style_str = color
|
||||
if bold:
|
||||
style_str = f"bold {color}"
|
||||
console.print(message, style=style_str)
|
||||
return
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Colorama fallback
|
||||
if COLORAMA_AVAILABLE and Fore:
|
||||
color_map = {
|
||||
'red': Fore.RED,
|
||||
'green': Fore.GREEN,
|
||||
'yellow': Fore.YELLOW,
|
||||
'blue': Fore.BLUE,
|
||||
'cyan': Fore.CYAN,
|
||||
'white': Fore.WHITE,
|
||||
'magenta': Fore.MAGENTA,
|
||||
'muted': Fore.WHITE, # Add muted mapping
|
||||
'info': Fore.BLUE
|
||||
}
|
||||
color_code = color_map.get(color, Fore.WHITE)
|
||||
style_code = Style.BRIGHT if bold else ""
|
||||
click.echo(f"{color_code}{style_code}{message}{Style.RESET_ALL}")
|
||||
else:
|
||||
click.echo(message)
|
||||
|
||||
|
||||
def _rich_success(message: str, symbol: str = None):
|
||||
"""Display success message with green color and bold styling."""
|
||||
_rich_echo(message, color="green", symbol=symbol, bold=True)
|
||||
|
||||
|
||||
def _rich_error(message: str, symbol: str = None):
|
||||
"""Display error message with red color."""
|
||||
_rich_echo(message, color="red", symbol=symbol)
|
||||
|
||||
|
||||
def _rich_warning(message: str, symbol: str = None):
|
||||
"""Display warning message with yellow color."""
|
||||
_rich_echo(message, color="yellow", symbol=symbol)
|
||||
|
||||
|
||||
def _rich_info(message: str, symbol: str = None):
|
||||
"""Display info message with blue color."""
|
||||
_rich_echo(message, color="blue", symbol=symbol)
|
||||
|
||||
|
||||
def _rich_panel(content: str, title: str = None, style: str = "cyan"):
|
||||
"""Display content in a Rich panel with fallback."""
|
||||
console = _get_console()
|
||||
if console and Panel:
|
||||
try:
|
||||
panel = Panel(content, title=title, border_style=style)
|
||||
console.print(panel)
|
||||
return
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Fallback to simple text display
|
||||
if title:
|
||||
click.echo(f"\n--- {title} ---")
|
||||
click.echo(content)
|
||||
if title:
|
||||
click.echo("-" * (len(title) + 8))
|
||||
|
||||
|
||||
def _create_files_table(files_data: list, title: str = "Files") -> Optional[Any]:
|
||||
"""Create a Rich table for file display."""
|
||||
if not RICH_AVAILABLE or not Table:
|
||||
return None
|
||||
|
||||
try:
|
||||
table = Table(title=f"📋 {title}", show_header=True, header_style="bold cyan")
|
||||
table.add_column("File", style="bold white")
|
||||
table.add_column("Description", style="white")
|
||||
|
||||
for file_info in files_data:
|
||||
if isinstance(file_info, dict):
|
||||
table.add_row(file_info.get('name', ''), file_info.get('description', ''))
|
||||
elif isinstance(file_info, (list, tuple)) and len(file_info) >= 2:
|
||||
table.add_row(str(file_info[0]), str(file_info[1]))
|
||||
else:
|
||||
table.add_row(str(file_info), "")
|
||||
|
||||
return table
|
||||
except Exception:
|
||||
return None
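A sketch of the console helpers in use; the messages are invented, and the point is that output degrades gracefully when Rich or colorama is unavailable:

from apm_cli.utils.console import _rich_success, _rich_warning, _rich_panel

_rich_success("Compiled 12 primitives", symbol="check")
_rich_warning("2 conflicts resolved in favour of local files", symbol="warning")
_rich_panel("apm compile finished", title="Summary")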
|
||||
src/apm_cli/utils/helpers.py (Normal file, 101 lines)
@@ -0,0 +1,101 @@
|
||||
"""Helper utility functions for APM-CLI."""
|
||||
|
||||
import os
|
||||
import platform
|
||||
import subprocess
|
||||
import shutil
|
||||
import sys
|
||||
|
||||
|
||||
def is_tool_available(tool_name):
|
||||
"""Check if a command-line tool is available.
|
||||
|
||||
Args:
|
||||
tool_name (str): Name of the tool to check.
|
||||
|
||||
Returns:
|
||||
bool: True if the tool is available, False otherwise.
|
||||
"""
|
||||
# First try using shutil.which which is more reliable across platforms
|
||||
if shutil.which(tool_name):
|
||||
return True
|
||||
|
||||
# Fall back to subprocess approach if shutil.which returns None
|
||||
try:
|
||||
# Different approaches for different platforms
|
||||
if sys.platform == 'win32':
|
||||
# On Windows, use 'where' command but WITHOUT shell=True
|
||||
result = subprocess.run(['where', tool_name],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
shell=False,  # 'where' does not need a shell; avoids shell-injection risk
|
||||
check=False)
|
||||
return result.returncode == 0
|
||||
else:
|
||||
# On Unix-like systems, use 'which' command
|
||||
result = subprocess.run(['which', tool_name],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
check=False)
|
||||
return result.returncode == 0
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def get_available_package_managers():
|
||||
"""Get available package managers on the system.
|
||||
|
||||
Returns:
|
||||
dict: Dictionary of available package managers and their paths.
|
||||
"""
|
||||
package_managers = {}
|
||||
|
||||
# Check for Python package managers
|
||||
if is_tool_available("uv"):
|
||||
package_managers["uv"] = "uv"
|
||||
if is_tool_available("pip"):
|
||||
package_managers["pip"] = "pip"
|
||||
if is_tool_available("pipx"):
|
||||
package_managers["pipx"] = "pipx"
|
||||
|
||||
# Check for JavaScript package managers
|
||||
if is_tool_available("npm"):
|
||||
package_managers["npm"] = "npm"
|
||||
if is_tool_available("yarn"):
|
||||
package_managers["yarn"] = "yarn"
|
||||
if is_tool_available("pnpm"):
|
||||
package_managers["pnpm"] = "pnpm"
|
||||
|
||||
# Check for system package managers
|
||||
if is_tool_available("brew"): # macOS
|
||||
package_managers["brew"] = "brew"
|
||||
if is_tool_available("apt"): # Debian/Ubuntu
|
||||
package_managers["apt"] = "apt"
|
||||
if is_tool_available("yum"): # CentOS/RHEL
|
||||
package_managers["yum"] = "yum"
|
||||
if is_tool_available("dnf"): # Fedora
|
||||
package_managers["dnf"] = "dnf"
|
||||
if is_tool_available("apk"): # Alpine
|
||||
package_managers["apk"] = "apk"
|
||||
if is_tool_available("pacman"): # Arch
|
||||
package_managers["pacman"] = "pacman"
|
||||
|
||||
return package_managers
|
||||
|
||||
|
||||
def detect_platform():
|
||||
"""Detect the current platform.
|
||||
|
||||
Returns:
|
||||
str: Platform name (macos, linux, windows).
|
||||
"""
|
||||
system = platform.system().lower()
|
||||
|
||||
if system == "darwin":
|
||||
return "macos"
|
||||
elif system == "linux":
|
||||
return "linux"
|
||||
elif system == "windows":
|
||||
return "windows"
|
||||
else:
|
||||
return "unknown"
|
||||
src/apm_cli/version.py (Normal file, 54 lines)
@@ -0,0 +1,54 @@
|
||||
"""Version management for APM CLI."""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Build-time version constant (will be injected during build)
|
||||
# This avoids TOML parsing overhead during runtime
|
||||
__BUILD_VERSION__ = None
|
||||
|
||||
|
||||
def get_version() -> str:
|
||||
"""
|
||||
Get the current version efficiently.
|
||||
|
||||
First tries build-time constant, then falls back to pyproject.toml parsing.
|
||||
|
||||
Returns:
|
||||
str: Version string
|
||||
"""
|
||||
# Use build-time constant if available (fastest path)
|
||||
if __BUILD_VERSION__:
|
||||
return __BUILD_VERSION__
|
||||
|
||||
# Fallback to reading from pyproject.toml (for development)
|
||||
try:
|
||||
# Handle PyInstaller bundle vs development
|
||||
if getattr(sys, 'frozen', False):
|
||||
# Running in PyInstaller bundle
|
||||
pyproject_path = Path(sys._MEIPASS) / 'pyproject.toml'
|
||||
else:
|
||||
# Running in development
|
||||
pyproject_path = Path(__file__).parent.parent.parent / "pyproject.toml"
|
||||
|
||||
if pyproject_path.exists():
|
||||
# Simple regex parsing instead of full TOML library
|
||||
with open(pyproject_path, 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
|
||||
# Look for version = "x.y.z" pattern (including PEP 440 prereleases)
|
||||
import re
|
||||
match = re.search(r'version\s*=\s*["\']([^"\']+)["\']', content)
|
||||
if match:
|
||||
version = match.group(1)
|
||||
# Validate PEP 440 version patterns: x.y.z or x.y.z{a|b|rc}N
|
||||
if re.match(r'^\d+\.\d+\.\d+(a\d+|b\d+|rc\d+)?$', version):
|
||||
return version
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return "unknown"
|
||||
|
||||
|
||||
# For backward compatibility
|
||||
__version__ = get_version()
|
||||
1
src/apm_cli/workflow/__init__.py
Normal file
1
src/apm_cli/workflow/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Workflow management package."""
|
||||
100
src/apm_cli/workflow/discovery.py
Normal file
100
src/apm_cli/workflow/discovery.py
Normal file
@@ -0,0 +1,100 @@
|
||||
"""Discovery functionality for workflow files."""
|
||||
|
||||
import os
|
||||
import glob
|
||||
from .parser import parse_workflow_file
|
||||
|
||||
|
||||
def discover_workflows(base_dir=None):
|
||||
"""Find all .prompt.md files following VSCode's .github/prompts convention.
|
||||
|
||||
Args:
|
||||
base_dir (str, optional): Base directory to search in. Defaults to current directory.
|
||||
|
||||
Returns:
|
||||
list: List of WorkflowDefinition objects.
|
||||
"""
|
||||
if base_dir is None:
|
||||
base_dir = os.getcwd()
|
||||
|
||||
# Support VSCode's .github/prompts convention with .prompt.md files
|
||||
prompt_patterns = [
|
||||
"**/.github/prompts/*.prompt.md", # VSCode convention: .github/prompts/
|
||||
"**/*.prompt.md" # Generic .prompt.md files
|
||||
]
|
||||
|
||||
workflow_files = []
|
||||
for pattern in prompt_patterns:
|
||||
workflow_files.extend(glob.glob(os.path.join(base_dir, pattern), recursive=True))
|
||||
|
||||
# Remove duplicates while preserving order
|
||||
seen = set()
|
||||
unique_files = []
|
||||
for file_path in workflow_files:
|
||||
if file_path not in seen:
|
||||
seen.add(file_path)
|
||||
unique_files.append(file_path)
|
||||
|
||||
workflows = []
|
||||
for file_path in unique_files:
|
||||
try:
|
||||
workflow = parse_workflow_file(file_path)
|
||||
workflows.append(workflow)
|
||||
except Exception as e:
|
||||
print(f"Warning: Failed to parse {file_path}: {e}")
|
||||
|
||||
return workflows
|
||||
|
||||
|
||||
def create_workflow_template(name, output_dir=None, description=None, use_vscode_convention=True):
|
||||
"""Create a basic workflow template file following VSCode's .github/prompts convention.
|
||||
|
||||
Args:
|
||||
name (str): Name of the workflow.
|
||||
output_dir (str, optional): Directory to create the file in. Defaults to current directory.
|
||||
description (str, optional): Description for the workflow. Defaults to generic description.
|
||||
use_vscode_convention (bool): Whether to use VSCode's .github/prompts structure. Defaults to True.
|
||||
|
||||
Returns:
|
||||
str: Path to the created file.
|
||||
"""
|
||||
if output_dir is None:
|
||||
output_dir = os.getcwd()
|
||||
|
||||
title = name.replace("-", " ").title()
|
||||
workflow_description = description or f"Workflow for {title.lower()}"
|
||||
|
||||
template = f"""---
|
||||
description: {workflow_description}
|
||||
author: Your Name
|
||||
mcp:
|
||||
- package1
|
||||
- package2
|
||||
input:
|
||||
- param1
|
||||
- param2
|
||||
---
|
||||
|
||||
# {title}
|
||||
|
||||
1. Step One:
|
||||
- Details for step one
|
||||
- Use parameters like this: ${{input:param1}}
|
||||
|
||||
2. Step Two:
|
||||
- Details for step two
|
||||
"""
|
||||
|
||||
if use_vscode_convention:
|
||||
# Create .github/prompts directory structure
|
||||
prompts_dir = os.path.join(output_dir, ".github", "prompts")
|
||||
os.makedirs(prompts_dir, exist_ok=True)
|
||||
file_path = os.path.join(prompts_dir, f"{name}.prompt.md")
|
||||
else:
|
||||
# Create .prompt.md file in output directory
|
||||
file_path = os.path.join(output_dir, f"{name}.prompt.md")
|
||||
|
||||
with open(file_path, "w", encoding='utf-8') as f:
|
||||
f.write(template)
|
||||
|
||||
return file_path
|
||||
92
src/apm_cli/workflow/parser.py
Normal file
92
src/apm_cli/workflow/parser.py
Normal file
@@ -0,0 +1,92 @@
|
||||
"""Parser for workflow definition files."""
|
||||
|
||||
import os
|
||||
import frontmatter
|
||||
|
||||
|
||||
class WorkflowDefinition:
|
||||
"""Simple container for workflow data."""
|
||||
|
||||
def __init__(self, name, file_path, metadata, content):
|
||||
"""Initialize a workflow definition.
|
||||
|
||||
Args:
|
||||
name (str): Name of the workflow.
|
||||
file_path (str): Path to the workflow file.
|
||||
metadata (dict): Metadata from the frontmatter.
|
||||
content (str): Content of the workflow file.
|
||||
"""
|
||||
self.name = name
|
||||
self.file_path = file_path
|
||||
self.description = metadata.get('description', '')
|
||||
self.author = metadata.get('author', '')
|
||||
self.mcp_dependencies = metadata.get('mcp', [])
|
||||
self.input_parameters = metadata.get('input', [])
|
||||
self.llm_model = metadata.get('llm', None) # LLM model specified in frontmatter
|
||||
self.content = content
|
||||
|
||||
def validate(self):
|
||||
"""Basic validation of required fields.
|
||||
|
||||
Returns:
|
||||
list: List of validation errors.
|
||||
"""
|
||||
errors = []
|
||||
if not self.description:
|
||||
errors.append("Missing 'description' in frontmatter")
|
||||
# Input parameters are optional, so we don't check for them
|
||||
return errors
|
||||
|
||||
|
||||
def parse_workflow_file(file_path):
|
||||
"""Parse a workflow file.
|
||||
|
||||
Args:
|
||||
file_path (str): Path to the workflow file.
|
||||
|
||||
Returns:
|
||||
WorkflowDefinition: Parsed workflow definition.
|
||||
"""
|
||||
try:
|
||||
with open(file_path, 'r', encoding='utf-8') as f:
|
||||
post = frontmatter.load(f)
|
||||
|
||||
# Extract name based on file structure
|
||||
name = _extract_workflow_name(file_path)
|
||||
metadata = post.metadata
|
||||
content = post.content
|
||||
|
||||
return WorkflowDefinition(name, file_path, metadata, content)
|
||||
except Exception as e:
|
||||
raise ValueError(f"Failed to parse workflow file: {e}")
|
||||
|
||||
|
||||
def _extract_workflow_name(file_path):
|
||||
"""Extract workflow name from file path based on naming conventions.
|
||||
|
||||
Args:
|
||||
file_path (str): Path to the workflow file.
|
||||
|
||||
Returns:
|
||||
str: Extracted workflow name.
|
||||
"""
|
||||
# Normalize path separators
|
||||
normalized_path = os.path.normpath(file_path)
|
||||
path_parts = normalized_path.split(os.sep)
|
||||
|
||||
# Check if it's a VSCode .github/prompts convention
|
||||
if '.github' in path_parts and 'prompts' in path_parts:
|
||||
# For .github/prompts/name.prompt.md, extract name from filename
|
||||
github_idx = path_parts.index('.github')
|
||||
if (github_idx + 1 < len(path_parts) and
|
||||
path_parts[github_idx + 1] == 'prompts'):
|
||||
basename = os.path.basename(file_path)
|
||||
if basename.endswith('.prompt.md'):
|
||||
return basename.replace('.prompt.md', '')
|
||||
|
||||
# For .prompt.md files, extract name from filename
|
||||
if file_path.endswith('.prompt.md'):
|
||||
return os.path.basename(file_path).replace('.prompt.md', '')
|
||||
|
||||
# Fallback: use filename without extension
|
||||
return os.path.splitext(os.path.basename(file_path))[0]
|
||||
193
src/apm_cli/workflow/runner.py
Normal file
193
src/apm_cli/workflow/runner.py
Normal file
@@ -0,0 +1,193 @@
|
||||
"""Runner for workflow execution."""
|
||||
|
||||
import os
|
||||
import re
|
||||
from colorama import Fore, Style
|
||||
from .parser import WorkflowDefinition
|
||||
from .discovery import discover_workflows
|
||||
from ..runtime.factory import RuntimeFactory
|
||||
|
||||
# Color constants (matching cli.py)
|
||||
WARNING = f"{Fore.YELLOW}"
|
||||
RESET = f"{Style.RESET_ALL}"
|
||||
|
||||
|
||||
def substitute_parameters(content, params):
|
||||
"""Simple string-based parameter substitution.
|
||||
|
||||
Args:
|
||||
content (str): Content to substitute parameters in.
|
||||
params (dict): Parameters to substitute.
|
||||
|
||||
Returns:
|
||||
str: Content with parameters substituted.
|
||||
"""
|
||||
result = content
|
||||
for key, value in params.items():
|
||||
placeholder = f"${{input:{key}}}"
|
||||
result = result.replace(placeholder, str(value))
|
||||
return result
|
||||
|
||||
|
||||
def collect_parameters(workflow_def, provided_params=None):
|
||||
"""Collect parameters from command line or prompt for missing ones.
|
||||
|
||||
Args:
|
||||
workflow_def (WorkflowDefinition): Workflow definition.
|
||||
provided_params (dict, optional): Parameters provided from command line.
|
||||
|
||||
Returns:
|
||||
dict: Complete set of parameters.
|
||||
"""
|
||||
provided_params = provided_params or {}
|
||||
|
||||
# If there are no input parameters defined, return the provided ones
|
||||
if not workflow_def.input_parameters:
|
||||
return provided_params
|
||||
|
||||
# Convert list parameters to dict if they're just names
|
||||
if isinstance(workflow_def.input_parameters, list):
|
||||
# List of parameter names
|
||||
param_names = workflow_def.input_parameters
|
||||
else:
|
||||
# Already a dict
|
||||
param_names = list(workflow_def.input_parameters.keys())
|
||||
|
||||
missing_params = [p for p in param_names if p not in provided_params]
|
||||
|
||||
if missing_params:
|
||||
print(f"Workflow '{workflow_def.name}' requires the following parameters:")
|
||||
for param in missing_params:
|
||||
value = input(f" {param}: ")
|
||||
provided_params[param] = value
|
||||
|
||||
return provided_params
|
||||
|
||||
|
||||
def find_workflow_by_name(name, base_dir=None):
|
||||
"""Find a workflow by name or file path.
|
||||
|
||||
Args:
|
||||
name (str): Name of the workflow or file path.
|
||||
base_dir (str, optional): Base directory to search in.
|
||||
|
||||
Returns:
|
||||
WorkflowDefinition: Workflow definition if found, None otherwise.
|
||||
"""
|
||||
if base_dir is None:
|
||||
base_dir = os.getcwd()
|
||||
|
||||
# If name looks like a file path, try to parse it directly
|
||||
if name.endswith('.prompt.md') or name.endswith('.workflow.md'):
|
||||
# Handle relative paths
|
||||
if not os.path.isabs(name):
|
||||
name = os.path.join(base_dir, name)
|
||||
|
||||
if os.path.exists(name):
|
||||
try:
|
||||
from .parser import parse_workflow_file
|
||||
return parse_workflow_file(name)
|
||||
except Exception as e:
|
||||
print(f"Error parsing workflow file {name}: {e}")
|
||||
return None
|
||||
|
||||
# Otherwise, search by name
|
||||
workflows = discover_workflows(base_dir)
|
||||
for workflow in workflows:
|
||||
if workflow.name == name:
|
||||
return workflow
|
||||
return None
|
||||
|
||||
|
||||
def run_workflow(workflow_name, params=None, base_dir=None):
|
||||
"""Run a workflow with parameters.
|
||||
|
||||
Args:
|
||||
workflow_name (str): Name of the workflow to run.
|
||||
params (dict, optional): Parameters to use.
|
||||
base_dir (str, optional): Base directory to search for workflows.
|
||||
|
||||
Returns:
|
||||
tuple: (bool, str) Success status and result content.
|
||||
"""
|
||||
params = params or {}
|
||||
|
||||
# Extract runtime and model information
|
||||
runtime_name = params.get('_runtime', None)
|
||||
fallback_llm = params.get('_llm', None)
|
||||
|
||||
# Find the workflow
|
||||
workflow = find_workflow_by_name(workflow_name, base_dir)
|
||||
if not workflow:
|
||||
return False, f"Workflow '{workflow_name}' not found."
|
||||
|
||||
# Validate the workflow
|
||||
errors = workflow.validate()
|
||||
if errors:
|
||||
return False, f"Invalid workflow: {', '.join(errors)}"
|
||||
|
||||
# Collect missing parameters
|
||||
all_params = collect_parameters(workflow, params)
|
||||
|
||||
# Substitute parameters
|
||||
result_content = substitute_parameters(workflow.content, all_params)
|
||||
|
||||
# Determine the LLM model to use
|
||||
# Priority: frontmatter llm > --llm flag > runtime default
|
||||
llm_model = workflow.llm_model or fallback_llm
|
||||
|
||||
# Show warning if both frontmatter and --llm flag are specified
|
||||
if workflow.llm_model and fallback_llm:
|
||||
print(f"{WARNING}WARNING: Both frontmatter 'llm: {workflow.llm_model}' and --llm '{fallback_llm}' specified. Using frontmatter value: {workflow.llm_model}{RESET}")
|
||||
|
||||
# Always execute with runtime (use best available if not specified)
|
||||
try:
|
||||
# Use specified runtime type or get best available
|
||||
if runtime_name:
|
||||
# Check if runtime_name is a valid runtime type
|
||||
if RuntimeFactory.runtime_exists(runtime_name):
|
||||
runtime = RuntimeFactory.create_runtime(runtime_name, llm_model)
|
||||
else:
|
||||
# Invalid runtime name - fail with clear error message
|
||||
available_runtimes = [adapter.get_runtime_name() for adapter in RuntimeFactory._RUNTIME_ADAPTERS if adapter.is_available()]
|
||||
return False, f"Invalid runtime '{runtime_name}'. Available runtimes: {', '.join(available_runtimes)}"
|
||||
else:
|
||||
runtime = RuntimeFactory.create_runtime(model_name=llm_model)
|
||||
|
||||
# Execute the prompt with the runtime
|
||||
response = runtime.execute_prompt(result_content)
|
||||
return True, response
|
||||
|
||||
except Exception as e:
|
||||
return False, f"Runtime execution failed: {str(e)}"
|
||||
|
||||
|
||||
def preview_workflow(workflow_name, params=None, base_dir=None):
|
||||
"""Preview a workflow with parameters substituted (without execution).
|
||||
|
||||
Args:
|
||||
workflow_name (str): Name of the workflow to preview.
|
||||
params (dict, optional): Parameters to use.
|
||||
base_dir (str, optional): Base directory to search for workflows.
|
||||
|
||||
Returns:
|
||||
tuple: (bool, str) Success status and processed content.
|
||||
"""
|
||||
params = params or {}
|
||||
|
||||
# Find the workflow
|
||||
workflow = find_workflow_by_name(workflow_name, base_dir)
|
||||
if not workflow:
|
||||
return False, f"Workflow '{workflow_name}' not found."
|
||||
|
||||
# Validate the workflow
|
||||
errors = workflow.validate()
|
||||
if errors:
|
||||
return False, f"Invalid workflow: {', '.join(errors)}"
|
||||
|
||||
# Collect missing parameters
|
||||
all_params = collect_parameters(workflow, params)
|
||||
|
||||
# Substitute parameters and return the processed content
|
||||
result_content = substitute_parameters(workflow.content, all_params)
|
||||
return True, result_content
|
||||
@@ -3,10 +3,20 @@
|
||||
# requires-python = ">=3.11"
|
||||
# dependencies = [
|
||||
# "typer",
|
||||
# "rich",
|
||||
# "rich>=13.0.0",
|
||||
# "platformdirs",
|
||||
# "readchar",
|
||||
# "httpx",
|
||||
# "click>=8.0.0",
|
||||
# "colorama>=0.4.6",
|
||||
# "pyyaml>=6.0.0",
|
||||
# "requests>=2.28.0",
|
||||
# "python-frontmatter>=1.0.0",
|
||||
# "tomli>=1.2.0; python_version<'3.11'",
|
||||
# "toml>=0.10.2",
|
||||
# "rich-click>=1.7.0",
|
||||
# "watchdog>=3.0.0",
|
||||
# "GitPython>=3.1.0",
|
||||
# ]
|
||||
# ///
|
||||
"""
|
||||
@@ -30,7 +40,7 @@ import tempfile
|
||||
import shutil
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
from typing import Optional, Tuple
|
||||
|
||||
import typer
|
||||
import httpx
|
||||
@@ -44,15 +54,32 @@ from rich.table import Table
|
||||
from rich.tree import Tree
|
||||
from typer.core import TyperGroup
|
||||
|
||||
# APM imports
|
||||
from apm_cli.cli import init as apm_init, install as apm_install, compile as apm_compile, prune as apm_prune, uninstall as apm_uninstall
|
||||
from apm_cli.commands.deps import deps as apm_deps
|
||||
import click
|
||||
from click.testing import CliRunner
|
||||
|
||||
# For cross-platform keyboard input
|
||||
import readchar
|
||||
import ssl
|
||||
import truststore
|
||||
|
||||
ssl_context = truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
|
||||
client = httpx.Client(verify=ssl_context)
|
||||
|
||||
# Constants
|
||||
AI_CHOICES = {
|
||||
"copilot": "GitHub Copilot",
|
||||
"claude": "Claude Code",
|
||||
"gemini": "Gemini CLI"
|
||||
"gemini": "Gemini CLI",
|
||||
"cursor": "Cursor"
|
||||
}
|
||||
# Add script type choices
|
||||
SCRIPT_TYPE_CHOICES = {"sh": "POSIX Shell (bash/zsh)", "ps": "PowerShell"}
|
||||
|
||||
# Claude CLI local installation path after migrate-installer
|
||||
CLAUDE_LOCAL_PATH = Path.home() / ".claude" / "local" / "claude"
|
||||
|
||||
# ASCII Art Banner
|
||||
BANNER = """
|
||||
@@ -283,6 +310,240 @@ app = typer.Typer(
|
||||
cls=BannerGroup,
|
||||
)
|
||||
|
||||
@click.group()
|
||||
def apm_click():
|
||||
"""APM - Agent Package Manager commands"""
|
||||
pass
|
||||
|
||||
# Add APM commands to the Click group
|
||||
apm_click.add_command(apm_init, name="init")
|
||||
apm_click.add_command(apm_install, name="install")
|
||||
apm_click.add_command(apm_uninstall, name="uninstall")
|
||||
apm_click.add_command(apm_compile, name="compile")
|
||||
apm_click.add_command(apm_prune, name="prune")
|
||||
apm_click.add_command(apm_deps, name="deps")
|
||||
|
||||
|
||||
# Create APM subcommands as Typer commands
|
||||
apm_app = typer.Typer(
|
||||
name="apm",
|
||||
help="APM - Agent Package Manager commands for context management.",
|
||||
add_completion=False,
|
||||
)
|
||||
|
||||
@apm_app.command("init", context_settings={"allow_extra_args": True, "allow_interspersed_args": False})
|
||||
def apm_init_wrapper(
|
||||
ctx: typer.Context,
|
||||
project_name: str = typer.Argument(None, help="Project name"),
|
||||
force: bool = typer.Option(False, "-f", "--force", help="Overwrite existing files without confirmation"),
|
||||
yes: bool = typer.Option(False, "-y", "--yes", help="Skip interactive questionnaire and use defaults"),
|
||||
):
|
||||
"""Initialize a new APM project"""
|
||||
args = []
|
||||
if project_name:
|
||||
args.append(project_name)
|
||||
if force:
|
||||
args.append("--force")
|
||||
if yes:
|
||||
args.append("--yes")
|
||||
if ctx.args:
|
||||
args.extend(ctx.args)
|
||||
|
||||
_run_apm_command(["init"] + args)
|
||||
|
||||
@apm_app.command("install", context_settings={"allow_extra_args": True, "allow_interspersed_args": False})
|
||||
def apm_install_wrapper(
|
||||
ctx: typer.Context,
|
||||
packages: list[str] = typer.Argument(None, help="APM packages to add and install (owner/repo format)"),
|
||||
runtime: str = typer.Option(None, "--runtime", help="Target specific runtime only (codex, vscode)"),
|
||||
exclude: str = typer.Option(None, "--exclude", help="Exclude specific runtime from installation"),
|
||||
only: str = typer.Option(None, "--only", help="Install only specific dependency type (apm or mcp)"),
|
||||
update: bool = typer.Option(False, "--update", help="Update dependencies to latest Git references"),
|
||||
dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be installed without installing"),
|
||||
):
|
||||
"""Install APM and MCP dependencies from apm.yml.
|
||||
|
||||
Examples:
|
||||
specify apm install # Install existing deps from apm.yml
|
||||
specify apm install github/design-guidelines # Add package and install
|
||||
specify apm install org/pkg1 org/pkg2 # Add multiple packages and install
|
||||
"""
|
||||
args = []
|
||||
|
||||
# Add package arguments first
|
||||
if packages:
|
||||
args.extend(packages)
|
||||
|
||||
if runtime:
|
||||
args.extend(["--runtime", runtime])
|
||||
if exclude:
|
||||
args.extend(["--exclude", exclude])
|
||||
if only:
|
||||
args.extend(["--only", only])
|
||||
if update:
|
||||
args.append("--update")
|
||||
if dry_run:
|
||||
args.append("--dry-run")
|
||||
if ctx.args:
|
||||
args.extend(ctx.args)
|
||||
|
||||
_run_apm_command(["install"] + args)
|
||||
|
||||
@apm_app.command("compile", context_settings={"allow_extra_args": True, "allow_interspersed_args": False})
|
||||
def apm_compile_wrapper(
|
||||
ctx: typer.Context,
|
||||
output: str = typer.Option(None, "-o", "--output", help="Output file path (for single-file mode)"),
|
||||
dry_run: bool = typer.Option(False, "--dry-run", help="🔍 Preview compilation without writing files (shows placement decisions)"),
|
||||
no_links: bool = typer.Option(False, "--no-links", help="Skip markdown link resolution"),
|
||||
chatmode: str = typer.Option(None, "--chatmode", help="Chatmode to prepend to AGENTS.md files"),
|
||||
watch: bool = typer.Option(False, "--watch", help="Auto-regenerate on changes"),
|
||||
validate: bool = typer.Option(False, "--validate", help="Validate primitives without compiling"),
|
||||
with_constitution: bool = typer.Option(True, "--with-constitution/--no-constitution", help="Include Spec Kit constitution block at top if memory/constitution.md present"),
|
||||
single_agents: bool = typer.Option(False, "--single-agents", help="📄 Force single-file compilation (legacy mode)"),
|
||||
verbose: bool = typer.Option(False, "-v", "--verbose", help="🔍 Show detailed source attribution and optimizer analysis"),
|
||||
local_only: bool = typer.Option(False, "--local-only", help="🏠 Ignore dependencies, compile only local primitives"),
|
||||
clean: bool = typer.Option(False, "--clean", help="🧹 Remove orphaned AGENTS.md files that are no longer generated"),
|
||||
):
|
||||
"""Generate AGENTS.md from APM context"""
|
||||
# Build arguments for the Click command
|
||||
args = []
|
||||
if output:
|
||||
args.extend(["-o", output])
|
||||
if dry_run:
|
||||
args.append("--dry-run")
|
||||
if no_links:
|
||||
args.append("--no-links")
|
||||
if chatmode:
|
||||
args.extend(["--chatmode", chatmode])
|
||||
if watch:
|
||||
args.append("--watch")
|
||||
if validate:
|
||||
args.append("--validate")
|
||||
if not with_constitution:
|
||||
args.append("--no-constitution")
|
||||
if single_agents:
|
||||
args.append("--single-agents")
|
||||
if verbose:
|
||||
args.append("--verbose")
|
||||
if local_only:
|
||||
args.append("--local-only")
|
||||
if clean:
|
||||
args.append("--clean")
|
||||
|
||||
# Add any extra arguments
|
||||
if ctx.args:
|
||||
args.extend(ctx.args)
|
||||
|
||||
_run_apm_command(["compile"] + args)
|
||||
|
||||
@apm_app.command("prune", context_settings={"allow_extra_args": True, "allow_interspersed_args": False})
|
||||
def apm_prune_wrapper(
|
||||
ctx: typer.Context,
|
||||
dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be removed without removing"),
|
||||
):
|
||||
"""Remove APM packages not listed in apm.yml.
|
||||
|
||||
This command cleans up the apm_modules/ directory by removing packages that
|
||||
were previously installed but are no longer declared as dependencies in apm.yml.
|
||||
|
||||
Examples:
|
||||
specify apm prune # Remove orphaned packages
|
||||
specify apm prune --dry-run # Show what would be removed
|
||||
"""
|
||||
args = []
|
||||
if dry_run:
|
||||
args.append("--dry-run")
|
||||
|
||||
# Add any extra arguments
|
||||
if ctx.args:
|
||||
args.extend(ctx.args)
|
||||
|
||||
_run_apm_command(["prune"] + args)
|
||||
|
||||
@apm_app.command("uninstall", context_settings={"allow_extra_args": True, "allow_interspersed_args": False})
|
||||
def apm_uninstall_wrapper(
|
||||
ctx: typer.Context,
|
||||
packages: list[str] = typer.Argument(..., help="APM packages to remove (owner/repo format)"),
|
||||
dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be removed without removing"),
|
||||
):
|
||||
"""Remove APM packages from apm.yml and apm_modules.
|
||||
|
||||
This command removes packages from both the apm.yml dependencies list
|
||||
and the apm_modules/ directory. It's the opposite of 'specify apm install <package>'.
|
||||
|
||||
Examples:
|
||||
specify apm uninstall github/design-guidelines # Remove one package
|
||||
specify apm uninstall org/pkg1 org/pkg2 # Remove multiple packages
|
||||
specify apm uninstall github/pkg --dry-run # Show what would be removed
|
||||
"""
|
||||
args = []
|
||||
|
||||
# Add package arguments first
|
||||
if packages:
|
||||
args.extend(packages)
|
||||
|
||||
if dry_run:
|
||||
args.append("--dry-run")
|
||||
|
||||
# Add any extra arguments
|
||||
if ctx.args:
|
||||
args.extend(ctx.args)
|
||||
|
||||
_run_apm_command(["uninstall"] + args)
|
||||
|
||||
# Create deps subcommands as Typer sub-application
|
||||
deps_app = typer.Typer(
|
||||
name="deps",
|
||||
help="🔗 Manage APM package dependencies",
|
||||
add_completion=False,
|
||||
)
|
||||
|
||||
@deps_app.command("clean")
|
||||
def apm_deps_clean_wrapper(ctx: typer.Context):
|
||||
"""Remove all APM dependencies"""
|
||||
_run_apm_command(["deps", "clean"] + (ctx.args or []))
|
||||
|
||||
@deps_app.command("info")
|
||||
def apm_deps_info_wrapper(ctx: typer.Context):
|
||||
"""Show detailed package information"""
|
||||
_run_apm_command(["deps", "info"] + (ctx.args or []))
|
||||
|
||||
@deps_app.command("list")
|
||||
def apm_deps_list_wrapper(ctx: typer.Context):
|
||||
"""List installed APM dependencies"""
|
||||
_run_apm_command(["deps", "list"] + (ctx.args or []))
|
||||
|
||||
@deps_app.command("tree")
|
||||
def apm_deps_tree_wrapper(ctx: typer.Context):
|
||||
"""Show dependency tree structure"""
|
||||
_run_apm_command(["deps", "tree"] + (ctx.args or []))
|
||||
|
||||
@deps_app.command("update")
|
||||
def apm_deps_update_wrapper(ctx: typer.Context):
|
||||
"""Update APM dependencies"""
|
||||
_run_apm_command(["deps", "update"] + (ctx.args or []))
|
||||
|
||||
# Add the deps sub-application to the APM app
|
||||
apm_app.add_typer(deps_app, name="deps")
|
||||
|
||||
def _run_apm_command(args: list[str]):
|
||||
"""Helper to run APM Click commands"""
|
||||
original_argv = sys.argv.copy()
|
||||
try:
|
||||
sys.argv = ["apm"] + args
|
||||
try:
|
||||
apm_click.main(args, standalone_mode=False)
|
||||
except SystemExit as e:
|
||||
if e.code != 0:
|
||||
raise typer.Exit(e.code)
|
||||
finally:
|
||||
sys.argv = original_argv
|
||||
|
||||
# Add the APM subcommand app to the main app
|
||||
app.add_typer(apm_app, name="apm")
|
||||
|
||||
# Remove the old apm_command since we're using the Typer subcommand app now
|
||||
|
||||
|
||||
def show_banner():
|
||||
"""Display the ASCII art banner."""
|
||||
@@ -330,8 +591,28 @@ def run_command(cmd: list[str], check_return: bool = True, capture: bool = False
|
||||
return None
|
||||
|
||||
|
||||
def check_tool_for_tracker(tool: str, install_hint: str, tracker: StepTracker) -> bool:
|
||||
"""Check if a tool is installed and update tracker."""
|
||||
if shutil.which(tool):
|
||||
tracker.complete(tool, "available")
|
||||
return True
|
||||
else:
|
||||
tracker.error(tool, f"not found - {install_hint}")
|
||||
return False
|
||||
|
||||
|
||||
def check_tool(tool: str, install_hint: str) -> bool:
|
||||
"""Check if a tool is installed."""
|
||||
|
||||
# Special handling for Claude CLI after `claude migrate-installer`
|
||||
# See: https://github.com/github/spec-kit/issues/123
|
||||
# The migrate-installer command REMOVES the original executable from PATH
|
||||
# and creates an alias at ~/.claude/local/claude instead
|
||||
# This path should be prioritized over other claude executables in PATH
|
||||
if tool == "claude":
|
||||
if CLAUDE_LOCAL_PATH.exists() and CLAUDE_LOCAL_PATH.is_file():
|
||||
return True
|
||||
|
||||
if shutil.which(tool):
|
||||
return True
|
||||
else:
|
||||
@@ -385,39 +666,44 @@ def init_git_repo(project_path: Path, quiet: bool = False) -> bool:
|
||||
os.chdir(original_cwd)
|
||||
|
||||
|
||||
def download_template_from_github(ai_assistant: str, download_dir: Path, *, verbose: bool = True, show_progress: bool = True):
|
||||
"""Download the latest template release from GitHub using HTTP requests.
|
||||
Returns (zip_path, metadata_dict)
|
||||
"""
|
||||
def download_template_from_github(ai_assistant: str, download_dir: Path, *, script_type: str = "sh", verbose: bool = True, show_progress: bool = True, client: httpx.Client = None, debug: bool = False) -> Tuple[Path, dict]:
|
||||
repo_owner = "github"
|
||||
repo_name = "spec-kit"
|
||||
if client is None:
|
||||
client = httpx.Client(verify=ssl_context)
|
||||
|
||||
if verbose:
|
||||
console.print("[cyan]Fetching latest release information...[/cyan]")
|
||||
api_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest"
|
||||
|
||||
try:
|
||||
response = httpx.get(api_url, timeout=30, follow_redirects=True)
|
||||
response.raise_for_status()
|
||||
release_data = response.json()
|
||||
except httpx.RequestError as e:
|
||||
if verbose:
|
||||
console.print(f"[red]Error fetching release information:[/red] {e}")
|
||||
response = client.get(api_url, timeout=30, follow_redirects=True)
|
||||
status = response.status_code
|
||||
if status != 200:
|
||||
msg = f"GitHub API returned {status} for {api_url}"
|
||||
if debug:
|
||||
msg += f"\nResponse headers: {response.headers}\nBody (truncated 500): {response.text[:500]}"
|
||||
raise RuntimeError(msg)
|
||||
try:
|
||||
release_data = response.json()
|
||||
except ValueError as je:
|
||||
raise RuntimeError(f"Failed to parse release JSON: {je}\nRaw (truncated 400): {response.text[:400]}")
|
||||
except Exception as e:
|
||||
console.print(f"[red]Error fetching release information[/red]")
|
||||
console.print(Panel(str(e), title="Fetch Error", border_style="red"))
|
||||
raise typer.Exit(1)
|
||||
|
||||
# Find the template asset for the specified AI assistant
|
||||
pattern = f"spec-kit-template-{ai_assistant}"
|
||||
pattern = f"spec-kit-template-{ai_assistant}-{script_type}"
|
||||
matching_assets = [
|
||||
asset for asset in release_data.get("assets", [])
|
||||
if pattern in asset["name"] and asset["name"].endswith(".zip")
|
||||
]
|
||||
|
||||
if not matching_assets:
|
||||
if verbose:
|
||||
console.print(f"[red]Error:[/red] No template found for AI assistant '{ai_assistant}'")
|
||||
console.print(f"[yellow]Available assets:[/yellow]")
|
||||
for asset in release_data.get("assets", []):
|
||||
console.print(f" - {asset['name']}")
|
||||
console.print(f"[red]No matching release asset found[/red] for pattern: [bold]{pattern}[/bold]")
|
||||
asset_names = [a.get('name','?') for a in release_data.get('assets', [])]
|
||||
console.print(Panel("\n".join(asset_names) or "(no assets)", title="Available Assets", border_style="yellow"))
|
||||
raise typer.Exit(1)
|
||||
|
||||
# Use the first matching asset
|
||||
@@ -437,18 +723,17 @@ def download_template_from_github(ai_assistant: str, download_dir: Path, *, verb
|
||||
console.print(f"[cyan]Downloading template...[/cyan]")
|
||||
|
||||
try:
|
||||
with httpx.stream("GET", download_url, timeout=30, follow_redirects=True) as response:
|
||||
response.raise_for_status()
|
||||
with client.stream("GET", download_url, timeout=60, follow_redirects=True) as response:
|
||||
if response.status_code != 200:
|
||||
body_sample = response.text[:400]
|
||||
raise RuntimeError(f"Download failed with {response.status_code}\nHeaders: {response.headers}\nBody (truncated): {body_sample}")
|
||||
total_size = int(response.headers.get('content-length', 0))
|
||||
|
||||
with open(zip_path, 'wb') as f:
|
||||
if total_size == 0:
|
||||
# No content-length header, download without progress
|
||||
for chunk in response.iter_bytes(chunk_size=8192):
|
||||
f.write(chunk)
|
||||
else:
|
||||
if show_progress:
|
||||
# Show progress bar
|
||||
with Progress(
|
||||
SpinnerColumn(),
|
||||
TextColumn("[progress.description]{task.description}"),
|
||||
@@ -462,15 +747,14 @@ def download_template_from_github(ai_assistant: str, download_dir: Path, *, verb
|
||||
downloaded += len(chunk)
|
||||
progress.update(task, completed=downloaded)
|
||||
else:
|
||||
# Silent download loop
|
||||
for chunk in response.iter_bytes(chunk_size=8192):
|
||||
f.write(chunk)
|
||||
|
||||
except httpx.RequestError as e:
|
||||
if verbose:
|
||||
console.print(f"[red]Error downloading template:[/red] {e}")
|
||||
except Exception as e:
|
||||
console.print(f"[red]Error downloading template[/red]")
|
||||
detail = str(e)
|
||||
if zip_path.exists():
|
||||
zip_path.unlink()
|
||||
console.print(Panel(detail, title="Download Error", border_style="red"))
|
||||
raise typer.Exit(1)
|
||||
if verbose:
|
||||
console.print(f"Downloaded: {filename}")
|
||||
@@ -483,7 +767,7 @@ def download_template_from_github(ai_assistant: str, download_dir: Path, *, verb
|
||||
return zip_path, metadata
|
||||
|
||||
|
||||
def download_and_extract_template(project_path: Path, ai_assistant: str, is_current_dir: bool = False, *, verbose: bool = True, tracker: StepTracker | None = None) -> Path:
|
||||
def download_and_extract_template(project_path: Path, ai_assistant: str, script_type: str, is_current_dir: bool = False, *, verbose: bool = True, tracker: StepTracker | None = None, client: httpx.Client = None, debug: bool = False) -> Path:
|
||||
"""Download the latest release and extract it to create a new project.
|
||||
Returns project_path. Uses tracker if provided (with keys: fetch, download, extract, cleanup)
|
||||
"""
|
||||
@@ -496,13 +780,16 @@ def download_and_extract_template(project_path: Path, ai_assistant: str, is_curr
|
||||
zip_path, meta = download_template_from_github(
|
||||
ai_assistant,
|
||||
current_dir,
|
||||
script_type=script_type,
|
||||
verbose=verbose and tracker is None,
|
||||
show_progress=(tracker is None)
|
||||
show_progress=(tracker is None),
|
||||
client=client,
|
||||
debug=debug
|
||||
)
|
||||
if tracker:
|
||||
tracker.complete("fetch", f"release {meta['release']} ({meta['size']:,} bytes)")
|
||||
tracker.add("download", "Download template")
|
||||
tracker.complete("download", meta['filename']) # already downloaded inside helper
|
||||
tracker.complete("download", meta['filename'])
|
||||
except Exception as e:
|
||||
if tracker:
|
||||
tracker.error("fetch", str(e))
|
||||
@@ -614,6 +901,8 @@ def download_and_extract_template(project_path: Path, ai_assistant: str, is_curr
|
||||
else:
|
||||
if verbose:
|
||||
console.print(f"[red]Error extracting template:[/red] {e}")
|
||||
if debug:
|
||||
console.print(Panel(str(e), title="Extraction Error", border_style="red"))
|
||||
# Clean up project directory if created and not current directory
|
||||
if not is_current_dir and project_path.exists():
|
||||
shutil.rmtree(project_path)
|
||||
@@ -635,33 +924,131 @@ def download_and_extract_template(project_path: Path, ai_assistant: str, is_curr
|
||||
return project_path
|
||||
|
||||
|
||||
def ensure_executable_scripts(project_path: Path, tracker: StepTracker | None = None) -> None:
|
||||
"""Ensure POSIX .sh scripts under .specify/scripts (recursively) have execute bits (no-op on Windows)."""
|
||||
if os.name == "nt":
|
||||
return # Windows: skip silently
|
||||
scripts_root = project_path / ".specify" / "scripts"
|
||||
if not scripts_root.is_dir():
|
||||
return
|
||||
failures: list[str] = []
|
||||
updated = 0
|
||||
for script in scripts_root.rglob("*.sh"):
|
||||
try:
|
||||
if script.is_symlink() or not script.is_file():
|
||||
continue
|
||||
try:
|
||||
with script.open("rb") as f:
|
||||
if f.read(2) != b"#!":
|
||||
continue
|
||||
except Exception:
|
||||
continue
|
||||
st = script.stat(); mode = st.st_mode
|
||||
if mode & 0o111:
|
||||
continue
|
||||
new_mode = mode
|
||||
if mode & 0o400: new_mode |= 0o100
|
||||
if mode & 0o040: new_mode |= 0o010
|
||||
if mode & 0o004: new_mode |= 0o001
|
||||
if not (new_mode & 0o100):
|
||||
new_mode |= 0o100
|
||||
os.chmod(script, new_mode)
|
||||
updated += 1
|
||||
except Exception as e:
|
||||
failures.append(f"{script.relative_to(scripts_root)}: {e}")
|
||||
if tracker:
|
||||
detail = f"{updated} updated" + (f", {len(failures)} failed" if failures else "")
|
||||
tracker.add("chmod", "Set script permissions recursively")
|
||||
(tracker.error if failures else tracker.complete)("chmod", detail)
|
||||
else:
|
||||
if updated:
|
||||
console.print(f"[cyan]Updated execute permissions on {updated} script(s) recursively[/cyan]")
|
||||
if failures:
|
||||
console.print("[yellow]Some scripts could not be updated:[/yellow]")
|
||||
for f in failures:
|
||||
console.print(f" - {f}")
|
||||
|
||||
|
||||
def _create_apm_structure(project_path: Path, project_name: str, ai_assistant: str = "copilot") -> None:
|
||||
"""Create APM structure in the project directory."""
|
||||
# Copy APM template files
|
||||
template_source = Path(__file__).parent.parent.parent / "templates" / "apm" / "hello-world"
|
||||
|
||||
if not template_source.exists():
|
||||
raise FileNotFoundError(f"APM template not found at {template_source}")
|
||||
|
||||
# Copy APM files to project root
|
||||
files_to_copy = [
|
||||
"apm.yml",
|
||||
"hello-world.prompt.md",
|
||||
"feature-implementation.prompt.md",
|
||||
"README.md"
|
||||
]
|
||||
|
||||
for file_name in files_to_copy:
|
||||
src_file = template_source / file_name
|
||||
if src_file.exists():
|
||||
shutil.copy2(src_file, project_path / file_name)
|
||||
|
||||
# Copy .apm directory
|
||||
apm_src = template_source / ".apm"
|
||||
apm_dst = project_path / ".apm"
|
||||
if apm_src.exists():
|
||||
shutil.copytree(apm_src, apm_dst, dirs_exist_ok=True)
|
||||
|
||||
# Update apm.yml with proper template variable replacement
|
||||
apm_yml = project_path / "apm.yml"
|
||||
if apm_yml.exists():
|
||||
content = apm_yml.read_text()
|
||||
|
||||
# Replace template variables with actual values
|
||||
replacements = {
|
||||
"{{project_name}}": project_name,
|
||||
"{{version}}": "1.0.0",
|
||||
"{{description}}": f"AI-native project powered by {ai_assistant}",
|
||||
"{{author}}": "Developer",
|
||||
"hello-world": project_name # Also replace any hello-world references
|
||||
}
|
||||
|
||||
for placeholder, value in replacements.items():
|
||||
content = content.replace(placeholder, value)
|
||||
|
||||
apm_yml.write_text(content)
|
||||
|
||||
|
||||
@app.command()
|
||||
def init(
|
||||
project_name: str = typer.Argument(None, help="Name for your new project directory (optional if using --here)"),
|
||||
ai_assistant: str = typer.Option(None, "--ai", help="AI assistant to use: claude, gemini, or copilot"),
|
||||
ai_assistant: str = typer.Option(None, "--ai", help="AI assistant to use: claude, gemini, copilot, or cursor"),
|
||||
script_type: str = typer.Option(None, "--script", help="Script type to use: sh or ps"),
|
||||
ignore_agent_tools: bool = typer.Option(False, "--ignore-agent-tools", help="Skip checks for AI agent tools like Claude Code"),
|
||||
no_git: bool = typer.Option(False, "--no-git", help="Skip git repository initialization"),
|
||||
here: bool = typer.Option(False, "--here", help="Initialize project in the current directory instead of creating a new one"),
|
||||
skip_tls: bool = typer.Option(False, "--skip-tls", help="Skip SSL/TLS verification (not recommended)"),
|
||||
debug: bool = typer.Option(False, "--debug", help="Show verbose diagnostic output for network and extraction failures"),
|
||||
use_apm: bool = typer.Option(False, "--use-apm", help="Include APM (Agent Package Manager) structure for context management"),
|
||||
):
|
||||
"""
|
||||
Initialize a new Specify project from the latest template.
|
||||
|
||||
This command will:
|
||||
1. Check that required tools are installed (git is optional)
|
||||
2. Let you choose your AI assistant (Claude Code, Gemini CLI, or GitHub Copilot)
|
||||
2. Let you choose your AI assistant (Claude Code, Gemini CLI, GitHub Copilot, or Cursor)
|
||||
3. Download the appropriate template from GitHub
|
||||
4. Extract the template to a new project directory or current directory
|
||||
5. Initialize a fresh git repository (if not --no-git and no existing repo)
|
||||
6. Optionally set up AI assistant commands
|
||||
7. Optionally include APM support (with --use-apm flag)
|
||||
|
||||
Examples:
|
||||
specify init my-project
|
||||
specify init my-project --ai claude
|
||||
specify init my-project --ai gemini
|
||||
specify init my-project --ai gemini --use-apm
|
||||
specify init my-project --ai copilot --no-git
|
||||
specify init my-project --ai cursor --use-apm
|
||||
specify init --ignore-agent-tools my-project
|
||||
specify init --here --ai claude
|
||||
specify init --here
|
||||
specify init --here --use-apm
|
||||
"""
|
||||
# Show banner first
|
||||
show_banner()
|
||||
@@ -737,13 +1124,30 @@ def init(
|
||||
if not check_tool("gemini", "Install from: https://github.com/google-gemini/gemini-cli"):
|
||||
console.print("[red]Error:[/red] Gemini CLI is required for Gemini projects")
|
||||
agent_tool_missing = True
|
||||
# GitHub Copilot check is not needed as it's typically available in supported IDEs
|
||||
|
||||
|
||||
if agent_tool_missing:
|
||||
console.print("\n[red]Required AI tool is missing![/red]")
|
||||
console.print("[yellow]Tip:[/yellow] Use --ignore-agent-tools to skip this check")
|
||||
raise typer.Exit(1)
|
||||
|
||||
# Determine script type (explicit, interactive, or OS default)
|
||||
if script_type:
|
||||
if script_type not in SCRIPT_TYPE_CHOICES:
|
||||
console.print(f"[red]Error:[/red] Invalid script type '{script_type}'. Choose from: {', '.join(SCRIPT_TYPE_CHOICES.keys())}")
|
||||
raise typer.Exit(1)
|
||||
selected_script = script_type
|
||||
else:
|
||||
# Auto-detect default
|
||||
default_script = "ps" if os.name == "nt" else "sh"
|
||||
# Provide interactive selection similar to AI if stdin is a TTY
|
||||
if sys.stdin.isatty():
|
||||
selected_script = select_with_arrows(SCRIPT_TYPE_CHOICES, "Choose script type (or press Enter)", default_script)
|
||||
else:
|
||||
selected_script = default_script
|
||||
|
||||
console.print(f"[cyan]Selected AI assistant:[/cyan] {selected_ai}")
|
||||
console.print(f"[cyan]Selected script type:[/cyan] {selected_script}")
|
||||
|
||||
# Download and set up project
|
||||
# New tree-based progress (no emojis); include earlier substeps
|
||||
tracker = StepTracker("Initialize Specify Project")
|
||||
@@ -754,12 +1158,16 @@ def init(
|
||||
tracker.complete("precheck", "ok")
|
||||
tracker.add("ai-select", "Select AI assistant")
|
||||
tracker.complete("ai-select", f"{selected_ai}")
|
||||
tracker.add("script-select", "Select script type")
|
||||
tracker.complete("script-select", selected_script)
|
||||
for key, label in [
|
||||
("fetch", "Fetch latest release"),
|
||||
("download", "Download template"),
|
||||
("extract", "Extract template"),
|
||||
("zip-list", "Archive contents"),
|
||||
("extracted-summary", "Extraction summary"),
|
||||
("apm", "Create APM structure"),
|
||||
("chmod", "Ensure scripts executable"),
|
||||
("cleanup", "Cleanup"),
|
||||
("git", "Initialize git repository"),
|
||||
("final", "Finalize")
|
||||
@@ -770,7 +1178,26 @@ def init(
|
||||
with Live(tracker.render(), console=console, refresh_per_second=8, transient=True) as live:
|
||||
tracker.attach_refresh(lambda: live.update(tracker.render()))
|
||||
try:
|
||||
download_and_extract_template(project_path, selected_ai, here, verbose=False, tracker=tracker)
|
||||
# Create a httpx client with verify based on skip_tls
|
||||
verify = not skip_tls
|
||||
local_ssl_context = ssl_context if verify else False
|
||||
local_client = httpx.Client(verify=local_ssl_context)
|
||||
|
||||
download_and_extract_template(project_path, selected_ai, selected_script, here, verbose=False, tracker=tracker, client=local_client, debug=debug)
|
||||
|
||||
# APM structure creation (conditional)
|
||||
if use_apm:
|
||||
tracker.start("apm", "setting up APM structure")
|
||||
try:
|
||||
_create_apm_structure(project_path, project_path.name, selected_ai)
|
||||
tracker.complete("apm", "APM structure created")
|
||||
except Exception as e:
|
||||
tracker.error("apm", f"APM setup failed: {str(e)}")
|
||||
else:
|
||||
tracker.skip("apm", "APM not requested")
|
||||
|
||||
# Ensure scripts are executable (POSIX)
|
||||
ensure_executable_scripts(project_path, tracker=tracker)
|
||||
|
||||
# Git step
|
||||
if not no_git:
|
||||
@@ -790,6 +1217,16 @@ def init(
|
||||
tracker.complete("final", "project ready")
|
||||
except Exception as e:
|
||||
tracker.error("final", str(e))
|
||||
console.print(Panel(f"Initialization failed: {e}", title="Failure", border_style="red"))
|
||||
if debug:
|
||||
_env_pairs = [
|
||||
("Python", sys.version.split()[0]),
|
||||
("Platform", sys.platform),
|
||||
("CWD", str(Path.cwd())),
|
||||
]
|
||||
_label_width = max(len(k) for k, _ in _env_pairs)
|
||||
env_lines = [f"{k.ljust(_label_width)} → [bright_black]{v}[/bright_black]" for k, v in _env_pairs]
|
||||
console.print(Panel("\n".join(env_lines), title="Debug Environment", border_style="magenta"))
|
||||
if not here and project_path.exists():
|
||||
shutil.rmtree(project_path)
|
||||
raise typer.Exit(1)
|
||||
@@ -820,12 +1257,22 @@ def init(
|
||||
steps_lines.append(f"{step_num}. Use / commands with Gemini CLI")
|
||||
steps_lines.append(" - Run gemini /specify to create specifications")
|
||||
steps_lines.append(" - Run gemini /plan to create implementation plans")
|
||||
steps_lines.append(" - Run gemini /tasks to generate tasks")
|
||||
steps_lines.append(" - See GEMINI.md for all available commands")
|
||||
elif selected_ai == "copilot":
|
||||
steps_lines.append(f"{step_num}. Open in Visual Studio Code and use [bold cyan]/specify[/], [bold cyan]/plan[/], [bold cyan]/tasks[/] commands with GitHub Copilot")
|
||||
|
||||
# Removed script variant step (scripts are transparent to users)
|
||||
step_num += 1
|
||||
steps_lines.append(f"{step_num}. Update [bold magenta]CONSTITUTION.md[/bold magenta] with your project's non-negotiable principles")
|
||||
|
||||
# Add APM-specific next steps if APM was enabled
|
||||
if use_apm:
|
||||
step_num += 1
|
||||
steps_lines.append(f"{step_num}. Use APM commands to manage your project context:")
|
||||
steps_lines.append(" - [bold cyan]specify apm compile[/bold cyan] - Generate AGENTS.md from APM instructions and packages")
|
||||
steps_lines.append(" - [bold cyan]specify apm install[/bold cyan] - Install APM packages")
|
||||
steps_lines.append(" - [bold cyan]specify apm deps list[/bold cyan] - List installed APM packages")
|
||||
|
||||
steps_panel = Panel("\n".join(steps_lines), title="Next steps", border_style="cyan", padding=(1,2))
|
||||
console.print() # blank line
|
||||
@@ -838,29 +1285,39 @@ def init(
|
||||
def check():
|
||||
"""Check that all required tools are installed."""
|
||||
show_banner()
|
||||
console.print("[bold]Checking Specify requirements...[/bold]\n")
|
||||
console.print("[bold]Checking for installed tools...[/bold]\n")
|
||||
|
||||
# Create tracker for checking tools
|
||||
tracker = StepTracker("Check Available Tools")
|
||||
|
||||
# Check if we have internet connectivity by trying to reach GitHub API
|
||||
console.print("[cyan]Checking internet connectivity...[/cyan]")
|
||||
try:
|
||||
response = httpx.get("https://api.github.com", timeout=5, follow_redirects=True)
|
||||
console.print("[green]✓[/green] Internet connection available")
|
||||
except httpx.RequestError:
|
||||
console.print("[red]✗[/red] No internet connection - required for downloading templates")
|
||||
console.print("[yellow]Please check your internet connection[/yellow]")
|
||||
# Add all tools we want to check
|
||||
tracker.add("git", "Git version control")
|
||||
tracker.add("claude", "Claude Code CLI")
|
||||
tracker.add("gemini", "Gemini CLI")
|
||||
tracker.add("code", "VS Code (for GitHub Copilot)")
|
||||
tracker.add("cursor-agent", "Cursor IDE agent (optional)")
|
||||
|
||||
console.print("\n[cyan]Optional tools:[/cyan]")
|
||||
git_ok = check_tool("git", "https://git-scm.com/downloads")
|
||||
# Check each tool
|
||||
git_ok = check_tool_for_tracker("git", "https://git-scm.com/downloads", tracker)
|
||||
claude_ok = check_tool_for_tracker("claude", "https://docs.anthropic.com/en/docs/claude-code/setup", tracker)
|
||||
gemini_ok = check_tool_for_tracker("gemini", "https://github.com/google-gemini/gemini-cli", tracker)
|
||||
# Check for VS Code (code or code-insiders)
|
||||
code_ok = check_tool_for_tracker("code", "https://code.visualstudio.com/", tracker)
|
||||
if not code_ok:
|
||||
code_ok = check_tool_for_tracker("code-insiders", "https://code.visualstudio.com/insiders/", tracker)
|
||||
cursor_ok = check_tool_for_tracker("cursor-agent", "https://cursor.sh/", tracker)
|
||||
|
||||
console.print("\n[cyan]Optional AI tools:[/cyan]")
|
||||
claude_ok = check_tool("claude", "Install from: https://docs.anthropic.com/en/docs/claude-code/setup")
|
||||
gemini_ok = check_tool("gemini", "Install from: https://github.com/google-gemini/gemini-cli")
|
||||
# Render the final tree
|
||||
console.print(tracker.render())
|
||||
|
||||
console.print("\n[green]✓ Specify CLI is ready to use![/green]")
|
||||
# Summary
|
||||
console.print("\n[bold green]Specify CLI is ready to use![/bold green]")
|
||||
|
||||
# Recommendations
|
||||
if not git_ok:
|
||||
console.print("[yellow]Consider installing git for repository management[/yellow]")
|
||||
console.print("[dim]Tip: Install git for repository management[/dim]")
|
||||
if not (claude_ok or gemini_ok):
|
||||
console.print("[yellow]Consider installing an AI assistant for the best experience[/yellow]")
|
||||
console.print("[dim]Tip: Install an AI assistant for the best experience[/dim]")
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
9
templates/apm/hello-world/apm.yml
Normal file
9
templates/apm/hello-world/apm.yml
Normal file
@@ -0,0 +1,9 @@
|
||||
name: {{project_name}}
|
||||
version: {{version}}
|
||||
description: {{description}}
|
||||
author: {{author}}
|
||||
|
||||
dependencies:
|
||||
apm:
|
||||
# list of APM packages as GitHub repositories: <owner>/<repo>
|
||||
# - github/design-guidelines
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user