Compare commits

13 Commits

v0.0.55 ... add-apm-in

| Author | SHA1 | Date |
|---|---|---|
|  | f6cae496a9 |  |
|  | f9dc5f63b9 |  |
|  | 229193e488 |  |
|  | 6a2f1950ae |  |
|  | b5092a9dba |  |
|  | fd77f82760 |  |
|  | a9512e00fc |  |
|  | 794515d242 |  |
|  | 93bf878908 |  |
|  | d501ed6939 |  |
|  | 52da4ce9d5 |  |
|  | 6e4f287913 |  |
|  | 9d449539bb |  |
5  .github/CODEOWNERS  vendored

@@ -1,2 +1,7 @@
 # Global code owner
 * @localden
+
+# APM CLI code owner
+src/apm_cli/ @danielmeppiel
+templates/apm/ @danielmeppiel
+docs/context-management.md @danielmeppiel
92  .github/workflows/release.yml  vendored

@@ -25,13 +25,33 @@ jobs:
       - name: Get latest tag
         id: get_tag
         run: |
-          chmod +x .github/workflows/scripts/get-next-version.sh
-          .github/workflows/scripts/get-next-version.sh
+          # Get the latest tag, or use v0.0.0 if no tags exist
+          LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
+          echo "latest_tag=$LATEST_TAG" >> $GITHUB_OUTPUT
+
+          # Extract version number and increment
+          VERSION=$(echo $LATEST_TAG | sed 's/v//')
+          IFS='.' read -ra VERSION_PARTS <<< "$VERSION"
+          MAJOR=${VERSION_PARTS[0]:-0}
+          MINOR=${VERSION_PARTS[1]:-0}
+          PATCH=${VERSION_PARTS[2]:-0}
+
+          # Increment patch version
+          PATCH=$((PATCH + 1))
+          NEW_VERSION="v$MAJOR.$MINOR.$PATCH"
+
+          echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
+          echo "New version will be: $NEW_VERSION"
       - name: Check if release already exists
         id: check_release
         run: |
-          chmod +x .github/workflows/scripts/check-release-exists.sh
-          .github/workflows/scripts/check-release-exists.sh ${{ steps.get_tag.outputs.new_version }}
+          if gh release view ${{ steps.get_tag.outputs.new_version }} >/dev/null 2>&1; then
+            echo "exists=true" >> $GITHUB_OUTPUT
+            echo "Release ${{ steps.get_tag.outputs.new_version }} already exists, skipping..."
+          else
+            echo "exists=false" >> $GITHUB_OUTPUT
+            echo "Release ${{ steps.get_tag.outputs.new_version }} does not exist, proceeding..."
+          fi
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       - name: Create release package variants
@@ -43,17 +63,69 @@ jobs:
         if: steps.check_release.outputs.exists == 'false'
         id: release_notes
         run: |
-          chmod +x .github/workflows/scripts/generate-release-notes.sh
-          .github/workflows/scripts/generate-release-notes.sh ${{ steps.get_tag.outputs.new_version }} ${{ steps.get_tag.outputs.latest_tag }}
+          # Get commits since last tag
+          LAST_TAG=${{ steps.get_tag.outputs.latest_tag }}
+          if [ "$LAST_TAG" = "v0.0.0" ]; then
+            # Check how many commits we have and use that as the limit
+            COMMIT_COUNT=$(git rev-list --count HEAD)
+            if [ "$COMMIT_COUNT" -gt 10 ]; then
+              COMMITS=$(git log --oneline --pretty=format:"- %s" HEAD~10..HEAD)
+            else
+              COMMITS=$(git log --oneline --pretty=format:"- %s" HEAD~$COMMIT_COUNT..HEAD 2>/dev/null || git log --oneline --pretty=format:"- %s")
+            fi
+          else
+            COMMITS=$(git log --oneline --pretty=format:"- %s" $LAST_TAG..HEAD)
+          fi
+
+          # Create release notes
+          cat > release_notes.md << EOF
+          Template release ${{ steps.get_tag.outputs.new_version }}
+
+          Updated specification-driven development templates for GitHub Copilot, Claude Code, Gemini CLI, and Cursor.
+
+          Now includes per-script variants for POSIX shell (sh) and PowerShell (ps).
+
+          Download the template for your preferred AI assistant + script type:
+          - spec-kit-template-copilot-sh-${{ steps.get_tag.outputs.new_version }}.zip
+          - spec-kit-template-copilot-ps-${{ steps.get_tag.outputs.new_version }}.zip
+          - spec-kit-template-claude-sh-${{ steps.get_tag.outputs.new_version }}.zip
+          - spec-kit-template-claude-ps-${{ steps.get_tag.outputs.new_version }}.zip
+          - spec-kit-template-gemini-sh-${{ steps.get_tag.outputs.new_version }}.zip
+          - spec-kit-template-gemini-ps-${{ steps.get_tag.outputs.new_version }}.zip
+          - spec-kit-template-cursor-sh-${{ steps.get_tag.outputs.new_version }}.zip
+          - spec-kit-template-cursor-ps-${{ steps.get_tag.outputs.new_version }}.zip
+          EOF
+
+          echo "Generated release notes:"
+          cat release_notes.md
       - name: Create GitHub Release
         if: steps.check_release.outputs.exists == 'false'
         run: |
-          chmod +x .github/workflows/scripts/create-github-release.sh
-          .github/workflows/scripts/create-github-release.sh ${{ steps.get_tag.outputs.new_version }}
+          # Remove 'v' prefix from version for release title
+          VERSION_NO_V=${{ steps.get_tag.outputs.new_version }}
+          VERSION_NO_V=${VERSION_NO_V#v}
+
+          gh release create ${{ steps.get_tag.outputs.new_version }} \
+            spec-kit-template-copilot-sh-${{ steps.get_tag.outputs.new_version }}.zip \
+            spec-kit-template-copilot-ps-${{ steps.get_tag.outputs.new_version }}.zip \
+            spec-kit-template-claude-sh-${{ steps.get_tag.outputs.new_version }}.zip \
+            spec-kit-template-claude-ps-${{ steps.get_tag.outputs.new_version }}.zip \
+            spec-kit-template-gemini-sh-${{ steps.get_tag.outputs.new_version }}.zip \
+            spec-kit-template-gemini-ps-${{ steps.get_tag.outputs.new_version }}.zip \
+            spec-kit-template-cursor-sh-${{ steps.get_tag.outputs.new_version }}.zip \
+            spec-kit-template-cursor-ps-${{ steps.get_tag.outputs.new_version }}.zip \
+            --title "Spec Kit Templates - $VERSION_NO_V" \
+            --notes-file release_notes.md
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       - name: Update version in pyproject.toml (for release artifacts only)
         if: steps.check_release.outputs.exists == 'false'
         run: |
-          chmod +x .github/workflows/scripts/update-version.sh
-          .github/workflows/scripts/update-version.sh ${{ steps.get_tag.outputs.new_version }}
+          # Update version in pyproject.toml (remove 'v' prefix for Python versioning)
+          VERSION=${{ steps.get_tag.outputs.new_version }}
+          PYTHON_VERSION=${VERSION#v}
+
+          if [ -f "pyproject.toml" ]; then
+            sed -i "s/version = \".*\"/version = \"$PYTHON_VERSION\"/" pyproject.toml
+            echo "Updated pyproject.toml version to $PYTHON_VERSION (for release artifacts only)"
+          fi
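The add-apm-in side inlines the version bump directly in the workflow above. A quick standalone check of that arithmetic in a plain shell (the tag value is an example, not taken from the repository):

```bash
LATEST_TAG="v0.0.55"                           # example input
VERSION=$(echo $LATEST_TAG | sed 's/v//')      # strip the leading 'v' -> 0.0.55
IFS='.' read -ra VERSION_PARTS <<< "$VERSION"  # split into major/minor/patch
MAJOR=${VERSION_PARTS[0]:-0}
MINOR=${VERSION_PARTS[1]:-0}
PATCH=$(( ${VERSION_PARTS[2]:-0} + 1 ))        # bump the patch component
echo "v$MAJOR.$MINOR.$PATCH"                   # prints v0.0.56
```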
.github/workflows/scripts/check-release-exists.sh

@@ -1,21 +0,0 @@
-#!/usr/bin/env bash
-set -euo pipefail
-
-# check-release-exists.sh
-# Check if a GitHub release already exists for the given version
-# Usage: check-release-exists.sh <version>
-
-if [[ $# -ne 1 ]]; then
-  echo "Usage: $0 <version>" >&2
-  exit 1
-fi
-
-VERSION="$1"
-
-if gh release view "$VERSION" >/dev/null 2>&1; then
-  echo "exists=true" >> $GITHUB_OUTPUT
-  echo "Release $VERSION already exists, skipping..."
-else
-  echo "exists=false" >> $GITHUB_OUTPUT
-  echo "Release $VERSION does not exist, proceeding..."
-fi
.github/workflows/scripts/create-github-release.sh

@@ -1,42 +0,0 @@
-#!/usr/bin/env bash
-set -euo pipefail
-
-# create-github-release.sh
-# Create a GitHub release with all template zip files
-# Usage: create-github-release.sh <version>
-
-if [[ $# -ne 1 ]]; then
-  echo "Usage: $0 <version>" >&2
-  exit 1
-fi
-
-VERSION="$1"
-
-# Remove 'v' prefix from version for release title
-VERSION_NO_V=${VERSION#v}
-
-gh release create "$VERSION" \
-  .genreleases/spec-kit-template-copilot-sh-"$VERSION".zip \
-  .genreleases/spec-kit-template-copilot-ps-"$VERSION".zip \
-  .genreleases/spec-kit-template-claude-sh-"$VERSION".zip \
-  .genreleases/spec-kit-template-claude-ps-"$VERSION".zip \
-  .genreleases/spec-kit-template-gemini-sh-"$VERSION".zip \
-  .genreleases/spec-kit-template-gemini-ps-"$VERSION".zip \
-  .genreleases/spec-kit-template-cursor-sh-"$VERSION".zip \
-  .genreleases/spec-kit-template-cursor-ps-"$VERSION".zip \
-  .genreleases/spec-kit-template-opencode-sh-"$VERSION".zip \
-  .genreleases/spec-kit-template-opencode-ps-"$VERSION".zip \
-  .genreleases/spec-kit-template-qwen-sh-"$VERSION".zip \
-  .genreleases/spec-kit-template-qwen-ps-"$VERSION".zip \
-  .genreleases/spec-kit-template-windsurf-sh-"$VERSION".zip \
-  .genreleases/spec-kit-template-windsurf-ps-"$VERSION".zip \
-  .genreleases/spec-kit-template-codex-sh-"$VERSION".zip \
-  .genreleases/spec-kit-template-codex-ps-"$VERSION".zip \
-  .genreleases/spec-kit-template-kilocode-sh-"$VERSION".zip \
-  .genreleases/spec-kit-template-kilocode-ps-"$VERSION".zip \
-  .genreleases/spec-kit-template-auggie-sh-"$VERSION".zip \
-  .genreleases/spec-kit-template-auggie-ps-"$VERSION".zip \
-  .genreleases/spec-kit-template-roo-sh-"$VERSION".zip \
-  .genreleases/spec-kit-template-roo-ps-"$VERSION".zip \
-  --title "Spec Kit Templates - $VERSION_NO_V" \
-  --notes-file release_notes.md
.github/workflows/scripts/create-release-packages.sh

@@ -6,7 +6,7 @@ set -euo pipefail
 # Usage: .github/workflows/scripts/create-release-packages.sh <version>
 # Version argument should include leading 'v'.
 # Optionally set AGENTS and/or SCRIPTS env vars to limit what gets built.
-# AGENTS : space or comma separated subset of: claude gemini copilot cursor qwen opencode windsurf codex (default: all)
+# AGENTS : space or comma separated subset of: claude gemini copilot (default: all)
 # SCRIPTS : space or comma separated subset of: sh ps (default: both)
 # Examples:
 #   AGENTS=claude SCRIPTS=sh $0 v0.2.0

@@ -25,10 +25,7 @@ fi
 
 echo "Building release packages for $NEW_VERSION"
 
-# Create and use .genreleases directory for all build artifacts
-GENRELEASES_DIR=".genreleases"
-mkdir -p "$GENRELEASES_DIR"
-rm -rf "$GENRELEASES_DIR"/* || true
+rm -rf sdd-package-base* sdd-*-package-* spec-kit-template-*-${NEW_VERSION}.zip || true
 
 rewrite_paths() {
   sed -E \

@@ -85,7 +82,7 @@ generate_commands() {
 
 build_variant() {
   local agent=$1 script=$2
-  local base_dir="$GENRELEASES_DIR/sdd-${agent}-package-${script}"
+  local base_dir="sdd-${agent}-package-${script}"
   echo "Building $agent ($script) package..."
   mkdir -p "$base_dir"
 

@@ -117,25 +114,14 @@ build_variant() {
   local plan_tpl="$base_dir/.specify/templates/plan-template.md"
   if [[ -f "$plan_tpl" ]]; then
     plan_norm=$(tr -d '\r' < "$plan_tpl")
-    # Extract script command from YAML frontmatter
-    script_command=$(printf '%s\n' "$plan_norm" | awk -v sv="$script" '/^[[:space:]]*'"$script"':[[:space:]]*/ {sub(/^[[:space:]]*'"$script"':[[:space:]]*/, ""); print; exit}')
-    if [[ -n $script_command ]]; then
-      # Always prefix with .specify/ for plan usage
-      script_command=".specify/$script_command"
-      # Replace {SCRIPT} placeholder with the script command and __AGENT__ with agent name
-      substituted=$(sed "s|{SCRIPT}|${script_command}|g" "$plan_tpl" | tr -d '\r' | sed "s|__AGENT__|${agent}|g")
-      # Strip YAML frontmatter from plan template output (keep body only)
-      stripped=$(printf '%s\n' "$substituted" | awk 'BEGIN{fm=0;dash=0} /^---$/ {dash++; if(dash==1){fm=1; next} else if(dash==2){fm=0; next}} {if(!fm) print}')
-      printf '%s\n' "$stripped" > "$plan_tpl"
+    variant_line=$(printf '%s\n' "$plan_norm" | grep -E "<!--[[:space:]]*VARIANT:$script" | head -1 | sed -E "s/.*VARIANT:$script[[:space:]]+//; s/-->.*//; s/^[[:space:]]+//; s/[[:space:]]+$//")
+    if [[ -n $variant_line ]]; then
+      tmp_file=$(mktemp)
+      sed "s|VARIANT-INJECT|${variant_line}|" "$plan_tpl" | tr -d '\r' | sed "s|__AGENT__|${agent}|g" | sed '/<!--[[:space:]]*VARIANT:sh/d' | sed '/<!--[[:space:]]*VARIANT:ps/d' > "$tmp_file" && mv "$tmp_file" "$plan_tpl"
     else
-      echo "Warning: no plan-template script command found for $script in YAML frontmatter" >&2
+      echo "Warning: no plan-template variant for $script (pattern not matched)" >&2
     fi
   fi
-  # NOTE: We substitute {ARGS} internally. Outward tokens differ intentionally:
-  #  * Markdown/prompt (claude, copilot, cursor, opencode): $ARGUMENTS
-  #  * TOML (gemini, qwen): {{args}}
-  # This keeps formats readable without extra abstraction.
-
   case $agent in
     claude)
       mkdir -p "$base_dir/.claude/commands"

@@ -150,49 +136,26 @@ build_variant() {
     cursor)
       mkdir -p "$base_dir/.cursor/commands"
       generate_commands cursor md "\$ARGUMENTS" "$base_dir/.cursor/commands" "$script" ;;
-    qwen)
-      mkdir -p "$base_dir/.qwen/commands"
-      generate_commands qwen toml "{{args}}" "$base_dir/.qwen/commands" "$script"
-      [[ -f agent_templates/qwen/QWEN.md ]] && cp agent_templates/qwen/QWEN.md "$base_dir/QWEN.md" ;;
-    opencode)
-      mkdir -p "$base_dir/.opencode/command"
-      generate_commands opencode md "\$ARGUMENTS" "$base_dir/.opencode/command" "$script" ;;
-    windsurf)
-      mkdir -p "$base_dir/.windsurf/workflows"
-      generate_commands windsurf md "\$ARGUMENTS" "$base_dir/.windsurf/workflows" "$script" ;;
-    codex)
-      mkdir -p "$base_dir/.codex/prompts"
-      generate_commands codex md "\$ARGUMENTS" "$base_dir/.codex/prompts" "$script" ;;
-    kilocode)
-      mkdir -p "$base_dir/.kilocode/workflows"
-      generate_commands kilocode md "\$ARGUMENTS" "$base_dir/.kilocode/workflows" "$script" ;;
-    auggie)
-      mkdir -p "$base_dir/.augment/commands"
-      generate_commands auggie md "\$ARGUMENTS" "$base_dir/.augment/commands" "$script" ;;
-    roo)
-      mkdir -p "$base_dir/.roo/commands"
-      generate_commands roo md "\$ARGUMENTS" "$base_dir/.roo/commands" "$script" ;;
   esac
   ( cd "$base_dir" && zip -r "../spec-kit-template-${agent}-${script}-${NEW_VERSION}.zip" . )
-  echo "Created $GENRELEASES_DIR/spec-kit-template-${agent}-${script}-${NEW_VERSION}.zip"
+  echo "Created spec-kit-template-${agent}-${script}-${NEW_VERSION}.zip"
 }
 
 # Determine agent list
-ALL_AGENTS=(claude gemini copilot cursor qwen opencode windsurf codex kilocode auggie roo)
+ALL_AGENTS=(claude gemini copilot cursor)
 ALL_SCRIPTS=(sh ps)
 
 norm_list() {
   # convert comma+space separated -> space separated unique while preserving order of first occurrence
   tr ',\n' ' ' | awk '{for(i=1;i<=NF;i++){if(!seen[$i]++){printf((out?" ":"") $i)}}}END{printf("\n")}'
 }
 
 validate_subset() {
-  local type=$1; shift; local -n allowed=$1; shift; local items=("$@")
+  local type=$1; shift; local -n allowed=$1; shift; local items=($@)
   local ok=1
   for it in "${items[@]}"; do
     local found=0
-    for a in "${allowed[@]}"; do [[ $it == "$a" ]] && { found=1; break; }; done
+    for a in "${allowed[@]}"; do [[ $it == $a ]] && { found=1; break; }; done
     if [[ $found -eq 0 ]]; then
      echo "Error: unknown $type '$it' (allowed: ${allowed[*]})" >&2
      ok=0

@@ -202,17 +165,17 @@ validate_subset() {
 }
 
 if [[ -n ${AGENTS:-} ]]; then
-  mapfile -t AGENT_LIST < <(printf '%s' "$AGENTS" | norm_list)
+  AGENT_LIST=($(printf '%s' "$AGENTS" | norm_list))
   validate_subset agent ALL_AGENTS "${AGENT_LIST[@]}" || exit 1
 else
-  AGENT_LIST=("${ALL_AGENTS[@]}")
+  AGENT_LIST=(${ALL_AGENTS[@]})
 fi
 
 if [[ -n ${SCRIPTS:-} ]]; then
-  mapfile -t SCRIPT_LIST < <(printf '%s' "$SCRIPTS" | norm_list)
+  SCRIPT_LIST=($(printf '%s' "$SCRIPTS" | norm_list))
   validate_subset script ALL_SCRIPTS "${SCRIPT_LIST[@]}" || exit 1
 else
-  SCRIPT_LIST=("${ALL_SCRIPTS[@]}")
+  SCRIPT_LIST=(${ALL_SCRIPTS[@]})
 fi
 
 echo "Agents: ${AGENT_LIST[*]}"

@@ -224,5 +187,5 @@ for agent in "${AGENT_LIST[@]}"; do
   done
 done
 
-echo "Archives in $GENRELEASES_DIR:"
-ls -1 "$GENRELEASES_DIR"/spec-kit-template-*-"${NEW_VERSION}".zip
+echo "Archives:"
+ls -1 spec-kit-template-*-${NEW_VERSION}.zip
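As the header comments on both sides note, the build can be limited with the AGENTS and SCRIPTS environment variables. An illustrative invocation (agent and version values are examples):

```bash
# Build only the Copilot + POSIX shell variant for a test tag.
AGENTS=copilot SCRIPTS=sh .github/workflows/scripts/create-release-packages.sh v0.0.56

# The v0.0.55 side writes archives to .genreleases/, the add-apm-in side to the repo root.
ls -1 .genreleases/spec-kit-template-copilot-sh-v0.0.56.zip 2>/dev/null \
  || ls -1 spec-kit-template-copilot-sh-v0.0.56.zip
```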
.github/workflows/scripts/generate-release-notes.sh

@@ -1,36 +0,0 @@
-#!/usr/bin/env bash
-set -euo pipefail
-
-# generate-release-notes.sh
-# Generate release notes from git history
-# Usage: generate-release-notes.sh <new_version> <last_tag>
-
-if [[ $# -ne 2 ]]; then
-  echo "Usage: $0 <new_version> <last_tag>" >&2
-  exit 1
-fi
-
-NEW_VERSION="$1"
-LAST_TAG="$2"
-
-# Get commits since last tag
-if [ "$LAST_TAG" = "v0.0.0" ]; then
-  # Check how many commits we have and use that as the limit
-  COMMIT_COUNT=$(git rev-list --count HEAD)
-  if [ "$COMMIT_COUNT" -gt 10 ]; then
-    COMMITS=$(git log --oneline --pretty=format:"- %s" HEAD~10..HEAD)
-  else
-    COMMITS=$(git log --oneline --pretty=format:"- %s" HEAD~$COMMIT_COUNT..HEAD 2>/dev/null || git log --oneline --pretty=format:"- %s")
-  fi
-else
-  COMMITS=$(git log --oneline --pretty=format:"- %s" $LAST_TAG..HEAD)
-fi
-
-# Create release notes
-cat > release_notes.md << EOF
-This is the latest set of releases that you can use with your agent of choice. We recommend using the Specify CLI to scaffold your projects, however you can download these independently and manage them yourself.
-
-EOF
-
-echo "Generated release notes:"
-cat release_notes.md
24  .github/workflows/scripts/get-next-version.sh  vendored

@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-set -euo pipefail
-
-# get-next-version.sh
-# Calculate the next version based on the latest git tag and output GitHub Actions variables
-# Usage: get-next-version.sh
-
-# Get the latest tag, or use v0.0.0 if no tags exist
-LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
-echo "latest_tag=$LATEST_TAG" >> $GITHUB_OUTPUT
-
-# Extract version number and increment
-VERSION=$(echo $LATEST_TAG | sed 's/v//')
-IFS='.' read -ra VERSION_PARTS <<< "$VERSION"
-MAJOR=${VERSION_PARTS[0]:-0}
-MINOR=${VERSION_PARTS[1]:-0}
-PATCH=${VERSION_PARTS[2]:-0}
-
-# Increment patch version
-PATCH=$((PATCH + 1))
-NEW_VERSION="v$MAJOR.$MINOR.$PATCH"
-
-echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
-echo "New version will be: $NEW_VERSION"
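Since the deleted helper appends its results to the file named by GITHUB_OUTPUT, it can be exercised outside of GitHub Actions by pointing that variable at a scratch file (a sketch; the printed values depend on the repository's tags):

```bash
export GITHUB_OUTPUT=$(mktemp)    # stand-in for the Actions output file
.github/workflows/scripts/get-next-version.sh
cat "$GITHUB_OUTPUT"
# e.g.:
#   latest_tag=v0.0.55
#   new_version=v0.0.56
```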
23  .github/workflows/scripts/update-version.sh  vendored

@@ -1,23 +0,0 @@
-#!/usr/bin/env bash
-set -euo pipefail
-
-# update-version.sh
-# Update version in pyproject.toml (for release artifacts only)
-# Usage: update-version.sh <version>
-
-if [[ $# -ne 1 ]]; then
-  echo "Usage: $0 <version>" >&2
-  exit 1
-fi
-
-VERSION="$1"
-
-# Remove 'v' prefix for Python versioning
-PYTHON_VERSION=${VERSION#v}
-
-if [ -f "pyproject.toml" ]; then
-  sed -i "s/version = \".*\"/version = \"$PYTHON_VERSION\"/" pyproject.toml
-  echo "Updated pyproject.toml version to $PYTHON_VERSION (for release artifacts only)"
-else
-  echo "Warning: pyproject.toml not found, skipping version update"
-fi
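The helper's effect on pyproject.toml is a single in-place substitution; roughly (version values illustrative):

```bash
.github/workflows/scripts/update-version.sh v0.0.56
# pyproject.toml before: version = "0.0.55"
# pyproject.toml after:  version = "0.0.56"
```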
5  .gitignore  vendored

@@ -38,8 +38,3 @@ env/
 .env
 .env.local
 *.lock
-
-# Spec Kit-specific files
-.genreleases/
-*.zip
-sdd-*/
272  AGENTS.md

@@ -1,272 +0,0 @@
-# AGENTS.md
-
-## About Spec Kit and Specify
-
-**GitHub Spec Kit** is a comprehensive toolkit for implementing Spec-Driven Development (SDD) - a methodology that emphasizes creating clear specifications before implementation. The toolkit includes templates, scripts, and workflows that guide development teams through a structured approach to building software.
-
-**Specify CLI** is the command-line interface that bootstraps projects with the Spec Kit framework. It sets up the necessary directory structures, templates, and AI agent integrations to support the Spec-Driven Development workflow.
-
-The toolkit supports multiple AI coding assistants, allowing teams to use their preferred tools while maintaining consistent project structure and development practices.
-
----
-
-## General practices
-
-- Any changes to `__init__.py` for the Specify CLI require a version rev in `pyproject.toml` and addition of entries to `CHANGELOG.md`.
-
-## Adding New Agent Support
-
-This section explains how to add support for new AI agents/assistants to the Specify CLI. Use this guide as a reference when integrating new AI tools into the Spec-Driven Development workflow.
-
-### Overview
-
-Specify supports multiple AI agents by generating agent-specific command files and directory structures when initializing projects. Each agent has its own conventions for:
-
-- **Command file formats** (Markdown, TOML, etc.)
-- **Directory structures** (`.claude/commands/`, `.windsurf/workflows/`, etc.)
-- **Command invocation patterns** (slash commands, CLI tools, etc.)
-- **Argument passing conventions** (`$ARGUMENTS`, `{{args}}`, etc.)
-
-### Current Supported Agents
-
-| Agent | Directory | Format | CLI Tool | Description |
-|-------|-----------|---------|----------|-------------|
-| **Claude Code** | `.claude/commands/` | Markdown | `claude` | Anthropic's Claude Code CLI |
-| **Gemini CLI** | `.gemini/commands/` | TOML | `gemini` | Google's Gemini CLI |
-| **GitHub Copilot** | `.github/prompts/` | Markdown | N/A (IDE-based) | GitHub Copilot in VS Code |
-| **Cursor** | `.cursor/commands/` | Markdown | `cursor-agent` | Cursor CLI |
-| **Qwen Code** | `.qwen/commands/` | TOML | `qwen` | Alibaba's Qwen Code CLI |
-| **opencode** | `.opencode/command/` | Markdown | `opencode` | opencode CLI |
-| **Windsurf** | `.windsurf/workflows/` | Markdown | N/A (IDE-based) | Windsurf IDE workflows |
-
-### Step-by-Step Integration Guide
-
-Follow these steps to add a new agent (using Windsurf as an example):
-
-#### 1. Update AI_CHOICES Constant
-
-Add the new agent to the `AI_CHOICES` dictionary in `src/specify_cli/__init__.py`:
-
-```python
-AI_CHOICES = {
-    "copilot": "GitHub Copilot",
-    "claude": "Claude Code",
-    "gemini": "Gemini CLI",
-    "cursor": "Cursor",
-    "qwen": "Qwen Code",
-    "opencode": "opencode",
-    "windsurf": "Windsurf"  # Add new agent here
-}
-```
-
-Also update the `agent_folder_map` in the same file to include the new agent's folder for the security notice:
-
-```python
-agent_folder_map = {
-    "claude": ".claude/",
-    "gemini": ".gemini/",
-    "cursor": ".cursor/",
-    "qwen": ".qwen/",
-    "opencode": ".opencode/",
-    "codex": ".codex/",
-    "windsurf": ".windsurf/",  # Add new agent folder here
-    "kilocode": ".kilocode/",
-    "auggie": ".auggie/",
-    "copilot": ".github/"
-}
-```
-
-#### 2. Update CLI Help Text
-
-Update all help text and examples to include the new agent:
-
-- Command option help: `--ai` parameter description
-- Function docstrings and examples
-- Error messages with agent lists
-
-#### 3. Update README Documentation
-
-Update the **Supported AI Agents** section in `README.md` to include the new agent:
-
-- Add the new agent to the table with appropriate support level (Full/Partial)
-- Include the agent's official website link
-- Add any relevant notes about the agent's implementation
-- Ensure the table formatting remains aligned and consistent
-
-#### 4. Update Release Package Script
-
-Modify `.github/workflows/scripts/create-release-packages.sh`:
-
-##### Add to ALL_AGENTS array:
-```bash
-ALL_AGENTS=(claude gemini copilot cursor qwen opencode windsurf)
-```
-
-##### Add case statement for directory structure:
-```bash
-case $agent in
-  # ... existing cases ...
-  windsurf)
-    mkdir -p "$base_dir/.windsurf/workflows"
-    generate_commands windsurf md "\$ARGUMENTS" "$base_dir/.windsurf/workflows" "$script" ;;
-esac
-```
-
-#### 4. Update GitHub Release Script
-
-Modify `.github/workflows/scripts/create-github-release.sh` to include the new agent's packages:
-
-```bash
-gh release create "$VERSION" \
-  # ... existing packages ...
-  .genreleases/spec-kit-template-windsurf-sh-"$VERSION".zip \
-  .genreleases/spec-kit-template-windsurf-ps-"$VERSION".zip \
-  # Add new agent packages here
-```
-
-#### 5. Update Agent Context Scripts
-
-##### Bash script (`scripts/bash/update-agent-context.sh`):
-
-Add file variable:
-```bash
-WINDSURF_FILE="$REPO_ROOT/.windsurf/rules/specify-rules.md"
-```
-
-Add to case statement:
-```bash
-case "$AGENT_TYPE" in
-  # ... existing cases ...
-  windsurf) update_agent_file "$WINDSURF_FILE" "Windsurf" ;;
-  "")
-    # ... existing checks ...
-    [ -f "$WINDSURF_FILE" ] && update_agent_file "$WINDSURF_FILE" "Windsurf";
-    # Update default creation condition
-    ;;
-esac
-```
-
-##### PowerShell script (`scripts/powershell/update-agent-context.ps1`):
-
-Add file variable:
-```powershell
-$windsurfFile = Join-Path $repoRoot '.windsurf/rules/specify-rules.md'
-```
-
-Add to switch statement:
-```powershell
-switch ($AgentType) {
-    # ... existing cases ...
-    'windsurf' { Update-AgentFile $windsurfFile 'Windsurf' }
-    '' {
-        foreach ($pair in @(
-            # ... existing pairs ...
-            @{file=$windsurfFile; name='Windsurf'}
-        )) {
-            if (Test-Path $pair.file) { Update-AgentFile $pair.file $pair.name }
-        }
-        # Update default creation condition
-    }
-}
-```
-
-#### 6. Update CLI Tool Checks (Optional)
-
-For agents that require CLI tools, add checks in the `check()` command and agent validation:
-
-```python
-# In check() command
-tracker.add("windsurf", "Windsurf IDE (optional)")
-windsurf_ok = check_tool_for_tracker("windsurf", "https://windsurf.com/", tracker)
-
-# In init validation (only if CLI tool required)
-elif selected_ai == "windsurf":
-    if not check_tool("windsurf", "Install from: https://windsurf.com/"):
-        console.print("[red]Error:[/red] Windsurf CLI is required for Windsurf projects")
-        agent_tool_missing = True
-```
-
-**Note**: Skip CLI checks for IDE-based agents (Copilot, Windsurf).
-
-## Agent Categories
-
-### CLI-Based Agents
-Require a command-line tool to be installed:
-- **Claude Code**: `claude` CLI
-- **Gemini CLI**: `gemini` CLI
-- **Cursor**: `cursor-agent` CLI
-- **Qwen Code**: `qwen` CLI
-- **opencode**: `opencode` CLI
-
-### IDE-Based Agents
-Work within integrated development environments:
-- **GitHub Copilot**: Built into VS Code/compatible editors
-- **Windsurf**: Built into Windsurf IDE
-
-## Command File Formats
-
-### Markdown Format
-Used by: Claude, Cursor, opencode, Windsurf
-
-```markdown
----
-description: "Command description"
----
-
-Command content with {SCRIPT} and $ARGUMENTS placeholders.
-```
-
-### TOML Format
-Used by: Gemini, Qwen
-
-```toml
-description = "Command description"
-
-prompt = """
-Command content with {SCRIPT} and {{args}} placeholders.
-"""
-```
-
-## Directory Conventions
-
-- **CLI agents**: Usually `.<agent-name>/commands/`
-- **IDE agents**: Follow IDE-specific patterns:
-  - Copilot: `.github/prompts/`
-  - Cursor: `.cursor/commands/`
-  - Windsurf: `.windsurf/workflows/`
-
-## Argument Patterns
-
-Different agents use different argument placeholders:
-- **Markdown/prompt-based**: `$ARGUMENTS`
-- **TOML-based**: `{{args}}`
-- **Script placeholders**: `{SCRIPT}` (replaced with actual script path)
-- **Agent placeholders**: `__AGENT__` (replaced with agent name)
-
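The packaging script earlier in this comparison applies these placeholders with plain `sed`; a minimal sketch of the same substitution (file names and the script path below are placeholders, not repository paths):

```bash
agent="windsurf"
script_command=".specify/scripts/bash/setup-plan.sh"   # hypothetical script path
# Inject the script path and agent name into a command template.
sed -e "s|{SCRIPT}|${script_command}|g" \
    -e "s|__AGENT__|${agent}|g" \
    plan-template.md > plan-windsurf.md
```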
-## Testing New Agent Integration
-
-1. **Build test**: Run package creation script locally
-2. **CLI test**: Test `specify init --ai <agent>` command
-3. **File generation**: Verify correct directory structure and files
-4. **Command validation**: Ensure generated commands work with the agent
-5. **Context update**: Test agent context update scripts
-
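Steps 1 and 2 can be smoke-tested locally with the packaging script's AGENTS/SCRIPTS filters and a throwaway init; a sketch with example values:

```bash
# Build just the new agent's POSIX-shell package.
AGENTS=windsurf SCRIPTS=sh .github/workflows/scripts/create-release-packages.sh v0.0.0-test

# Scaffold a scratch project with the new agent and inspect the generated files.
specify init demo-project --ai windsurf --script sh --ignore-agent-tools
ls demo-project/.windsurf/workflows
```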
-## Common Pitfalls
-
-1. **Forgetting update scripts**: Both bash and PowerShell scripts must be updated
-2. **Missing CLI checks**: Only add for agents that actually have CLI tools
-3. **Wrong argument format**: Use correct placeholder format for each agent type
-4. **Directory naming**: Follow agent-specific conventions exactly
-5. **Help text inconsistency**: Update all user-facing text consistently
-
-## Future Considerations
-
-When adding new agents:
-- Consider the agent's native command/workflow patterns
-- Ensure compatibility with the Spec-Driven Development process
-- Document any special requirements or limitations
-- Update this guide with lessons learned
-
----
-
-*This documentation should be updated whenever new agents are added to maintain accuracy and completeness.*
106  CHANGELOG.md

@@ -1,113 +1,11 @@
 # Changelog
 
-<!-- markdownlint-disable MD024 -->
-
 All notable changes to the Specify CLI will be documented in this file.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
-## [LATEST_VERSION] - RELEASE_DATE
+## [Unreleased]
 
-### Added
-
-- Support for using `.` as a shorthand for current directory in `specify init .` command, equivalent to `--here` flag but more intuitive for users
-
-## [0.0.17] - 2025-09-22
-
-### Added
-
-- New `/clarify` command template to surface up to 5 targeted clarification questions for an existing spec and persist answers into a Clarifications section in the spec.
-- New `/analyze` command template providing a non-destructive cross-artifact discrepancy and alignment report (spec, clarifications, plan, tasks, constitution) inserted after `/tasks` and before `/implement`.
-- Note: Constitution rules are explicitly treated as non-negotiable; any conflict is a CRITICAL finding requiring artifact remediation, not weakening of principles.
-
-## [0.0.16] - 2025-09-22
-
-### Added
-
-- `--force` flag for `init` command to bypass confirmation when using `--here` in a non-empty directory and proceed with merging/overwriting files.
-
-## [0.0.15] - 2025-09-21
-
-### Added
-
-- Support for Roo Code.
-
-## [0.0.14] - 2025-09-21
-
-### Changed
-
-- Error messages are now shown consistently.
-
-## [0.0.13] - 2025-09-21
-
-### Added
-
-- Support for Kilo Code. Thank you [@shahrukhkhan489](https://github.com/shahrukhkhan489) with [#394](https://github.com/github/spec-kit/pull/394).
-- Support for Auggie CLI. Thank you [@hungthai1401](https://github.com/hungthai1401) with [#137](https://github.com/github/spec-kit/pull/137).
-- Agent folder security notice displayed after project provisioning completion, warning users that some agents may store credentials or auth tokens in their agent folders and recommending adding relevant folders to `.gitignore` to prevent accidental credential leakage.
-
-### Changed
-
-- Warning displayed to ensure that folks are aware that they might need to add their agent folder to `.gitignore`.
-- Cleaned up the `check` command output.
-
-## [0.0.12] - 2025-09-21
-
-### Changed
-
-- Added additional context for OpenAI Codex users - they need to set an additional environment variable, as described in [#417](https://github.com/github/spec-kit/issues/417).
-
-## [0.0.11] - 2025-09-20
-
-### Added
-
-- Codex CLI support (thank you [@honjo-hiroaki-gtt](https://github.com/honjo-hiroaki-gtt) for the contribution in [#14](https://github.com/github/spec-kit/pull/14))
-- Codex-aware context update tooling (Bash and PowerShell) so feature plans refresh `AGENTS.md` alongside existing assistants without manual edits.
-
-## [0.0.10] - 2025-09-20
-
-### Fixed
-
-- Addressed [#378](https://github.com/github/spec-kit/issues/378) where a GitHub token may be attached to the request when it was empty.
-
-## [0.0.9] - 2025-09-19
-
-### Changed
-
-- Improved agent selector UI with cyan highlighting for agent keys and gray parentheses for full names
-
-## [0.0.8] - 2025-09-19
-
-### Added
-
-- Windsurf IDE support as additional AI assistant option (thank you [@raedkit](https://github.com/raedkit) for the work in [#151](https://github.com/github/spec-kit/pull/151))
-- GitHub token support for API requests to handle corporate environments and rate limiting (contributed by [@zryfish](https://github.com/@zryfish) in [#243](https://github.com/github/spec-kit/pull/243))
-
-### Changed
-
-- Updated README with Windsurf examples and GitHub token usage
-- Enhanced release workflow to include Windsurf templates
-
-## [0.0.7] - 2025-09-18
-
-### Changed
-
-- Updated command instructions in the CLI.
-- Cleaned up the code to not render agent-specific information when it's generic.
-
-
-## [0.0.6] - 2025-09-17
-
-### Added
-
-- opencode support as additional AI assistant option
-
-## [0.0.5] - 2025-09-17
-
-### Added
-
-- Qwen Code support as additional AI assistant option
-
 ## [0.0.4] - 2025-09-14
 

@@ -121,4 +19,4 @@ N/A
 
 ### Changed
 
 N/A
CONTRIBUTING.md

@@ -11,7 +11,7 @@ These are one time installations required to be able to test your changes locally
 1. Install [Python 3.11+](https://www.python.org/downloads/)
 1. Install [uv](https://docs.astral.sh/uv/) for package management
 1. Install [Git](https://git-scm.com/downloads)
-1. Have an [AI coding agent available](README.md#-supported-ai-agents)
+1. Have an AI coding agent available: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), or [Gemini CLI](https://github.com/google-gemini/gemini-cli) are recommended, but we're working on adding support for other agents as well.
 
 ## Submitting a pull request
 

@@ -31,7 +31,7 @@ Here are a few things you can do that will increase the likelihood of your pull
 
 - Follow the project's coding conventions.
 - Write tests for new functionality.
-- Update documentation (`README.md`, `spec-driven.md`) if your changes affect user-facing features.
+- Update documentation (`README.md,` `spec-driven.md`) if your changes affect user-facing features.
 - Keep your change as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests.
 - Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
 - Test your changes with the Spec-Driven Development workflow to ensure compatibility.

@@ -45,63 +45,6 @@ When working on spec-kit:
 3. Test script functionality in the `scripts/` directory
 4. Ensure memory files (`memory/constitution.md`) are updated if major process changes are made
 
-## AI contributions in Spec Kit
-
-> [!IMPORTANT]
->
-> If you are using **any kind of AI assistance** to contribute to Spec Kit,
-> it must be disclosed in the pull request or issue.
-
-We welcome and encourage the use of AI tools to help improve Spec Kit! Many valuable contributions have been enhanced with AI assistance for code generation, issue detection, and feature definition.
-
-That being said, if you are using any kind of AI assistance (e.g., agents, ChatGPT) while contributing to Spec Kit,
-**this must be disclosed in the pull request or issue**, along with the extent to which AI assistance was used (e.g., documentation comments vs. code generation).
-
-If your PR responses or comments are being generated by an AI, disclose that as well.
-
-As an exception, trivial spacing or typo fixes don't need to be disclosed, so long as the changes are limited to small parts of the code or short phrases.
-
-An example disclosure:
-
-> This PR was written primarily by GitHub Copilot.
-
-Or a more detailed disclosure:
-
-> I consulted ChatGPT to understand the codebase but the solution
-> was fully authored manually by myself.
-
-Failure to disclose this is first and foremost rude to the human operators on the other end of the pull request, but it also makes it difficult to
-determine how much scrutiny to apply to the contribution.
-
-In a perfect world, AI assistance would produce equal or higher quality work than any human. That isn't the world we live in today, and in most cases
-where human supervision or expertise is not in the loop, it's generating code that cannot be reasonably maintained or evolved.
-
-### What we're looking for
-
-When submitting AI-assisted contributions, please ensure they include:
-
-- **Clear disclosure of AI use** - You are transparent about AI use and degree to which you're using it for the contribution
-- **Human understanding and testing** - You've personally tested the changes and understand what they do
-- **Clear rationale** - You can explain why the change is needed and how it fits within Spec Kit's goals
-- **Concrete evidence** - Include test cases, scenarios, or examples that demonstrate the improvement
-- **Your own analysis** - Share your thoughts on the end-to-end developer experience
-
-### What we'll close
-
-We reserve the right to close contributions that appear to be:
-
-- Untested changes submitted without verification
-- Generic suggestions that don't address specific Spec Kit needs
-- Bulk submissions that show no human review or understanding
-
-### Guidelines for success
-
-The key is demonstrating that you understand and have validated your proposed changes. If a maintainer can easily tell that a contribution was generated entirely by AI without human input or testing, it likely needs more work before submission.
-
-Contributors who consistently submit low-effort AI-generated changes may be restricted from further contributions at the maintainers' discretion.
-
-Please be respectful to maintainers and disclose AI assistance.
 
 ## Resources
 
 - [Spec-Driven Development Methodology](./spec-driven.md)
276  README.md

@@ -17,9 +17,9 @@
 - [🤔 What is Spec-Driven Development?](#-what-is-spec-driven-development)
 - [⚡ Get started](#-get-started)
 - [📽️ Video Overview](#️-video-overview)
-- [🤖 Supported AI Agents](#-supported-ai-agents)
 - [🔧 Specify CLI Reference](#-specify-cli-reference)
-- [📚 Core philosophy](#-core-philosophy)
+- [📦 APM Integration](#-apm-integration)
+- [📚 Core philosophy](#-core-philosophy)
 - [🌟 Development phases](#-development-phases)
 - [🎯 Experimental goals](#-experimental-goals)
 - [🔧 Prerequisites](#-prerequisites)
@@ -39,47 +39,13 @@ Spec-Driven Development **flips the script** on traditional software development
 
 ### 1. Install Specify
 
-Choose your preferred installation method:
-
-#### Option 1: Persistent Installation (Recommended)
-
-Install once and use everywhere:
-
-```bash
-uv tool install specify-cli --from git+https://github.com/github/spec-kit.git
-```
-
-Then use the tool directly:
-
-```bash
-specify init <PROJECT_NAME>
-specify check
-```
-
-#### Option 2: One-time Usage
-
-Run directly without installing:
+Initialize your project depending on the coding agent you're using:
 
 ```bash
 uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME>
 ```
 
-**Benefits of persistent installation:**
+### 2. Create the spec
 
-- Tool stays installed and available in PATH
-- No need to create shell aliases
-- Better tool management with `uv tool list`, `uv tool upgrade`, `uv tool uninstall`
-- Cleaner shell configuration
-
-### 2. Establish project principles
-
-Use the **`/constitution`** command to create your project's governing principles and development guidelines that will guide all subsequent development.
-
-```bash
-/constitution Create principles focused on code quality, testing standards, user experience consistency, and performance requirements
-```
-
-### 3. Create the spec
-
 Use the **`/specify`** command to describe what you want to build. Focus on the **what** and **why**, not the tech stack.
 
@@ -87,7 +53,7 @@ Use the **`/specify`** command to describe what you want to build. Focus on the
 /specify Build an application that can help me organize my photos in separate photo albums. Albums are grouped by date and can be re-organized by dragging and dropping on the main page. Albums are never in other nested albums. Within each album, photos are previewed in a tile-like interface.
 ```
 
-### 4. Create a technical implementation plan
+### 3. Create a technical implementation plan
 
 Use the **`/plan`** command to provide your tech stack and architecture choices.
 

@@ -95,21 +61,9 @@ Use the **`/plan`** command to provide your tech stack and architecture choices.
 /plan The application uses Vite with minimal number of libraries. Use vanilla HTML, CSS, and JavaScript as much as possible. Images are not uploaded anywhere and metadata is stored in a local SQLite database.
 ```
 
-### 5. Break down into tasks
+### 4. Break down and implement
 
-Use **`/tasks`** to create an actionable task list from your implementation plan.
-
-```bash
-/tasks
-```
-
-### 6. Execute implementation
-
-Use **`/implement`** to execute all tasks and build your feature according to the plan.
-
-```bash
-/implement
-```
+Use **`/tasks`** to create an actionable task list, then ask your agent to implement the feature.
 
 For detailed step-by-step instructions, see our [comprehensive guide](./spec-driven.md).
 
@@ -119,22 +73,6 @@ Want to see Spec Kit in action? Watch our [video overview](https://www.youtube.c
 
 [](https://www.youtube.com/watch?v=a9eR1xsfvHg&pp=0gcJCckJAYcqIYzv)
 
-## 🤖 Supported AI Agents
-
-| Agent | Support | Notes |
-|-----------------------------------------------------------|---------|---------------------------------------------------|
-| [Claude Code](https://www.anthropic.com/claude-code) | ✅ | |
-| [GitHub Copilot](https://code.visualstudio.com/) | ✅ | |
-| [Gemini CLI](https://github.com/google-gemini/gemini-cli) | ✅ | |
-| [Cursor](https://cursor.sh/) | ✅ | |
-| [Qwen Code](https://github.com/QwenLM/qwen-code) | ✅ | |
-| [opencode](https://opencode.ai/) | ✅ | |
-| [Windsurf](https://windsurf.com/) | ✅ | |
-| [Kilo Code](https://github.com/Kilo-Org/kilocode) | ✅ | |
-| [Auggie CLI](https://docs.augmentcode.com/cli/overview) | ✅ | |
-| [Roo Code](https://roocode.com/) | ✅ | |
-| [Codex CLI](https://github.com/openai/codex) | ⚠️ | Codex [does not support](https://github.com/openai/codex/issues/2890) custom arguments for slash commands. |
-
 ## 🔧 Specify CLI Reference
 
 The `specify` command supports the following options:
@@ -144,22 +82,22 @@ The `specify` command supports the following options:
 | Command | Description |
 |-------------|----------------------------------------------------------------|
 | `init` | Initialize a new Specify project from the latest template |
-| `check` | Check for installed tools (`git`, `claude`, `gemini`, `code`/`code-insiders`, `cursor-agent`, `windsurf`, `qwen`, `opencode`, `codex`) |
+| `check` | Check for installed tools (`git`, `claude`, `gemini`, `code`/`code-insiders`, `cursor-agent`) |
+| `apm` | APM - Agent Package Manager commands for Context management |
 
 ### `specify init` Arguments & Options
 
 | Argument/Option | Type | Description |
 |------------------------|----------|------------------------------------------------------------------------------|
-| `<project-name>` | Argument | Name for your new project directory (optional if using `--here`, or use `.` for current directory) |
-| `--ai` | Option | AI assistant to use: `claude`, `gemini`, `copilot`, `cursor`, `qwen`, `opencode`, `codex`, `windsurf`, `kilocode`, `auggie`, or `roo` |
+| `<project-name>` | Argument | Name for your new project directory (optional if using `--here`) |
+| `--ai` | Option | AI assistant to use: `claude`, `gemini`, `copilot`, or `cursor` |
 | `--script` | Option | Script variant to use: `sh` (bash/zsh) or `ps` (PowerShell) |
 | `--ignore-agent-tools` | Flag | Skip checks for AI agent tools like Claude Code |
 | `--no-git` | Flag | Skip git repository initialization |
 | `--here` | Flag | Initialize project in the current directory instead of creating a new one |
-| `--force` | Flag | Force merge/overwrite when initializing in current directory (skip confirmation) |
 | `--skip-tls` | Flag | Skip SSL/TLS verification (not recommended) |
 | `--debug` | Flag | Enable detailed debug output for troubleshooting |
-| `--github-token` | Option | GitHub token for API requests (or set GH_TOKEN/GITHUB_TOKEN env variable) |
+| `--use-apm` | Flag | Include APM (Agent Package Manager) structure for context management |
 
 ### Examples
 
@@ -170,24 +108,17 @@ specify init my-project
|
|||||||
# Initialize with specific AI assistant
|
# Initialize with specific AI assistant
|
||||||
specify init my-project --ai claude
|
specify init my-project --ai claude
|
||||||
|
|
||||||
|
# Initialize with APM support
|
||||||
|
specify init my-project --ai claude --use-apm
|
||||||
|
|
||||||
# Initialize with Cursor support
|
# Initialize with Cursor support
|
||||||
specify init my-project --ai cursor
|
specify init my-project --ai cursor
|
||||||
|
|
||||||
# Initialize with Windsurf support
|
|
||||||
specify init my-project --ai windsurf
|
|
||||||
|
|
||||||
# Initialize with PowerShell scripts (Windows/cross-platform)
|
# Initialize with PowerShell scripts (Windows/cross-platform)
|
||||||
specify init my-project --ai copilot --script ps
|
specify init my-project --ai copilot --script ps
|
||||||
|
|
||||||
# Initialize in current directory
|
# Initialize in current directory with APM
|
||||||
specify init . --ai copilot
|
specify init --here --ai copilot --use-apm
|
||||||
# or use the --here flag
|
|
||||||
specify init --here --ai copilot
|
|
||||||
|
|
||||||
# Force merge into current (non-empty) directory without confirmation
|
|
||||||
specify init . --force --ai copilot
|
|
||||||
# or
|
|
||||||
specify init --here --force --ai copilot
|
|
||||||
|
|
||||||
# Skip git initialization
|
# Skip git initialization
|
||||||
specify init my-project --ai gemini --no-git
|
specify init my-project --ai gemini --no-git
|
||||||
@@ -195,34 +126,53 @@ specify init my-project --ai gemini --no-git
|
|||||||
# Enable debug output for troubleshooting
|
# Enable debug output for troubleshooting
|
||||||
specify init my-project --ai claude --debug
|
specify init my-project --ai claude --debug
|
||||||
|
|
||||||
# Use GitHub token for API requests (helpful for corporate environments)
|
|
||||||
specify init my-project --ai claude --github-token ghp_your_token_here
|
|
||||||
|
|
||||||
# Check system requirements
|
# Check system requirements
|
||||||
specify check
|
specify check
|
||||||
```
|
```
|
||||||
|
|
||||||
### Available Slash Commands
|
## 📦 APM Integration - NPM for Agent Context
|
||||||
|
|
||||||
After running `specify init`, your AI coding agent will have access to these slash commands for structured development:
|
**Context as Code Packages**: Package and share agent intelligence like npm packages. With APM, your agents get:
|
||||||
|
|
||||||
| Command | Description |
|
- **Team knowledge** from reusable context packages
|
||||||
|-----------------|-----------------------------------------------------------------------|
|
- **Optimized context** through mathematical relevance scoring
|
||||||
| `/constitution` | Create or update project governing principles and development guidelines |
|
- **Universal compatibility** via dynamically generated Agents.md files
|
||||||
| `/specify` | Define what you want to build (requirements and user stories) |
|
|
||||||
| `/clarify` | Clarify underspecified areas (must be run before `/plan` unless explicitly skipped; formerly `/quizme`) |
|
|
||||||
| `/plan` | Create technical implementation plans with your chosen tech stack |
|
|
||||||
| `/tasks` | Generate actionable task lists for implementation |
|
|
||||||
| `/analyze` | Cross-artifact consistency & coverage analysis (run after /tasks, before /implement) |
|
|
||||||
| `/implement` | Execute all tasks to build the feature according to the plan |
|
|
||||||
|
|
||||||
### Environment Variables
|
[Complete Context Management Guide →](docs/context-management.md)
|
||||||
|
|
||||||
| Variable | Description |
|
Spec Kit includes full APM (Agent Package Manager) functionality for managing modular context packages and files:
|
||||||
|------------------|------------------------------------------------------------------------------------------------|
|
|
||||||
| `SPECIFY_FEATURE` | Override feature detection for non-Git repositories. Set to the feature directory name (e.g., `001-photo-albums`) to work on a specific feature when not using Git branches.<br/>**Must be set in the context of the agent you're working with prior to using `/plan` or follow-up commands.** |
|
|
||||||
|
|
||||||
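A minimal sketch of that override, assuming a feature directory named `001-photo-albums` already exists under `specs/`:

```bash
# Set in the same shell session as your agent, before running /plan or follow-up commands.
# The directory name below is illustrative; use your actual specs/<feature> folder.
export SPECIFY_FEATURE=001-photo-albums
```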
## 📚 Core philosophy
|
### Unified Initialization
|
||||||
|
```bash
|
||||||
|
# The --use-apm flag creates both SDD and APM structures
|
||||||
|
specify init my-project --ai claude --use-apm
|
||||||
|
```
|
||||||
|
|
||||||
|
### APM Commands
|
||||||
|
```bash
|
||||||
|
# Core APM commands available under 'apm' subcommand
|
||||||
|
|
||||||
|
# Install APM packages from apm.yml
|
||||||
|
specify apm install
|
||||||
|
|
||||||
|
# Add APM package to apm.yml and install
|
||||||
|
specify apm install org/repo
|
||||||
|
|
||||||
|
# Remove package from apm.yml and apm_modules
|
||||||
|
specify apm uninstall org/repo
|
||||||
|
|
||||||
|
# Remove orphaned packages not in apm.yml
|
||||||
|
specify apm prune
|
||||||
|
|
||||||
|
# List installed APM packages
|
||||||
|
specify apm deps list
|
||||||
|
|
||||||
|
# Generate nested optimal AGENTS.md tree
|
||||||
|
# Uses installed APM packages and local context files
|
||||||
|
specify apm compile
|
||||||
|
```
|
||||||
|
|
||||||
|
## 📚 Core philosophy
|
||||||
|
|
||||||
Spec-Driven Development is a structured process that emphasizes:
|
Spec-Driven Development is a structured process that emphasizes:
|
||||||
|
|
||||||
@@ -268,13 +218,11 @@ Our research and experimentation focus on:
|
|||||||
## 🔧 Prerequisites
|
## 🔧 Prerequisites
|
||||||
|
|
||||||
- **Linux/macOS** (or WSL2 on Windows)
|
- **Linux/macOS** (or WSL2 on Windows)
|
||||||
- AI coding agent: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), [Gemini CLI](https://github.com/google-gemini/gemini-cli), [Cursor](https://cursor.sh/), [Qwen CLI](https://github.com/QwenLM/qwen-code), [opencode](https://opencode.ai/), [Codex CLI](https://github.com/openai/codex), or [Windsurf](https://windsurf.com/)
|
- AI coding agent: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), [Gemini CLI](https://github.com/google-gemini/gemini-cli), or [Cursor](https://cursor.sh/)
|
||||||
- [uv](https://docs.astral.sh/uv/) for package management
|
- [uv](https://docs.astral.sh/uv/) for package management
|
||||||
- [Python 3.11+](https://www.python.org/downloads/)
|
- [Python 3.11+](https://www.python.org/downloads/)
|
||||||
- [Git](https://git-scm.com/downloads)
|
- [Git](https://git-scm.com/downloads)
|
||||||
|
|
||||||
If you encounter issues with an agent, please open an issue so we can refine the integration.
|
|
||||||
|
|
||||||
## 📖 Learn more
|
## 📖 Learn more
|
||||||
|
|
||||||
- **[Complete Spec-Driven Development Methodology](./spec-driven.md)** - Deep dive into the full process
|
- **[Complete Spec-Driven Development Methodology](./spec-driven.md)** - Deep dive into the full process
|
||||||
@@ -296,13 +244,7 @@ specify init <project_name>
|
|||||||
Or initialize in the current directory:
|
Or initialize in the current directory:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
specify init .
|
|
||||||
# or use the --here flag
|
|
||||||
specify init --here
|
specify init --here
|
||||||
# Skip confirmation when the directory already has files
|
|
||||||
specify init . --force
|
|
||||||
# or
|
|
||||||
specify init --here --force
|
|
||||||
```
|
```
|
||||||
|
|
||||||

|

|
||||||
@@ -313,48 +255,25 @@ You will be prompted to select the AI agent you are using. You can also proactiv
|
|||||||
specify init <project_name> --ai claude
|
specify init <project_name> --ai claude
|
||||||
specify init <project_name> --ai gemini
|
specify init <project_name> --ai gemini
|
||||||
specify init <project_name> --ai copilot
|
specify init <project_name> --ai copilot
|
||||||
specify init <project_name> --ai cursor
|
|
||||||
specify init <project_name> --ai qwen
|
|
||||||
specify init <project_name> --ai opencode
|
|
||||||
specify init <project_name> --ai codex
|
|
||||||
specify init <project_name> --ai windsurf
|
|
||||||
# Or in current directory:
|
# Or in current directory:
|
||||||
specify init . --ai claude
|
|
||||||
specify init . --ai codex
|
|
||||||
# or use --here flag
|
|
||||||
specify init --here --ai claude
|
specify init --here --ai claude
|
||||||
specify init --here --ai codex
|
|
||||||
# Force merge into a non-empty current directory
|
|
||||||
specify init . --force --ai claude
|
|
||||||
# or
|
|
||||||
specify init --here --force --ai claude
|
|
||||||
```
|
```
|
||||||
|
|
||||||
The CLI will check if you have Claude Code, Gemini CLI, Cursor CLI, Qwen CLI, opencode, or Codex CLI installed. If you do not, or you prefer to get the templates without checking for the right tools, use `--ignore-agent-tools` with your command:
|
The CLI will check if you have Claude Code or Gemini CLI installed. If you do not, or you prefer to get the templates without checking for the right tools, use `--ignore-agent-tools` with your command:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
specify init <project_name> --ai claude --ignore-agent-tools
|
specify init <project_name> --ai claude --ignore-agent-tools
|
||||||
```
|
```
|
||||||
|
|
||||||
### **STEP 1:** Establish project principles
|
### **STEP 1:** Bootstrap the project
|
||||||
|
|
||||||
Go to the project folder and run your AI agent. In our example, we're using `claude`.
|
Go to the project folder and run your AI agent. In our example, we're using `claude`.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
You will know that things are configured correctly if you see the `/constitution`, `/specify`, `/plan`, `/tasks`, and `/implement` commands available.
|
You will know that things are configured correctly if you see the `/specify`, `/plan`, and `/tasks` commands available.
|
||||||
|
|
||||||
The first step should be establishing your project's governing principles using the `/constitution` command. This helps ensure consistent decision-making throughout all subsequent development phases:
|
The first step should be creating a new project scaffolding. Use `/specify` command and then provide the concrete requirements for the project you want to develop.
|
||||||
|
|
||||||
```text
|
|
||||||
/constitution Create principles focused on code quality, testing standards, user experience consistency, and performance requirements. Include governance for how these principles should guide technical decisions and implementation choices.
|
|
||||||
```
|
|
||||||
|
|
||||||
This step creates or updates the `.specify/memory/constitution.md` file with your project's foundational guidelines that the AI agent will reference during specification, planning, and implementation phases.
|
|
||||||
|
|
||||||
### **STEP 2:** Create project specifications
|
|
||||||
|
|
||||||
With your project principles established, you can now create the functional specifications. Use the `/specify` command and then provide the concrete requirements for the project you want to develop.
|
|
||||||
|
|
||||||
>[!IMPORTANT]
|
>[!IMPORTANT]
|
||||||
>Be as explicit as possible about _what_ you are trying to build and _why_. **Do not focus on the tech stack at this point**.
|
>Be as explicit as possible about _what_ you are trying to build and _why_. **Do not focus on the tech stack at this point**.
|
||||||
@@ -389,37 +308,28 @@ The produced specification should contain a set of user stories and functional r
|
|||||||
At this stage, your project folder contents should resemble the following:
|
At this stage, your project folder contents should resemble the following:
|
||||||
|
|
||||||
```text
|
```text
|
||||||
└── .specify
|
├── memory
|
||||||
├── memory
|
│ ├── constitution.md
|
||||||
│ └── constitution.md
|
│ └── constitution_update_checklist.md
|
||||||
├── scripts
|
├── scripts
|
||||||
│ ├── check-prerequisites.sh
|
│ ├── check-task-prerequisites.sh
|
||||||
│ ├── common.sh
|
│ ├── common.sh
|
||||||
│ ├── create-new-feature.sh
|
│ ├── create-new-feature.sh
|
||||||
│ ├── setup-plan.sh
|
│ ├── get-feature-paths.sh
|
||||||
│ └── update-claude-md.sh
|
│ ├── setup-plan.sh
|
||||||
├── specs
|
│ └── update-claude-md.sh
|
||||||
│ └── 001-create-taskify
|
├── specs
|
||||||
│ └── spec.md
|
│ └── 001-create-taskify
|
||||||
└── templates
|
│ └── spec.md
|
||||||
├── plan-template.md
|
└── templates
|
||||||
├── spec-template.md
|
├── plan-template.md
|
||||||
└── tasks-template.md
|
├── spec-template.md
|
||||||
|
└── tasks-template.md
|
||||||
```
|
```
|
||||||
|
|
||||||
### **STEP 3:** Functional specification clarification (required before planning)
|
### **STEP 2:** Functional specification clarification
|
||||||
|
|
||||||
With the baseline specification created, you can go ahead and clarify any of the requirements that were not captured properly within the first shot attempt.
|
With the baseline specification created, you can go ahead and clarify any of the requirements that were not captured properly within the first shot attempt. For example, you could use a prompt like this within the same Claude Code session:
|
||||||
|
|
||||||
You should run the structured clarification workflow **before** creating a technical plan to reduce rework downstream.
|
|
||||||
|
|
||||||
Preferred order:
|
|
||||||
1. Use `/clarify` (structured) – sequential, coverage-based questioning that records answers in a Clarifications section.
|
|
||||||
2. Optionally follow up with ad-hoc free-form refinement if something still feels vague.
|
|
||||||
|
|
||||||
If you intentionally want to skip clarification (e.g., spike or exploratory prototype), explicitly state that so the agent doesn't block on missing clarifications.
|
|
||||||
|
|
||||||
Example free-form refinement prompt (after `/clarify` if still needed):
|
|
||||||
|
|
||||||
```text
|
```text
|
||||||
For each sample project or project that you create there should be a variable number of tasks between 5 and 15
|
For each sample project or project that you create there should be a variable number of tasks between 5 and 15
|
||||||
@@ -435,7 +345,7 @@ Read the review and acceptance checklist, and check off each item in the checkli
|
|||||||
|
|
||||||
It's important to use the interaction with Claude Code as an opportunity to clarify and ask questions around the specification - **do not treat its first attempt as final**.
|
It's important to use the interaction with Claude Code as an opportunity to clarify and ask questions around the specification - **do not treat its first attempt as final**.
|
||||||
|
|
||||||
### **STEP 4:** Generate a plan
|
### **STEP 3:** Generate a plan
|
||||||
|
|
||||||
You can now be specific about the tech stack and other technical requirements. You can use the `/plan` command that is built into the project template with a prompt like this:
|
You can now be specific about the tech stack and other technical requirements. You can use the `/plan` command that is built into the project template with a prompt like this:
|
||||||
|
|
||||||
@@ -451,11 +361,13 @@ The output of this step will include a number of implementation detail documents
|
|||||||
.
|
.
|
||||||
├── CLAUDE.md
|
├── CLAUDE.md
|
||||||
├── memory
|
├── memory
|
||||||
│ └── constitution.md
|
│ ├── constitution.md
|
||||||
|
│ └── constitution_update_checklist.md
|
||||||
├── scripts
|
├── scripts
|
||||||
│ ├── check-prerequisites.sh
|
│ ├── check-task-prerequisites.sh
|
||||||
│ ├── common.sh
|
│ ├── common.sh
|
||||||
│ ├── create-new-feature.sh
|
│ ├── create-new-feature.sh
|
||||||
|
│ ├── get-feature-paths.sh
|
||||||
│ ├── setup-plan.sh
|
│ ├── setup-plan.sh
|
||||||
│ └── update-claude-md.sh
|
│ └── update-claude-md.sh
|
||||||
├── specs
|
├── specs
|
||||||
@@ -502,7 +414,7 @@ That's way too untargeted research. The research needs to help you solve a speci
|
|||||||
>[!NOTE]
|
>[!NOTE]
|
||||||
>Claude Code might be over-eager and add components that you did not ask for. Ask it to clarify the rationale and the source of the change.
|
>Claude Code might be over-eager and add components that you did not ask for. Ask it to clarify the rationale and the source of the change.
|
||||||
|
|
||||||
### **STEP 5:** Have Claude Code validate the plan
|
### **STEP 4:** Have Claude Code validate the plan
|
||||||
|
|
||||||
With the plan in place, you should have Claude Code run through it to make sure that there are no missing pieces. You can use a prompt like this:
|
With the plan in place, you should have Claude Code run through it to make sure that there are no missing pieces. You can use a prompt like this:
|
||||||
|
|
||||||
@@ -521,25 +433,20 @@ You can also ask Claude Code (if you have the [GitHub CLI](https://docs.github.c
|
|||||||
>[!NOTE]
|
>[!NOTE]
|
||||||
>Before you have the agent implement it, it's also worth prompting Claude Code to cross-check the details to see if there are any over-engineered pieces (remember - it can be over-eager). If over-engineered components or decisions exist, you can ask Claude Code to resolve them. Ensure that Claude Code follows the [constitution](base/memory/constitution.md) as the foundational piece that it must adhere to when establishing the plan.
|
>Before you have the agent implement it, it's also worth prompting Claude Code to cross-check the details to see if there are any over-engineered pieces (remember - it can be over-eager). If over-engineered components or decisions exist, you can ask Claude Code to resolve them. Ensure that Claude Code follows the [constitution](base/memory/constitution.md) as the foundational piece that it must adhere to when establishing the plan.
|
||||||
|
|
||||||
### STEP 6: Implementation
|
### STEP 5: Implementation
|
||||||
|
|
||||||
Once ready, use the `/implement` command to execute your implementation plan:
|
Once ready, instruct Claude Code to implement your solution (example path included):
|
||||||
|
|
||||||
```text
|
```text
|
||||||
/implement
|
implement specs/002-create-taskify/plan.md
|
||||||
```
|
```
|
||||||
|
|
||||||
The `/implement` command will:
|
Claude Code will spring into action and will start creating the implementation.
|
||||||
- Validate that all prerequisites are in place (constitution, spec, plan, and tasks)
|
|
||||||
- Parse the task breakdown from `tasks.md`
|
|
||||||
- Execute tasks in the correct order, respecting dependencies and parallel execution markers
|
|
||||||
- Follow the TDD approach defined in your task plan
|
|
||||||
- Provide progress updates and handle errors appropriately
|
|
||||||
|
|
||||||
>[!IMPORTANT]
|
>[!IMPORTANT]
|
||||||
>The AI agent will execute local CLI commands (such as `dotnet`, `npm`, etc.) - make sure you have the required tools installed on your machine.
|
>Claude Code will execute local CLI commands (such as `dotnet`) - make sure you have them installed on your machine.
|
||||||
|
|
||||||
Once the implementation is complete, test the application and resolve any runtime errors that may not be visible in CLI logs (e.g., browser console errors). You can copy and paste such errors back to your AI agent for resolution.
|
Once the implementation step is done, ask Claude Code to try to run the application and resolve any emerging build errors. If the application runs, but there are _runtime errors_ that are not directly available to Claude Code through CLI logs (e.g., errors rendered in browser logs), copy and paste the error in Claude Code and have it attempt to resolve it.
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
@@ -568,6 +475,7 @@ rm gcm-linux_amd64.2.6.1.deb
|
|||||||
|
|
||||||
- Den Delimarsky ([@localden](https://github.com/localden))
|
- Den Delimarsky ([@localden](https://github.com/localden))
|
||||||
- John Lam ([@jflam](https://github.com/jflam))
|
- John Lam ([@jflam](https://github.com/jflam))
|
||||||
|
- Daniel Meppiel ([@danielmeppiel](https://github.com/danielmeppiel))
|
||||||
|
|
||||||
## 💬 Support
|
## 💬 Support
|
||||||
|
|
||||||
|
|||||||
59
docs/context-management.md
Normal file
59
docs/context-management.md
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
# Context Management with APM
|
||||||
|
|
||||||
|
## NPM for Agent Context
|
||||||
|
|
||||||
|
Just like npm revolutionized JavaScript by enabling package reuse, APM creates an ecosystem for sharing agent context.
|
||||||
|
|
||||||
|
## Package Composition & Reuse
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# Your project inherits team knowledge via apm.yml file in the root
|
||||||
|
dependencies:
|
||||||
|
apm:
|
||||||
|
- company/design-system # UI patterns, brand guidelines
|
||||||
|
- company/security-standards # Auth patterns, data handling
|
||||||
|
- community/best-practices # Industry standards
|
||||||
|
```
|
||||||
|
|
||||||
|
**Result**: Your project gets the instructions from all of the above packages applied via dynamically generated Agents.md files using `specify apm compile`. These files are optimized to minimize contextual load for agents compatible with the Agents.md standard.
|
||||||
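A minimal sketch of that flow, reusing the illustrative `company/design-system` package name from the `apm.yml` example above (not a real published package):

```bash
# Add the package to apm.yml and install it into apm_modules
specify apm install company/design-system

# Regenerate the distributed AGENTS.md files from installed packages and local context
specify apm compile
```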
|
|
||||||
|
**Enterprise Scenario**: Design team creates accessibility guidelines once → entire organization uses them → agents work consistently across all projects.
|
||||||
|
|
||||||
|
## Mathematical Context Optimization
|
||||||
|
|
||||||
|
**The Technical Foundation**: APM uses mathematical optimization to solve the context efficiency problem.
|
||||||
|
|
||||||
|
```
|
||||||
|
Context_Efficiency = Relevant_Instructions / Total_Instructions_Loaded
|
||||||
|
```
|
||||||
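As a purely illustrative reading of that ratio (the numbers below are made up, not APM output): if an agent's placement loads 40 instructions and 32 of them actually apply to the directory it is working in, the efficiency is 0.8, and the optimizer favors placements that push it toward 1:

```bash
# Toy calculation of the ratio above; APM computes this internally (presumably during `specify apm compile`).
relevant=32
total=40
awk -v r="$relevant" -v t="$total" 'BEGIN { printf "Context_Efficiency = %.2f\n", r / t }'
```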
|
|
||||||
|
**Why This Matters**: When agents work in the `/styles/` directory, they shouldn't load Python compliance rules. APM's Context Optimization Engine ensures agents get minimal, highly relevant context.
|
||||||
|
|
||||||
|
**The Algorithm**: A constraint-satisfaction optimization finds the placement that minimizes context pollution while maximizing relevance. Each instruction gets a mathematically optimal placement across the project hierarchy.
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
```bash
|
||||||
|
specify init my-project --use-apm --ai copilot
|
||||||
|
specify apm install company/design-system
|
||||||
|
specify apm compile # Mathematical optimization generates distributed AGENTS.md files
|
||||||
|
```
|
||||||
|
|
||||||
|
## Universal Agent Compatibility
|
||||||
|
|
||||||
|
APM generates distributed `AGENTS.md` files compatible with the [agents.md standard](https://agents.md), working with any coding agent (GitHub Copilot, Cursor, Claude, Codex, Aider, etc.).
|
||||||
|
|
||||||
|
## Authentication Setup (Optional)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export GITHUB_APM_PAT=your_fine_grained_token_here
|
||||||
|
```
|
||||||
|
|
||||||
|
Only needed for private packages. Public community packages work without authentication.
|
||||||
|
|
||||||
|
## The Complete Value
|
||||||
|
|
||||||
|
1. **Package Ecosystem** - Share and compose agent intelligence like code dependencies
|
||||||
|
2. **Mathematical Optimization** - Context Optimization Engine ensures relevance without pollution
|
||||||
|
3. **Universal Standards** - Works with any agent via industry-standard agents.md format
|
||||||
|
4. **Enterprise Ready** - Team knowledge scales across entire organizations
|
||||||
@@ -55,8 +55,8 @@ Our research and experimentation focus on:
|
|||||||
|
|
||||||
## Contributing
|
## Contributing
|
||||||
|
|
||||||
Please see our [Contributing Guide](https://github.com/github/spec-kit/blob/main/CONTRIBUTING.md) for information on how to contribute to this project.
|
Please see our [Contributing Guide](CONTRIBUTING.md) for information on how to contribute to this project.
|
||||||
|
|
||||||
## Support
|
## Support
|
||||||
|
|
||||||
For support, please check our [Support Guide](https://github.com/github/spec-kit/blob/main/SUPPORT.md) or open an issue on GitHub.
|
For support, please check our [Support Guide](SUPPORT.md) or open an issue on GitHub.
|
||||||
|
|||||||
@@ -21,8 +21,6 @@ uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME
|
|||||||
Or initialize in the current directory:
|
Or initialize in the current directory:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
uvx --from git+https://github.com/github/spec-kit.git specify init .
|
|
||||||
# or use the --here flag
|
|
||||||
uvx --from git+https://github.com/github/spec-kit.git specify init --here
|
uvx --from git+https://github.com/github/spec-kit.git specify init --here
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
85
memory/constitution_update_checklist.md
Normal file
85
memory/constitution_update_checklist.md
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
# Constitution Update Checklist
|
||||||
|
|
||||||
|
When amending the constitution (`/memory/constitution.md`), ensure all dependent documents are updated to maintain consistency.
|
||||||
|
|
||||||
|
## Templates to Update
|
||||||
|
|
||||||
|
### When adding/modifying ANY article:
|
||||||
|
- [ ] `/templates/plan-template.md` - Update Constitution Check section
|
||||||
|
- [ ] `/templates/spec-template.md` - Update if requirements/scope affected
|
||||||
|
- [ ] `/templates/tasks-template.md` - Update if new task types needed
|
||||||
|
- [ ] `/.claude/commands/plan.md` - Update if planning process changes
|
||||||
|
- [ ] `/.claude/commands/tasks.md` - Update if task generation affected
|
||||||
|
- [ ] `/CLAUDE.md` - Update runtime development guidelines
|
||||||
|
|
||||||
|
### Article-specific updates:
|
||||||
|
|
||||||
|
#### Article I (Library-First):
|
||||||
|
- [ ] Ensure templates emphasize library creation
|
||||||
|
- [ ] Update CLI command examples
|
||||||
|
- [ ] Add llms.txt documentation requirements
|
||||||
|
|
||||||
|
#### Article II (CLI Interface):
|
||||||
|
- [ ] Update CLI flag requirements in templates
|
||||||
|
- [ ] Add text I/O protocol reminders
|
||||||
|
|
||||||
|
#### Article III (Test-First):
|
||||||
|
- [ ] Update test order in all templates
|
||||||
|
- [ ] Emphasize TDD requirements
|
||||||
|
- [ ] Add test approval gates
|
||||||
|
|
||||||
|
#### Article IV (Integration Testing):
|
||||||
|
- [ ] List integration test triggers
|
||||||
|
- [ ] Update test type priorities
|
||||||
|
- [ ] Add real dependency requirements
|
||||||
|
|
||||||
|
#### Article V (Observability):
|
||||||
|
- [ ] Add logging requirements to templates
|
||||||
|
- [ ] Include multi-tier log streaming
|
||||||
|
- [ ] Update performance monitoring sections
|
||||||
|
|
||||||
|
#### Article VI (Versioning):
|
||||||
|
- [ ] Add version increment reminders
|
||||||
|
- [ ] Include breaking change procedures
|
||||||
|
- [ ] Update migration requirements
|
||||||
|
|
||||||
|
#### Article VII (Simplicity):
|
||||||
|
- [ ] Update project count limits
|
||||||
|
- [ ] Add pattern prohibition examples
|
||||||
|
- [ ] Include YAGNI reminders
|
||||||
|
|
||||||
|
## Validation Steps
|
||||||
|
|
||||||
|
1. **Before committing constitution changes:**
|
||||||
|
- [ ] All templates reference new requirements
|
||||||
|
- [ ] Examples updated to match new rules
|
||||||
|
- [ ] No contradictions between documents
|
||||||
|
|
||||||
|
2. **After updating templates:**
|
||||||
|
- [ ] Run through a sample implementation plan
|
||||||
|
- [ ] Verify all constitution requirements addressed
|
||||||
|
- [ ] Check that templates are self-contained (readable without constitution)
|
||||||
|
|
||||||
|
3. **Version tracking:**
|
||||||
|
- [ ] Update constitution version number
|
||||||
|
- [ ] Note version in template footers
|
||||||
|
- [ ] Add amendment to constitution history
|
||||||
|
|
||||||
|
## Common Misses
|
||||||
|
|
||||||
|
Watch for these often-forgotten updates:
|
||||||
|
- Command documentation (`/commands/*.md`)
|
||||||
|
- Checklist items in templates
|
||||||
|
- Example code/commands
|
||||||
|
- Domain-specific variations (web vs mobile vs CLI)
|
||||||
|
- Cross-references between documents
|
||||||
|
|
||||||
|
## Template Sync Status
|
||||||
|
|
||||||
|
Last sync check: 2025-07-16
|
||||||
|
- Constitution version: 2.1.1
|
||||||
|
- Templates aligned: ❌ (missing versioning, observability details)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*This checklist ensures the constitution's principles are consistently applied across all project documentation.*
|
||||||
@@ -1,23 +1,44 @@
|
|||||||
[project]
|
[project]
|
||||||
name = "specify-cli"
|
name = "specify-cli"
|
||||||
version = "0.0.17"
|
version = "0.0.4"
|
||||||
description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)."
|
description = "Setup tool for Specify spec-driven development projects"
|
||||||
requires-python = ">=3.11"
|
requires-python = ">=3.11"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
# Existing spec-kit dependencies
|
||||||
"typer",
|
"typer",
|
||||||
"rich",
|
"rich>=13.0.0",
|
||||||
"httpx[socks]",
|
"httpx[socks]",
|
||||||
"platformdirs",
|
"platformdirs",
|
||||||
"readchar",
|
"readchar",
|
||||||
"truststore>=0.10.4",
|
"truststore>=0.10.4",
|
||||||
|
# APM dependencies (from awd-cli, excluding runtime/embargo items)
|
||||||
|
"click>=8.0.0",
|
||||||
|
"colorama>=0.4.6",
|
||||||
|
"pyyaml>=6.0.0",
|
||||||
|
"requests>=2.28.0",
|
||||||
|
"python-frontmatter>=1.0.0",
|
||||||
|
"tomli>=1.2.0; python_version<'3.11'",
|
||||||
|
"toml>=0.10.2",
|
||||||
|
"rich-click>=1.7.0",
|
||||||
|
"watchdog>=3.0.0",
|
||||||
|
"GitPython>=3.1.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[project.scripts]
|
[project.scripts]
|
||||||
specify = "specify_cli:main"
|
specify = "specify_cli:main"
|
||||||
|
|
||||||
|
[project.optional-dependencies]
|
||||||
|
dev = [
|
||||||
|
"pytest>=7.0.0",
|
||||||
|
"pytest-cov>=4.0.0",
|
||||||
|
"black>=23.0.0",
|
||||||
|
"isort>=5.0.0",
|
||||||
|
"mypy>=1.0.0",
|
||||||
|
]
|
||||||
|
|
||||||
[build-system]
|
[build-system]
|
||||||
requires = ["hatchling"]
|
requires = ["hatchling"]
|
||||||
build-backend = "hatchling.build"
|
build-backend = "hatchling.build"
|
||||||
|
|
||||||
[tool.hatch.build.targets.wheel]
|
[tool.hatch.build.targets.wheel]
|
||||||
packages = ["src/specify_cli"]
|
packages = ["src/specify_cli", "src/apm_cli"]
|
||||||
|
|||||||
@@ -1,166 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# Consolidated prerequisite checking script
|
|
||||||
#
|
|
||||||
# This script provides unified prerequisite checking for Spec-Driven Development workflow.
|
|
||||||
# It replaces the functionality previously spread across multiple scripts.
|
|
||||||
#
|
|
||||||
# Usage: ./check-prerequisites.sh [OPTIONS]
|
|
||||||
#
|
|
||||||
# OPTIONS:
|
|
||||||
# --json Output in JSON format
|
|
||||||
# --require-tasks Require tasks.md to exist (for implementation phase)
|
|
||||||
# --include-tasks Include tasks.md in AVAILABLE_DOCS list
|
|
||||||
# --paths-only Only output path variables (no validation)
|
|
||||||
# --help, -h Show help message
|
|
||||||
#
|
|
||||||
# OUTPUTS:
|
|
||||||
# JSON mode: {"FEATURE_DIR":"...", "AVAILABLE_DOCS":["..."]}
|
|
||||||
# Text mode: FEATURE_DIR:... \n AVAILABLE_DOCS: \n ✓/✗ file.md
|
|
||||||
# Paths only: REPO_ROOT: ... \n BRANCH: ... \n FEATURE_DIR: ... etc.
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# Parse command line arguments
|
|
||||||
JSON_MODE=false
|
|
||||||
REQUIRE_TASKS=false
|
|
||||||
INCLUDE_TASKS=false
|
|
||||||
PATHS_ONLY=false
|
|
||||||
|
|
||||||
for arg in "$@"; do
|
|
||||||
case "$arg" in
|
|
||||||
--json)
|
|
||||||
JSON_MODE=true
|
|
||||||
;;
|
|
||||||
--require-tasks)
|
|
||||||
REQUIRE_TASKS=true
|
|
||||||
;;
|
|
||||||
--include-tasks)
|
|
||||||
INCLUDE_TASKS=true
|
|
||||||
;;
|
|
||||||
--paths-only)
|
|
||||||
PATHS_ONLY=true
|
|
||||||
;;
|
|
||||||
--help|-h)
|
|
||||||
cat << 'EOF'
|
|
||||||
Usage: check-prerequisites.sh [OPTIONS]
|
|
||||||
|
|
||||||
Consolidated prerequisite checking for Spec-Driven Development workflow.
|
|
||||||
|
|
||||||
OPTIONS:
|
|
||||||
--json Output in JSON format
|
|
||||||
--require-tasks Require tasks.md to exist (for implementation phase)
|
|
||||||
--include-tasks Include tasks.md in AVAILABLE_DOCS list
|
|
||||||
--paths-only Only output path variables (no prerequisite validation)
|
|
||||||
--help, -h Show this help message
|
|
||||||
|
|
||||||
EXAMPLES:
|
|
||||||
# Check task prerequisites (plan.md required)
|
|
||||||
./check-prerequisites.sh --json
|
|
||||||
|
|
||||||
# Check implementation prerequisites (plan.md + tasks.md required)
|
|
||||||
./check-prerequisites.sh --json --require-tasks --include-tasks
|
|
||||||
|
|
||||||
# Get feature paths only (no validation)
|
|
||||||
./check-prerequisites.sh --paths-only
|
|
||||||
|
|
||||||
EOF
|
|
||||||
exit 0
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "ERROR: Unknown option '$arg'. Use --help for usage information." >&2
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
|
|
||||||
# Source common functions
|
|
||||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
||||||
source "$SCRIPT_DIR/common.sh"
|
|
||||||
|
|
||||||
# Get feature paths and validate branch
|
|
||||||
eval $(get_feature_paths)
|
|
||||||
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
|
|
||||||
|
|
||||||
# If paths-only mode, output paths and exit (support JSON + paths-only combined)
|
|
||||||
if $PATHS_ONLY; then
|
|
||||||
if $JSON_MODE; then
|
|
||||||
# Minimal JSON paths payload (no validation performed)
|
|
||||||
printf '{"REPO_ROOT":"%s","BRANCH":"%s","FEATURE_DIR":"%s","FEATURE_SPEC":"%s","IMPL_PLAN":"%s","TASKS":"%s"}\n' \
|
|
||||||
"$REPO_ROOT" "$CURRENT_BRANCH" "$FEATURE_DIR" "$FEATURE_SPEC" "$IMPL_PLAN" "$TASKS"
|
|
||||||
else
|
|
||||||
echo "REPO_ROOT: $REPO_ROOT"
|
|
||||||
echo "BRANCH: $CURRENT_BRANCH"
|
|
||||||
echo "FEATURE_DIR: $FEATURE_DIR"
|
|
||||||
echo "FEATURE_SPEC: $FEATURE_SPEC"
|
|
||||||
echo "IMPL_PLAN: $IMPL_PLAN"
|
|
||||||
echo "TASKS: $TASKS"
|
|
||||||
fi
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Validate required directories and files
|
|
||||||
if [[ ! -d "$FEATURE_DIR" ]]; then
|
|
||||||
echo "ERROR: Feature directory not found: $FEATURE_DIR" >&2
|
|
||||||
echo "Run /specify first to create the feature structure." >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ ! -f "$IMPL_PLAN" ]]; then
|
|
||||||
echo "ERROR: plan.md not found in $FEATURE_DIR" >&2
|
|
||||||
echo "Run /plan first to create the implementation plan." >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Check for tasks.md if required
|
|
||||||
if $REQUIRE_TASKS && [[ ! -f "$TASKS" ]]; then
|
|
||||||
echo "ERROR: tasks.md not found in $FEATURE_DIR" >&2
|
|
||||||
echo "Run /tasks first to create the task list." >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Build list of available documents
|
|
||||||
docs=()
|
|
||||||
|
|
||||||
# Always check these optional docs
|
|
||||||
[[ -f "$RESEARCH" ]] && docs+=("research.md")
|
|
||||||
[[ -f "$DATA_MODEL" ]] && docs+=("data-model.md")
|
|
||||||
|
|
||||||
# Check contracts directory (only if it exists and has files)
|
|
||||||
if [[ -d "$CONTRACTS_DIR" ]] && [[ -n "$(ls -A "$CONTRACTS_DIR" 2>/dev/null)" ]]; then
|
|
||||||
docs+=("contracts/")
|
|
||||||
fi
|
|
||||||
|
|
||||||
[[ -f "$QUICKSTART" ]] && docs+=("quickstart.md")
|
|
||||||
|
|
||||||
# Include tasks.md if requested and it exists
|
|
||||||
if $INCLUDE_TASKS && [[ -f "$TASKS" ]]; then
|
|
||||||
docs+=("tasks.md")
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Output results
|
|
||||||
if $JSON_MODE; then
|
|
||||||
# Build JSON array of documents
|
|
||||||
if [[ ${#docs[@]} -eq 0 ]]; then
|
|
||||||
json_docs="[]"
|
|
||||||
else
|
|
||||||
json_docs=$(printf '"%s",' "${docs[@]}")
|
|
||||||
json_docs="[${json_docs%,}]"
|
|
||||||
fi
|
|
||||||
|
|
||||||
printf '{"FEATURE_DIR":"%s","AVAILABLE_DOCS":%s}\n' "$FEATURE_DIR" "$json_docs"
|
|
||||||
else
|
|
||||||
# Text output
|
|
||||||
echo "FEATURE_DIR:$FEATURE_DIR"
|
|
||||||
echo "AVAILABLE_DOCS:"
|
|
||||||
|
|
||||||
# Show status of each potential document
|
|
||||||
check_file "$RESEARCH" "research.md"
|
|
||||||
check_file "$DATA_MODEL" "data-model.md"
|
|
||||||
check_dir "$CONTRACTS_DIR" "contracts/"
|
|
||||||
check_file "$QUICKSTART" "quickstart.md"
|
|
||||||
|
|
||||||
if $INCLUDE_TASKS; then
|
|
||||||
check_file "$TASKS" "tasks.md"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
15
scripts/bash/check-task-prerequisites.sh
Normal file
15
scripts/bash/check-task-prerequisites.sh
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -e
|
||||||
|
JSON_MODE=false
|
||||||
|
for arg in "$@"; do case "$arg" in --json) JSON_MODE=true ;; --help|-h) echo "Usage: $0 [--json]"; exit 0 ;; esac; done
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
|
source "$SCRIPT_DIR/common.sh"
|
||||||
|
eval $(get_feature_paths)
|
||||||
|
check_feature_branch "$CURRENT_BRANCH" || exit 1
|
||||||
|
if [[ ! -d "$FEATURE_DIR" ]]; then echo "ERROR: Feature directory not found: $FEATURE_DIR"; echo "Run /specify first."; exit 1; fi
|
||||||
|
if [[ ! -f "$IMPL_PLAN" ]]; then echo "ERROR: plan.md not found in $FEATURE_DIR"; echo "Run /plan first."; exit 1; fi
|
||||||
|
if $JSON_MODE; then
|
||||||
|
docs=(); [[ -f "$RESEARCH" ]] && docs+=("research.md"); [[ -f "$DATA_MODEL" ]] && docs+=("data-model.md"); ([[ -d "$CONTRACTS_DIR" ]] && [[ -n "$(ls -A "$CONTRACTS_DIR" 2>/dev/null)" ]]) && docs+=("contracts/"); [[ -f "$QUICKSTART" ]] && docs+=("quickstart.md");
|
||||||
|
json_docs=$(printf '"%s",' "${docs[@]}"); json_docs="[${json_docs%,}]"; printf '{"FEATURE_DIR":"%s","AVAILABLE_DOCS":%s}\n' "$FEATURE_DIR" "$json_docs"
|
||||||
|
else
|
||||||
|
echo "FEATURE_DIR:$FEATURE_DIR"; echo "AVAILABLE_DOCS:"; check_file "$RESEARCH" "research.md"; check_file "$DATA_MODEL" "data-model.md"; check_dir "$CONTRACTS_DIR" "contracts/"; check_file "$QUICKSTART" "quickstart.md"; fi
|
||||||
@@ -1,84 +1,16 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# Common functions and variables for all scripts
|
# (Moved to scripts/bash/) Common functions and variables for all scripts
|
||||||
|
|
||||||
# Get repository root, with fallback for non-git repositories
|
get_repo_root() { git rev-parse --show-toplevel; }
|
||||||
get_repo_root() {
|
get_current_branch() { git rev-parse --abbrev-ref HEAD; }
|
||||||
if git rev-parse --show-toplevel >/dev/null 2>&1; then
|
|
||||||
git rev-parse --show-toplevel
|
|
||||||
else
|
|
||||||
# Fall back to script location for non-git repos
|
|
||||||
local script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
||||||
(cd "$script_dir/../../.." && pwd)
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Get current branch, with fallback for non-git repositories
|
|
||||||
get_current_branch() {
|
|
||||||
# First check if SPECIFY_FEATURE environment variable is set
|
|
||||||
if [[ -n "${SPECIFY_FEATURE:-}" ]]; then
|
|
||||||
echo "$SPECIFY_FEATURE"
|
|
||||||
return
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Then check git if available
|
|
||||||
if git rev-parse --abbrev-ref HEAD >/dev/null 2>&1; then
|
|
||||||
git rev-parse --abbrev-ref HEAD
|
|
||||||
return
|
|
||||||
fi
|
|
||||||
|
|
||||||
# For non-git repos, try to find the latest feature directory
|
|
||||||
local repo_root=$(get_repo_root)
|
|
||||||
local specs_dir="$repo_root/specs"
|
|
||||||
|
|
||||||
if [[ -d "$specs_dir" ]]; then
|
|
||||||
local latest_feature=""
|
|
||||||
local highest=0
|
|
||||||
|
|
||||||
for dir in "$specs_dir"/*; do
|
|
||||||
if [[ -d "$dir" ]]; then
|
|
||||||
local dirname=$(basename "$dir")
|
|
||||||
if [[ "$dirname" =~ ^([0-9]{3})- ]]; then
|
|
||||||
local number=${BASH_REMATCH[1]}
|
|
||||||
number=$((10#$number))
|
|
||||||
if [[ "$number" -gt "$highest" ]]; then
|
|
||||||
highest=$number
|
|
||||||
latest_feature=$dirname
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
if [[ -n "$latest_feature" ]]; then
|
|
||||||
echo "$latest_feature"
|
|
||||||
return
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "main" # Final fallback
|
|
||||||
}
|
|
||||||
|
|
||||||
# Check if we have git available
|
|
||||||
has_git() {
|
|
||||||
git rev-parse --show-toplevel >/dev/null 2>&1
|
|
||||||
}
|
|
||||||
|
|
||||||
check_feature_branch() {
|
check_feature_branch() {
|
||||||
local branch="$1"
|
local branch="$1"
|
||||||
local has_git_repo="$2"
|
|
||||||
|
|
||||||
# For non-git repos, we can't enforce branch naming but still provide output
|
|
||||||
if [[ "$has_git_repo" != "true" ]]; then
|
|
||||||
echo "[specify] Warning: Git repository not detected; skipped branch validation" >&2
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ ! "$branch" =~ ^[0-9]{3}- ]]; then
|
if [[ ! "$branch" =~ ^[0-9]{3}- ]]; then
|
||||||
echo "ERROR: Not on a feature branch. Current branch: $branch" >&2
|
echo "ERROR: Not on a feature branch. Current branch: $branch" >&2
|
||||||
echo "Feature branches should be named like: 001-feature-name" >&2
|
echo "Feature branches should be named like: 001-feature-name" >&2
|
||||||
return 1
|
return 1
|
||||||
fi
|
fi; return 0
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
}
|
||||||
|
|
||||||
get_feature_dir() { echo "$1/specs/$2"; }
|
get_feature_dir() { echo "$1/specs/$2"; }
|
||||||
@@ -86,18 +18,10 @@ get_feature_dir() { echo "$1/specs/$2"; }
|
|||||||
get_feature_paths() {
|
get_feature_paths() {
|
||||||
local repo_root=$(get_repo_root)
|
local repo_root=$(get_repo_root)
|
||||||
local current_branch=$(get_current_branch)
|
local current_branch=$(get_current_branch)
|
||||||
local has_git_repo="false"
|
|
||||||
|
|
||||||
if has_git; then
|
|
||||||
has_git_repo="true"
|
|
||||||
fi
|
|
||||||
|
|
||||||
local feature_dir=$(get_feature_dir "$repo_root" "$current_branch")
|
local feature_dir=$(get_feature_dir "$repo_root" "$current_branch")
|
||||||
|
|
||||||
cat <<EOF
|
cat <<EOF
|
||||||
REPO_ROOT='$repo_root'
|
REPO_ROOT='$repo_root'
|
||||||
CURRENT_BRANCH='$current_branch'
|
CURRENT_BRANCH='$current_branch'
|
||||||
HAS_GIT='$has_git_repo'
|
|
||||||
FEATURE_DIR='$feature_dir'
|
FEATURE_DIR='$feature_dir'
|
||||||
FEATURE_SPEC='$feature_dir/spec.md'
|
FEATURE_SPEC='$feature_dir/spec.md'
|
||||||
IMPL_PLAN='$feature_dir/plan.md'
|
IMPL_PLAN='$feature_dir/plan.md'
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
# (Moved to scripts/bash/) Create a new feature with branch, directory structure, and template
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
JSON_MODE=false
|
JSON_MODE=false
|
||||||
@@ -18,38 +18,7 @@ if [ -z "$FEATURE_DESCRIPTION" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Function to find the repository root by searching for existing project markers
|
REPO_ROOT=$(git rev-parse --show-toplevel)
|
||||||
find_repo_root() {
|
|
||||||
local dir="$1"
|
|
||||||
while [ "$dir" != "/" ]; do
|
|
||||||
if [ -d "$dir/.git" ] || [ -d "$dir/.specify" ]; then
|
|
||||||
echo "$dir"
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
dir="$(dirname "$dir")"
|
|
||||||
done
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
# Resolve repository root. Prefer git information when available, but fall back
|
|
||||||
# to searching for repository markers so the workflow still functions in repositories that
|
|
||||||
# were initialised with --no-git.
|
|
||||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
||||||
|
|
||||||
if git rev-parse --show-toplevel >/dev/null 2>&1; then
|
|
||||||
REPO_ROOT=$(git rev-parse --show-toplevel)
|
|
||||||
HAS_GIT=true
|
|
||||||
else
|
|
||||||
REPO_ROOT="$(find_repo_root "$SCRIPT_DIR")"
|
|
||||||
if [ -z "$REPO_ROOT" ]; then
|
|
||||||
echo "Error: Could not determine repository root. Please run this script from within the repository." >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
HAS_GIT=false
|
|
||||||
fi
|
|
||||||
|
|
||||||
cd "$REPO_ROOT"
|
|
||||||
|
|
||||||
SPECS_DIR="$REPO_ROOT/specs"
|
SPECS_DIR="$REPO_ROOT/specs"
|
||||||
mkdir -p "$SPECS_DIR"
|
mkdir -p "$SPECS_DIR"
|
||||||
|
|
||||||
@@ -71,27 +40,19 @@ BRANCH_NAME=$(echo "$FEATURE_DESCRIPTION" | tr '[:upper:]' '[:lower:]' | sed 's/
|
|||||||
WORDS=$(echo "$BRANCH_NAME" | tr '-' '\n' | grep -v '^$' | head -3 | tr '\n' '-' | sed 's/-$//')
|
WORDS=$(echo "$BRANCH_NAME" | tr '-' '\n' | grep -v '^$' | head -3 | tr '\n' '-' | sed 's/-$//')
|
||||||
BRANCH_NAME="${FEATURE_NUM}-${WORDS}"
|
BRANCH_NAME="${FEATURE_NUM}-${WORDS}"
|
||||||
|
|
||||||
if [ "$HAS_GIT" = true ]; then
|
git checkout -b "$BRANCH_NAME"
|
||||||
git checkout -b "$BRANCH_NAME"
|
|
||||||
else
|
|
||||||
>&2 echo "[specify] Warning: Git repository not detected; skipped branch creation for $BRANCH_NAME"
|
|
||||||
fi
|
|
||||||
|
|
||||||
FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
|
FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
|
||||||
mkdir -p "$FEATURE_DIR"
|
mkdir -p "$FEATURE_DIR"
|
||||||
|
|
||||||
TEMPLATE="$REPO_ROOT/.specify/templates/spec-template.md"
|
TEMPLATE="$REPO_ROOT/templates/spec-template.md"
|
||||||
SPEC_FILE="$FEATURE_DIR/spec.md"
|
SPEC_FILE="$FEATURE_DIR/spec.md"
|
||||||
if [ -f "$TEMPLATE" ]; then cp "$TEMPLATE" "$SPEC_FILE"; else touch "$SPEC_FILE"; fi
|
if [ -f "$TEMPLATE" ]; then cp "$TEMPLATE" "$SPEC_FILE"; else touch "$SPEC_FILE"; fi
|
||||||
|
|
||||||
# Set the SPECIFY_FEATURE environment variable for the current session
|
|
||||||
export SPECIFY_FEATURE="$BRANCH_NAME"
|
|
||||||
|
|
||||||
if $JSON_MODE; then
|
if $JSON_MODE; then
|
||||||
printf '{"BRANCH_NAME":"%s","SPEC_FILE":"%s","FEATURE_NUM":"%s"}\n' "$BRANCH_NAME" "$SPEC_FILE" "$FEATURE_NUM"
|
printf '{"BRANCH_NAME":"%s","SPEC_FILE":"%s","FEATURE_NUM":"%s"}\n' "$BRANCH_NAME" "$SPEC_FILE" "$FEATURE_NUM"
|
||||||
else
|
else
|
||||||
echo "BRANCH_NAME: $BRANCH_NAME"
|
echo "BRANCH_NAME: $BRANCH_NAME"
|
||||||
echo "SPEC_FILE: $SPEC_FILE"
|
echo "SPEC_FILE: $SPEC_FILE"
|
||||||
echo "FEATURE_NUM: $FEATURE_NUM"
|
echo "FEATURE_NUM: $FEATURE_NUM"
|
||||||
echo "SPECIFY_FEATURE environment variable set to: $BRANCH_NAME"
|
|
||||||
fi
|
fi
|
||||||
|
|||||||
7
scripts/bash/get-feature-paths.sh
Normal file
7
scripts/bash/get-feature-paths.sh
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -e
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
|
source "$SCRIPT_DIR/common.sh"
|
||||||
|
eval $(get_feature_paths)
|
||||||
|
check_feature_branch "$CURRENT_BRANCH" || exit 1
|
||||||
|
echo "REPO_ROOT: $REPO_ROOT"; echo "BRANCH: $CURRENT_BRANCH"; echo "FEATURE_DIR: $FEATURE_DIR"; echo "FEATURE_SPEC: $FEATURE_SPEC"; echo "IMPL_PLAN: $IMPL_PLAN"; echo "TASKS: $TASKS"
|
||||||
@@ -1,60 +1,17 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
# Parse command line arguments
|
|
||||||
JSON_MODE=false
|
JSON_MODE=false
|
||||||
ARGS=()
|
for arg in "$@"; do case "$arg" in --json) JSON_MODE=true ;; --help|-h) echo "Usage: $0 [--json]"; exit 0 ;; esac; done
|
||||||
|
|
||||||
for arg in "$@"; do
|
|
||||||
case "$arg" in
|
|
||||||
--json)
|
|
||||||
JSON_MODE=true
|
|
||||||
;;
|
|
||||||
--help|-h)
|
|
||||||
echo "Usage: $0 [--json]"
|
|
||||||
echo " --json Output results in JSON format"
|
|
||||||
echo " --help Show this help message"
|
|
||||||
exit 0
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
ARGS+=("$arg")
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
|
|
||||||
# Get script directory and load common functions
|
|
||||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
source "$SCRIPT_DIR/common.sh"
|
source "$SCRIPT_DIR/common.sh"
|
||||||
|
|
||||||
# Get all paths and variables from common functions
|
|
||||||
eval $(get_feature_paths)
|
eval $(get_feature_paths)
|
||||||
|
check_feature_branch "$CURRENT_BRANCH" || exit 1
|
||||||
# Check if we're on a proper feature branch (only for git repos)
|
|
||||||
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
|
|
||||||
|
|
||||||
# Ensure the feature directory exists
|
|
||||||
mkdir -p "$FEATURE_DIR"
|
mkdir -p "$FEATURE_DIR"
|
||||||
|
|
||||||
# Copy plan template if it exists
|
|
||||||
TEMPLATE="$REPO_ROOT/.specify/templates/plan-template.md"
|
TEMPLATE="$REPO_ROOT/.specify/templates/plan-template.md"
|
||||||
if [[ -f "$TEMPLATE" ]]; then
|
[[ -f "$TEMPLATE" ]] && cp "$TEMPLATE" "$IMPL_PLAN"
|
||||||
cp "$TEMPLATE" "$IMPL_PLAN"
|
|
||||||
echo "Copied plan template to $IMPL_PLAN"
|
|
||||||
else
|
|
||||||
echo "Warning: Plan template not found at $TEMPLATE"
|
|
||||||
# Create a basic plan file if template doesn't exist
|
|
||||||
touch "$IMPL_PLAN"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Output results
|
|
||||||
if $JSON_MODE; then
|
if $JSON_MODE; then
|
||||||
printf '{"FEATURE_SPEC":"%s","IMPL_PLAN":"%s","SPECS_DIR":"%s","BRANCH":"%s","HAS_GIT":"%s"}\n' \
|
printf '{"FEATURE_SPEC":"%s","IMPL_PLAN":"%s","SPECS_DIR":"%s","BRANCH":"%s"}\n' \
|
||||||
"$FEATURE_SPEC" "$IMPL_PLAN" "$FEATURE_DIR" "$CURRENT_BRANCH" "$HAS_GIT"
|
"$FEATURE_SPEC" "$IMPL_PLAN" "$FEATURE_DIR" "$CURRENT_BRANCH"
|
||||||
else
|
else
|
||||||
echo "FEATURE_SPEC: $FEATURE_SPEC"
|
echo "FEATURE_SPEC: $FEATURE_SPEC"; echo "IMPL_PLAN: $IMPL_PLAN"; echo "SPECS_DIR: $FEATURE_DIR"; echo "BRANCH: $CURRENT_BRANCH"
|
||||||
echo "IMPL_PLAN: $IMPL_PLAN"
|
|
||||||
echo "SPECS_DIR: $FEATURE_DIR"
|
|
||||||
echo "BRANCH: $CURRENT_BRANCH"
|
|
||||||
echo "HAS_GIT: $HAS_GIT"
|
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -1,719 +1,57 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
# Update agent context files with information from plan.md
|
|
||||||
#
|
|
||||||
# This script maintains AI agent context files by parsing feature specifications
|
|
||||||
# and updating agent-specific configuration files with project information.
|
|
||||||
#
|
|
||||||
# MAIN FUNCTIONS:
|
|
||||||
# 1. Environment Validation
|
|
||||||
# - Verifies git repository structure and branch information
|
|
||||||
# - Checks for required plan.md files and templates
|
|
||||||
# - Validates file permissions and accessibility
|
|
||||||
#
|
|
||||||
# 2. Plan Data Extraction
|
|
||||||
# - Parses plan.md files to extract project metadata
|
|
||||||
# - Identifies language/version, frameworks, databases, and project types
|
|
||||||
# - Handles missing or incomplete specification data gracefully
|
|
||||||
#
|
|
||||||
# 3. Agent File Management
|
|
||||||
# - Creates new agent context files from templates when needed
|
|
||||||
# - Updates existing agent files with new project information
|
|
||||||
# - Preserves manual additions and custom configurations
|
|
||||||
# - Supports multiple AI agent formats and directory structures
|
|
||||||
#
|
|
||||||
# 4. Content Generation
|
|
||||||
# - Generates language-specific build/test commands
|
|
||||||
# - Creates appropriate project directory structures
|
|
||||||
# - Updates technology stacks and recent changes sections
|
|
||||||
# - Maintains consistent formatting and timestamps
|
|
||||||
#
|
|
||||||
# 5. Multi-Agent Support
|
|
||||||
# - Handles agent-specific file paths and naming conventions
|
|
||||||
# - Supports: Claude, Gemini, Copilot, Cursor, Qwen, opencode, Codex, Windsurf
|
|
||||||
# - Can update single agents or all existing agent files
|
|
||||||
# - Creates default Claude file if no agent files exist
|
|
||||||
#
|
|
||||||
# Usage: ./update-agent-context.sh [agent_type]
|
|
||||||
# Agent types: claude|gemini|copilot|cursor|qwen|opencode|codex|windsurf
|
|
||||||
# Leave empty to update all existing agent files
|
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
REPO_ROOT=$(git rev-parse --show-toplevel)
|
||||||
# Enable strict error handling
|
CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
|
||||||
set -u
|
FEATURE_DIR="$REPO_ROOT/specs/$CURRENT_BRANCH"
|
||||||
set -o pipefail
|
NEW_PLAN="$FEATURE_DIR/plan.md"
|
||||||
|
CLAUDE_FILE="$REPO_ROOT/CLAUDE.md"; GEMINI_FILE="$REPO_ROOT/GEMINI.md"; COPILOT_FILE="$REPO_ROOT/.github/copilot-instructions.md"
|
||||||
#==============================================================================
|
AGENT_TYPE="$1"
|
||||||
# Configuration and Global Variables
|
[ -f "$NEW_PLAN" ] || { echo "ERROR: No plan.md found at $NEW_PLAN"; exit 1; }
|
||||||
#==============================================================================
|
echo "=== Updating agent context files for feature $CURRENT_BRANCH ==="
|
||||||
|
NEW_LANG=$(grep "^**Language/Version**: " "$NEW_PLAN" 2>/dev/null | head -1 | sed 's/^**Language\/Version**: //' | grep -v "NEEDS CLARIFICATION" || echo "")
|
||||||
# Get script directory and load common functions
|
NEW_FRAMEWORK=$(grep "^**Primary Dependencies**: " "$NEW_PLAN" 2>/dev/null | head -1 | sed 's/^**Primary Dependencies**: //' | grep -v "NEEDS CLARIFICATION" || echo "")
NEW_DB=$(grep "^**Storage**: " "$NEW_PLAN" 2>/dev/null | head -1 | sed 's/^**Storage**: //' | grep -v "N/A" | grep -v "NEEDS CLARIFICATION" || echo "")
NEW_PROJECT_TYPE=$(grep "^**Project Type**: " "$NEW_PLAN" 2>/dev/null | head -1 | sed 's/^**Project Type**: //' || echo "")
update_agent_file() { local target_file="$1" agent_name="$2"; echo "Updating $agent_name context file: $target_file"; local temp_file=$(mktemp); if [ ! -f "$target_file" ]; then
echo "Creating new $agent_name context file..."; if [ -f "$REPO_ROOT/templates/agent-file-template.md" ]; then cp "$REPO_ROOT/templates/agent-file-template.md" "$temp_file"; else echo "ERROR: Template not found"; return 1; fi;
sed -i.bak "s/\[PROJECT NAME\]/$(basename $REPO_ROOT)/" "$temp_file"; sed -i.bak "s/\[DATE\]/$(date +%Y-%m-%d)/" "$temp_file"; sed -i.bak "s/\[EXTRACTED FROM ALL PLAN.MD FILES\]/- $NEW_LANG + $NEW_FRAMEWORK ($CURRENT_BRANCH)/" "$temp_file";
if [[ "$NEW_PROJECT_TYPE" == *"web"* ]]; then sed -i.bak "s|\[ACTUAL STRUCTURE FROM PLANS\]|backend/\nfrontend/\ntests/|" "$temp_file"; else sed -i.bak "s|\[ACTUAL STRUCTURE FROM PLANS\]|src/\ntests/|" "$temp_file"; fi;
if [[ "$NEW_LANG" == *"Python"* ]]; then COMMANDS="cd src && pytest && ruff check ."; elif [[ "$NEW_LANG" == *"Rust"* ]]; then COMMANDS="cargo test && cargo clippy"; elif [[ "$NEW_LANG" == *"JavaScript"* ]] || [[ "$NEW_LANG" == *"TypeScript"* ]]; then COMMANDS="npm test && npm run lint"; else COMMANDS="# Add commands for $NEW_LANG"; fi; sed -i.bak "s|\[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES\]|$COMMANDS|" "$temp_file";
sed -i.bak "s|\[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE\]|$NEW_LANG: Follow standard conventions|" "$temp_file"; sed -i.bak "s|\[LAST 3 FEATURES AND WHAT THEY ADDED\]|- $CURRENT_BRANCH: Added $NEW_LANG + $NEW_FRAMEWORK|" "$temp_file"; rm "$temp_file.bak";
else
echo "Updating existing $agent_name context file..."; manual_start=$(grep -n "<!-- MANUAL ADDITIONS START -->" "$target_file" | cut -d: -f1); manual_end=$(grep -n "<!-- MANUAL ADDITIONS END -->" "$target_file" | cut -d: -f1); if [ -n "$manual_start" ] && [ -n "$manual_end" ]; then sed -n "${manual_start},${manual_end}p" "$target_file" > /tmp/manual_additions.txt; fi;
python3 - "$target_file" <<'EOF'
import re,sys,datetime
target=sys.argv[1]
with open(target) as f: content=f.read()
NEW_LANG="'$NEW_LANG'";NEW_FRAMEWORK="'$NEW_FRAMEWORK'";CURRENT_BRANCH="'$CURRENT_BRANCH'";NEW_DB="'$NEW_DB'";NEW_PROJECT_TYPE="'$NEW_PROJECT_TYPE'"
# Tech section
m=re.search(r'## Active Technologies\n(.*?)\n\n',content, re.DOTALL)
if m:
    existing=m.group(1)
    additions=[]
    if '$NEW_LANG' and '$NEW_LANG' not in existing: additions.append(f"- $NEW_LANG + $NEW_FRAMEWORK ($CURRENT_BRANCH)")
    if '$NEW_DB' and '$NEW_DB' not in existing and '$NEW_DB'!='N/A': additions.append(f"- $NEW_DB ($CURRENT_BRANCH)")
    if additions:
        new_block=existing+"\n"+"\n".join(additions)
        content=content.replace(m.group(0),f"## Active Technologies\n{new_block}\n\n")
# Recent changes
m2=re.search(r'## Recent Changes\n(.*?)(\n\n|$)',content, re.DOTALL)
if m2:
    lines=[l for l in m2.group(1).strip().split('\n') if l]
    lines.insert(0,f"- $CURRENT_BRANCH: Added $NEW_LANG + $NEW_FRAMEWORK")
    lines=lines[:3]
    content=re.sub(r'## Recent Changes\n.*?(\n\n|$)', '## Recent Changes\n'+"\n".join(lines)+'\n\n', content, flags=re.DOTALL)
content=re.sub(r'Last updated: \d{4}-\d{2}-\d{2}', 'Last updated: '+datetime.datetime.now().strftime('%Y-%m-%d'), content)
open(target+'.tmp','w').write(content)
EOF
mv "$target_file.tmp" "$target_file"; if [ -f /tmp/manual_additions.txt ]; then sed -i.bak '/<!-- MANUAL ADDITIONS START -->/,/<!-- MANUAL ADDITIONS END -->/d' "$target_file"; cat /tmp/manual_additions.txt >> "$target_file"; rm /tmp/manual_additions.txt "$target_file.bak"; fi;
fi; mv "$temp_file" "$target_file" 2>/dev/null || true; echo "✅ $agent_name context file updated successfully"; }
case "$AGENT_TYPE" in
claude) update_agent_file "$CLAUDE_FILE" "Claude Code" ;;
gemini) update_agent_file "$GEMINI_FILE" "Gemini CLI" ;;
copilot) update_agent_file "$COPILOT_FILE" "GitHub Copilot" ;;
"") [ -f "$CLAUDE_FILE" ] && update_agent_file "$CLAUDE_FILE" "Claude Code"; [ -f "$GEMINI_FILE" ] && update_agent_file "$GEMINI_FILE" "Gemini CLI"; [ -f "$COPILOT_FILE" ] && update_agent_file "$COPILOT_FILE" "GitHub Copilot"; if [ ! -f "$CLAUDE_FILE" ] && [ ! -f "$GEMINI_FILE" ] && [ ! -f "$COPILOT_FILE" ]; then update_agent_file "$CLAUDE_FILE" "Claude Code"; fi ;;
*) echo "ERROR: Unknown agent type '$AGENT_TYPE'"; exit 1 ;;
esac
echo; echo "Summary of changes:"; [ -n "$NEW_LANG" ] && echo "- Added language: $NEW_LANG"; [ -n "$NEW_FRAMEWORK" ] && echo "- Added framework: $NEW_FRAMEWORK"; [ -n "$NEW_DB" ] && [ "$NEW_DB" != "N/A" ] && echo "- Added database: $NEW_DB"; echo; echo "Usage: $0 [claude|gemini|copilot]"

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/common.sh"

# Get all paths and variables from common functions
eval $(get_feature_paths)

NEW_PLAN="$IMPL_PLAN" # Alias for compatibility with existing code
AGENT_TYPE="${1:-}"

# Agent-specific file paths
CLAUDE_FILE="$REPO_ROOT/CLAUDE.md"
GEMINI_FILE="$REPO_ROOT/GEMINI.md"
COPILOT_FILE="$REPO_ROOT/.github/copilot-instructions.md"
CURSOR_FILE="$REPO_ROOT/.cursor/rules/specify-rules.mdc"
QWEN_FILE="$REPO_ROOT/QWEN.md"
AGENTS_FILE="$REPO_ROOT/AGENTS.md"
WINDSURF_FILE="$REPO_ROOT/.windsurf/rules/specify-rules.md"
KILOCODE_FILE="$REPO_ROOT/.kilocode/rules/specify-rules.md"
AUGGIE_FILE="$REPO_ROOT/.augment/rules/specify-rules.md"
ROO_FILE="$REPO_ROOT/.roo/rules/specify-rules.md"

# Template file
TEMPLATE_FILE="$REPO_ROOT/.specify/templates/agent-file-template.md"

# Global variables for parsed plan data
NEW_LANG=""
NEW_FRAMEWORK=""
NEW_DB=""
NEW_PROJECT_TYPE=""

#==============================================================================
# Utility Functions
#==============================================================================

log_info() {
echo "INFO: $1"
}

log_success() {
echo "✓ $1"
}

log_error() {
echo "ERROR: $1" >&2
}

log_warning() {
|
|
||||||
echo "WARNING: $1" >&2
|
|
||||||
}
|
|
||||||
|
|
||||||
# Cleanup function for temporary files
|
|
||||||
cleanup() {
|
|
||||||
local exit_code=$?
|
|
||||||
rm -f /tmp/agent_update_*_$$
|
|
||||||
rm -f /tmp/manual_additions_$$
|
|
||||||
exit $exit_code
|
|
||||||
}
|
|
||||||
|
|
||||||
# Set up cleanup trap
|
|
||||||
trap cleanup EXIT INT TERM
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
# Validation Functions
|
|
||||||
#==============================================================================
|
|
||||||
|
|
||||||
validate_environment() {
|
|
||||||
# Check if we have a current branch/feature (git or non-git)
|
|
||||||
if [[ -z "$CURRENT_BRANCH" ]]; then
|
|
||||||
log_error "Unable to determine current feature"
|
|
||||||
if [[ "$HAS_GIT" == "true" ]]; then
|
|
||||||
log_info "Make sure you're on a feature branch"
|
|
||||||
else
|
|
||||||
log_info "Set SPECIFY_FEATURE environment variable or create a feature first"
|
|
||||||
fi
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Check if plan.md exists
|
|
||||||
if [[ ! -f "$NEW_PLAN" ]]; then
|
|
||||||
log_error "No plan.md found at $NEW_PLAN"
|
|
||||||
log_info "Make sure you're working on a feature with a corresponding spec directory"
|
|
||||||
if [[ "$HAS_GIT" != "true" ]]; then
|
|
||||||
log_info "Use: export SPECIFY_FEATURE=your-feature-name or create a new feature first"
|
|
||||||
fi
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Check if template exists (needed for new files)
|
|
||||||
if [[ ! -f "$TEMPLATE_FILE" ]]; then
|
|
||||||
log_warning "Template file not found at $TEMPLATE_FILE"
|
|
||||||
log_warning "Creating new agent files will fail"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
# Plan Parsing Functions
|
|
||||||
#==============================================================================
|
|
||||||
|
|
||||||
extract_plan_field() {
|
|
||||||
local field_pattern="$1"
|
|
||||||
local plan_file="$2"
|
|
||||||
|
|
||||||
grep "^\*\*${field_pattern}\*\*: " "$plan_file" 2>/dev/null | \
|
|
||||||
head -1 | \
|
|
||||||
sed "s|^\*\*${field_pattern}\*\*: ||" | \
|
|
||||||
sed 's/^[ \t]*//;s/[ \t]*$//' | \
|
|
||||||
grep -v "NEEDS CLARIFICATION" | \
|
|
||||||
grep -v "^N/A$" || echo ""
|
|
||||||
}
|
|
||||||
|
|
||||||
parse_plan_data() {
|
|
||||||
local plan_file="$1"
|
|
||||||
|
|
||||||
if [[ ! -f "$plan_file" ]]; then
|
|
||||||
log_error "Plan file not found: $plan_file"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ ! -r "$plan_file" ]]; then
|
|
||||||
log_error "Plan file is not readable: $plan_file"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
log_info "Parsing plan data from $plan_file"
|
|
||||||
|
|
||||||
NEW_LANG=$(extract_plan_field "Language/Version" "$plan_file")
|
|
||||||
NEW_FRAMEWORK=$(extract_plan_field "Primary Dependencies" "$plan_file")
|
|
||||||
NEW_DB=$(extract_plan_field "Storage" "$plan_file")
|
|
||||||
NEW_PROJECT_TYPE=$(extract_plan_field "Project Type" "$plan_file")
|
|
||||||
|
|
||||||
# Log what we found
|
|
||||||
if [[ -n "$NEW_LANG" ]]; then
|
|
||||||
log_info "Found language: $NEW_LANG"
|
|
||||||
else
|
|
||||||
log_warning "No language information found in plan"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -n "$NEW_FRAMEWORK" ]]; then
|
|
||||||
log_info "Found framework: $NEW_FRAMEWORK"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -n "$NEW_DB" ]] && [[ "$NEW_DB" != "N/A" ]]; then
|
|
||||||
log_info "Found database: $NEW_DB"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -n "$NEW_PROJECT_TYPE" ]]; then
|
|
||||||
log_info "Found project type: $NEW_PROJECT_TYPE"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
format_technology_stack() {
|
|
||||||
local lang="$1"
|
|
||||||
local framework="$2"
|
|
||||||
local parts=()
|
|
||||||
|
|
||||||
# Add non-empty parts
|
|
||||||
[[ -n "$lang" && "$lang" != "NEEDS CLARIFICATION" ]] && parts+=("$lang")
|
|
||||||
[[ -n "$framework" && "$framework" != "NEEDS CLARIFICATION" && "$framework" != "N/A" ]] && parts+=("$framework")
|
|
||||||
|
|
||||||
# Join with proper formatting
|
|
||||||
if [[ ${#parts[@]} -eq 0 ]]; then
|
|
||||||
echo ""
|
|
||||||
elif [[ ${#parts[@]} -eq 1 ]]; then
|
|
||||||
echo "${parts[0]}"
|
|
||||||
else
|
|
||||||
# Join multiple parts with " + "
|
|
||||||
local result="${parts[0]}"
|
|
||||||
for ((i=1; i<${#parts[@]}; i++)); do
|
|
||||||
result="$result + ${parts[i]}"
|
|
||||||
done
|
|
||||||
echo "$result"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
# Template and Content Generation Functions
|
|
||||||
#==============================================================================
|
|
||||||
|
|
||||||
get_project_structure() {
|
|
||||||
local project_type="$1"
|
|
||||||
|
|
||||||
if [[ "$project_type" == *"web"* ]]; then
|
|
||||||
echo "backend/\\nfrontend/\\ntests/"
|
|
||||||
else
|
|
||||||
echo "src/\\ntests/"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
get_commands_for_language() {
|
|
||||||
local lang="$1"
|
|
||||||
|
|
||||||
case "$lang" in
|
|
||||||
*"Python"*)
|
|
||||||
echo "cd src && pytest && ruff check ."
|
|
||||||
;;
|
|
||||||
*"Rust"*)
|
|
||||||
echo "cargo test && cargo clippy"
|
|
||||||
;;
|
|
||||||
*"JavaScript"*|*"TypeScript"*)
|
|
||||||
echo "npm test && npm run lint"
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "# Add commands for $lang"
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
}
|
|
||||||
|
|
||||||
get_language_conventions() {
|
|
||||||
local lang="$1"
|
|
||||||
echo "$lang: Follow standard conventions"
|
|
||||||
}
|
|
||||||
|
|
||||||
create_new_agent_file() {
|
|
||||||
local target_file="$1"
|
|
||||||
local temp_file="$2"
|
|
||||||
local project_name="$3"
|
|
||||||
local current_date="$4"
|
|
||||||
|
|
||||||
if [[ ! -f "$TEMPLATE_FILE" ]]; then
|
|
||||||
log_error "Template not found at $TEMPLATE_FILE"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ ! -r "$TEMPLATE_FILE" ]]; then
|
|
||||||
log_error "Template file is not readable: $TEMPLATE_FILE"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
log_info "Creating new agent context file from template..."
|
|
||||||
|
|
||||||
if ! cp "$TEMPLATE_FILE" "$temp_file"; then
|
|
||||||
log_error "Failed to copy template file"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Replace template placeholders
|
|
||||||
local project_structure
|
|
||||||
project_structure=$(get_project_structure "$NEW_PROJECT_TYPE")
|
|
||||||
|
|
||||||
local commands
|
|
||||||
commands=$(get_commands_for_language "$NEW_LANG")
|
|
||||||
|
|
||||||
local language_conventions
|
|
||||||
language_conventions=$(get_language_conventions "$NEW_LANG")
|
|
||||||
|
|
||||||
# Perform substitutions with error checking using safer approach
|
|
||||||
# Escape special characters for sed by using a different delimiter or escaping
|
|
||||||
local escaped_lang=$(printf '%s\n' "$NEW_LANG" | sed 's/[\[\.*^$()+{}|]/\\&/g')
|
|
||||||
local escaped_framework=$(printf '%s\n' "$NEW_FRAMEWORK" | sed 's/[\[\.*^$()+{}|]/\\&/g')
|
|
||||||
local escaped_branch=$(printf '%s\n' "$CURRENT_BRANCH" | sed 's/[\[\.*^$()+{}|]/\\&/g')
|
|
||||||
|
|
||||||
# Build technology stack and recent change strings conditionally
|
|
||||||
local tech_stack
|
|
||||||
if [[ -n "$escaped_lang" && -n "$escaped_framework" ]]; then
|
|
||||||
tech_stack="- $escaped_lang + $escaped_framework ($escaped_branch)"
|
|
||||||
elif [[ -n "$escaped_lang" ]]; then
|
|
||||||
tech_stack="- $escaped_lang ($escaped_branch)"
|
|
||||||
elif [[ -n "$escaped_framework" ]]; then
|
|
||||||
tech_stack="- $escaped_framework ($escaped_branch)"
|
|
||||||
else
|
|
||||||
tech_stack="- ($escaped_branch)"
|
|
||||||
fi
|
|
||||||
|
|
||||||
local recent_change
|
|
||||||
if [[ -n "$escaped_lang" && -n "$escaped_framework" ]]; then
|
|
||||||
recent_change="- $escaped_branch: Added $escaped_lang + $escaped_framework"
|
|
||||||
elif [[ -n "$escaped_lang" ]]; then
|
|
||||||
recent_change="- $escaped_branch: Added $escaped_lang"
|
|
||||||
elif [[ -n "$escaped_framework" ]]; then
|
|
||||||
recent_change="- $escaped_branch: Added $escaped_framework"
|
|
||||||
else
|
|
||||||
recent_change="- $escaped_branch: Added"
|
|
||||||
fi
|
|
||||||
|
|
||||||
local substitutions=(
|
|
||||||
"s|\[PROJECT NAME\]|$project_name|"
|
|
||||||
"s|\[DATE\]|$current_date|"
|
|
||||||
"s|\[EXTRACTED FROM ALL PLAN.MD FILES\]|$tech_stack|"
|
|
||||||
"s|\[ACTUAL STRUCTURE FROM PLANS\]|$project_structure|g"
|
|
||||||
"s|\[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES\]|$commands|"
|
|
||||||
"s|\[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE\]|$language_conventions|"
|
|
||||||
"s|\[LAST 3 FEATURES AND WHAT THEY ADDED\]|$recent_change|"
|
|
||||||
)
|
|
||||||
|
|
||||||
for substitution in "${substitutions[@]}"; do
|
|
||||||
if ! sed -i.bak -e "$substitution" "$temp_file"; then
|
|
||||||
log_error "Failed to perform substitution: $substitution"
|
|
||||||
rm -f "$temp_file" "$temp_file.bak"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
# Convert \n sequences to actual newlines
|
|
||||||
newline=$(printf '\n')
|
|
||||||
sed -i.bak2 "s/\\\\n/${newline}/g" "$temp_file"
|
|
||||||
|
|
||||||
# Clean up backup files
|
|
||||||
rm -f "$temp_file.bak" "$temp_file.bak2"
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
update_existing_agent_file() {
|
|
||||||
local target_file="$1"
|
|
||||||
local current_date="$2"
|
|
||||||
|
|
||||||
log_info "Updating existing agent context file..."
|
|
||||||
|
|
||||||
# Use a single temporary file for atomic update
|
|
||||||
local temp_file
|
|
||||||
temp_file=$(mktemp) || {
|
|
||||||
log_error "Failed to create temporary file"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
# Process the file in one pass
|
|
||||||
local tech_stack=$(format_technology_stack "$NEW_LANG" "$NEW_FRAMEWORK")
|
|
||||||
local new_tech_entries=()
|
|
||||||
local new_change_entry=""
|
|
||||||
|
|
||||||
# Prepare new technology entries
|
|
||||||
if [[ -n "$tech_stack" ]] && ! grep -q "$tech_stack" "$target_file"; then
|
|
||||||
new_tech_entries+=("- $tech_stack ($CURRENT_BRANCH)")
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -n "$NEW_DB" ]] && [[ "$NEW_DB" != "N/A" ]] && [[ "$NEW_DB" != "NEEDS CLARIFICATION" ]] && ! grep -q "$NEW_DB" "$target_file"; then
|
|
||||||
new_tech_entries+=("- $NEW_DB ($CURRENT_BRANCH)")
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Prepare new change entry
|
|
||||||
if [[ -n "$tech_stack" ]]; then
|
|
||||||
new_change_entry="- $CURRENT_BRANCH: Added $tech_stack"
|
|
||||||
elif [[ -n "$NEW_DB" ]] && [[ "$NEW_DB" != "N/A" ]] && [[ "$NEW_DB" != "NEEDS CLARIFICATION" ]]; then
|
|
||||||
new_change_entry="- $CURRENT_BRANCH: Added $NEW_DB"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Process file line by line
|
|
||||||
local in_tech_section=false
|
|
||||||
local in_changes_section=false
|
|
||||||
local tech_entries_added=false
|
|
||||||
local changes_entries_added=false
|
|
||||||
local existing_changes_count=0
|
|
||||||
|
|
||||||
while IFS= read -r line || [[ -n "$line" ]]; do
|
|
||||||
# Handle Active Technologies section
|
|
||||||
if [[ "$line" == "## Active Technologies" ]]; then
|
|
||||||
echo "$line" >> "$temp_file"
|
|
||||||
in_tech_section=true
|
|
||||||
continue
|
|
||||||
elif [[ $in_tech_section == true ]] && [[ "$line" =~ ^##[[:space:]] ]]; then
|
|
||||||
# Add new tech entries before closing the section
|
|
||||||
if [[ $tech_entries_added == false ]] && [[ ${#new_tech_entries[@]} -gt 0 ]]; then
|
|
||||||
printf '%s\n' "${new_tech_entries[@]}" >> "$temp_file"
|
|
||||||
tech_entries_added=true
|
|
||||||
fi
|
|
||||||
echo "$line" >> "$temp_file"
|
|
||||||
in_tech_section=false
|
|
||||||
continue
|
|
||||||
elif [[ $in_tech_section == true ]] && [[ -z "$line" ]]; then
|
|
||||||
# Add new tech entries before empty line in tech section
|
|
||||||
if [[ $tech_entries_added == false ]] && [[ ${#new_tech_entries[@]} -gt 0 ]]; then
|
|
||||||
printf '%s\n' "${new_tech_entries[@]}" >> "$temp_file"
|
|
||||||
tech_entries_added=true
|
|
||||||
fi
|
|
||||||
echo "$line" >> "$temp_file"
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Handle Recent Changes section
|
|
||||||
if [[ "$line" == "## Recent Changes" ]]; then
|
|
||||||
echo "$line" >> "$temp_file"
|
|
||||||
# Add new change entry right after the heading
|
|
||||||
if [[ -n "$new_change_entry" ]]; then
|
|
||||||
echo "$new_change_entry" >> "$temp_file"
|
|
||||||
fi
|
|
||||||
in_changes_section=true
|
|
||||||
changes_entries_added=true
|
|
||||||
continue
|
|
||||||
elif [[ $in_changes_section == true ]] && [[ "$line" =~ ^##[[:space:]] ]]; then
|
|
||||||
echo "$line" >> "$temp_file"
|
|
||||||
in_changes_section=false
|
|
||||||
continue
|
|
||||||
elif [[ $in_changes_section == true ]] && [[ "$line" == "- "* ]]; then
|
|
||||||
# Keep only first 2 existing changes
|
|
||||||
if [[ $existing_changes_count -lt 2 ]]; then
|
|
||||||
echo "$line" >> "$temp_file"
|
|
||||||
((existing_changes_count++))
|
|
||||||
fi
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Update timestamp
|
|
||||||
if [[ "$line" =~ \*\*Last\ updated\*\*:.*[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] ]]; then
|
|
||||||
echo "$line" | sed "s/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]/$current_date/" >> "$temp_file"
|
|
||||||
else
|
|
||||||
echo "$line" >> "$temp_file"
|
|
||||||
fi
|
|
||||||
done < "$target_file"
|
|
||||||
|
|
||||||
# Post-loop check: if we're still in the Active Technologies section and haven't added new entries
|
|
||||||
if [[ $in_tech_section == true ]] && [[ $tech_entries_added == false ]] && [[ ${#new_tech_entries[@]} -gt 0 ]]; then
|
|
||||||
printf '%s\n' "${new_tech_entries[@]}" >> "$temp_file"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Move temp file to target atomically
|
|
||||||
if ! mv "$temp_file" "$target_file"; then
|
|
||||||
log_error "Failed to update target file"
|
|
||||||
rm -f "$temp_file"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
#==============================================================================
|
|
||||||
# Main Agent File Update Function
|
|
||||||
#==============================================================================
|
|
||||||
|
|
||||||
update_agent_file() {
|
|
||||||
local target_file="$1"
|
|
||||||
local agent_name="$2"
|
|
||||||
|
|
||||||
if [[ -z "$target_file" ]] || [[ -z "$agent_name" ]]; then
|
|
||||||
log_error "update_agent_file requires target_file and agent_name parameters"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
log_info "Updating $agent_name context file: $target_file"
|
|
||||||
|
|
||||||
local project_name
|
|
||||||
project_name=$(basename "$REPO_ROOT")
|
|
||||||
local current_date
|
|
||||||
current_date=$(date +%Y-%m-%d)
|
|
||||||
|
|
||||||
# Create directory if it doesn't exist
|
|
||||||
local target_dir
|
|
||||||
target_dir=$(dirname "$target_file")
|
|
||||||
if [[ ! -d "$target_dir" ]]; then
|
|
||||||
if ! mkdir -p "$target_dir"; then
|
|
||||||
log_error "Failed to create directory: $target_dir"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ ! -f "$target_file" ]]; then
|
|
||||||
# Create new file from template
|
|
||||||
local temp_file
|
|
||||||
temp_file=$(mktemp) || {
|
|
||||||
log_error "Failed to create temporary file"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
if create_new_agent_file "$target_file" "$temp_file" "$project_name" "$current_date"; then
|
|
||||||
if mv "$temp_file" "$target_file"; then
|
|
||||||
log_success "Created new $agent_name context file"
|
|
||||||
else
|
|
||||||
log_error "Failed to move temporary file to $target_file"
|
|
||||||
rm -f "$temp_file"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
log_error "Failed to create new agent file"
|
|
||||||
rm -f "$temp_file"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
# Update existing file
|
|
||||||
if [[ ! -r "$target_file" ]]; then
|
|
||||||
log_error "Cannot read existing file: $target_file"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ ! -w "$target_file" ]]; then
|
|
||||||
log_error "Cannot write to existing file: $target_file"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if update_existing_agent_file "$target_file" "$current_date"; then
|
|
||||||
log_success "Updated existing $agent_name context file"
|
|
||||||
else
|
|
||||||
log_error "Failed to update existing agent file"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
# Agent Selection and Processing
|
|
||||||
#==============================================================================
|
|
||||||
|
|
||||||
update_specific_agent() {
|
|
||||||
local agent_type="$1"
|
|
||||||
|
|
||||||
case "$agent_type" in
|
|
||||||
claude)
|
|
||||||
update_agent_file "$CLAUDE_FILE" "Claude Code"
|
|
||||||
;;
|
|
||||||
gemini)
|
|
||||||
update_agent_file "$GEMINI_FILE" "Gemini CLI"
|
|
||||||
;;
|
|
||||||
copilot)
|
|
||||||
update_agent_file "$COPILOT_FILE" "GitHub Copilot"
|
|
||||||
;;
|
|
||||||
cursor)
|
|
||||||
update_agent_file "$CURSOR_FILE" "Cursor IDE"
|
|
||||||
;;
|
|
||||||
qwen)
|
|
||||||
update_agent_file "$QWEN_FILE" "Qwen Code"
|
|
||||||
;;
|
|
||||||
opencode)
|
|
||||||
update_agent_file "$AGENTS_FILE" "opencode"
|
|
||||||
;;
|
|
||||||
codex)
|
|
||||||
update_agent_file "$AGENTS_FILE" "Codex CLI"
|
|
||||||
;;
|
|
||||||
windsurf)
|
|
||||||
update_agent_file "$WINDSURF_FILE" "Windsurf"
|
|
||||||
;;
|
|
||||||
kilocode)
|
|
||||||
update_agent_file "$KILOCODE_FILE" "Kilo Code"
|
|
||||||
;;
|
|
||||||
auggie)
|
|
||||||
update_agent_file "$AUGGIE_FILE" "Auggie CLI"
|
|
||||||
;;
|
|
||||||
roo)
|
|
||||||
update_agent_file "$ROO_FILE" "Roo Code"
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
log_error "Unknown agent type '$agent_type'"
|
|
||||||
log_error "Expected: claude|gemini|copilot|cursor|qwen|opencode|codex|windsurf|kilocode|auggie|roo"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
}
|
|
||||||
|
|
||||||
update_all_existing_agents() {
|
|
||||||
local found_agent=false
|
|
||||||
|
|
||||||
# Check each possible agent file and update if it exists
|
|
||||||
if [[ -f "$CLAUDE_FILE" ]]; then
|
|
||||||
update_agent_file "$CLAUDE_FILE" "Claude Code"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$GEMINI_FILE" ]]; then
|
|
||||||
update_agent_file "$GEMINI_FILE" "Gemini CLI"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$COPILOT_FILE" ]]; then
|
|
||||||
update_agent_file "$COPILOT_FILE" "GitHub Copilot"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$CURSOR_FILE" ]]; then
|
|
||||||
update_agent_file "$CURSOR_FILE" "Cursor IDE"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$QWEN_FILE" ]]; then
|
|
||||||
update_agent_file "$QWEN_FILE" "Qwen Code"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$AGENTS_FILE" ]]; then
|
|
||||||
update_agent_file "$AGENTS_FILE" "Codex/opencode"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$WINDSURF_FILE" ]]; then
|
|
||||||
update_agent_file "$WINDSURF_FILE" "Windsurf"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$KILOCODE_FILE" ]]; then
|
|
||||||
update_agent_file "$KILOCODE_FILE" "Kilo Code"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$AUGGIE_FILE" ]]; then
|
|
||||||
update_agent_file "$AUGGIE_FILE" "Auggie CLI"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$ROO_FILE" ]]; then
|
|
||||||
update_agent_file "$ROO_FILE" "Roo Code"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
# If no agent files exist, create a default Claude file
|
|
||||||
if [[ "$found_agent" == false ]]; then
|
|
||||||
log_info "No existing agent files found, creating default Claude file..."
|
|
||||||
update_agent_file "$CLAUDE_FILE" "Claude Code"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
print_summary() {
|
|
||||||
echo
|
|
||||||
log_info "Summary of changes:"
|
|
||||||
|
|
||||||
if [[ -n "$NEW_LANG" ]]; then
|
|
||||||
echo " - Added language: $NEW_LANG"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -n "$NEW_FRAMEWORK" ]]; then
|
|
||||||
echo " - Added framework: $NEW_FRAMEWORK"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -n "$NEW_DB" ]] && [[ "$NEW_DB" != "N/A" ]]; then
|
|
||||||
echo " - Added database: $NEW_DB"
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo
|
|
||||||
log_info "Usage: $0 [claude|gemini|copilot|cursor|qwen|opencode|codex|windsurf|kilocode|auggie|roo]"
|
|
||||||
}
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
# Main Execution
|
|
||||||
#==============================================================================
|
|
||||||
|
|
||||||
main() {
|
|
||||||
# Validate environment before proceeding
|
|
||||||
validate_environment
|
|
||||||
|
|
||||||
log_info "=== Updating agent context files for feature $CURRENT_BRANCH ==="
|
|
||||||
|
|
||||||
# Parse the plan file to extract project information
|
|
||||||
if ! parse_plan_data "$NEW_PLAN"; then
|
|
||||||
log_error "Failed to parse plan data"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Process based on agent type argument
|
|
||||||
local success=true
|
|
||||||
|
|
||||||
if [[ -z "$AGENT_TYPE" ]]; then
|
|
||||||
# No specific agent provided - update all existing agent files
|
|
||||||
log_info "No agent specified, updating all existing agent files..."
|
|
||||||
if ! update_all_existing_agents; then
|
|
||||||
success=false
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
# Specific agent provided - update only that agent
|
|
||||||
log_info "Updating specific agent: $AGENT_TYPE"
|
|
||||||
if ! update_specific_agent "$AGENT_TYPE"; then
|
|
||||||
success=false
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Print summary
|
|
||||||
print_summary
|
|
||||||
|
|
||||||
if [[ "$success" == true ]]; then
|
|
||||||
log_success "Agent context update completed successfully"
|
|
||||||
exit 0
|
|
||||||
else
|
|
||||||
log_error "Agent context update completed with errors"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Execute main function if script is run directly
|
|
||||||
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
|
|
||||||
main "$@"
|
|
||||||
fi
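
A minimal invocation sketch for the script above, assuming it is installed as .specify/scripts/bash/update-agent-context.sh (the exact path is not stated in this diff):

# Update only the Claude Code context file
.specify/scripts/bash/update-agent-context.sh claude

# With no argument, refresh every agent context file that already exists in the repo
.specify/scripts/bash/update-agent-context.sh
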
|
|
||||||
|
|||||||
@@ -1,148 +0,0 @@
|
|||||||
#!/usr/bin/env pwsh
|
|
||||||
|
|
||||||
# Consolidated prerequisite checking script (PowerShell)
|
|
||||||
#
|
|
||||||
# This script provides unified prerequisite checking for Spec-Driven Development workflow.
|
|
||||||
# It replaces the functionality previously spread across multiple scripts.
|
|
||||||
#
|
|
||||||
# Usage: ./check-prerequisites.ps1 [OPTIONS]
|
|
||||||
#
|
|
||||||
# OPTIONS:
|
|
||||||
# -Json Output in JSON format
|
|
||||||
# -RequireTasks Require tasks.md to exist (for implementation phase)
|
|
||||||
# -IncludeTasks Include tasks.md in AVAILABLE_DOCS list
|
|
||||||
# -PathsOnly Only output path variables (no validation)
|
|
||||||
# -Help, -h Show help message
|
|
||||||
|
|
||||||
[CmdletBinding()]
|
|
||||||
param(
|
|
||||||
[switch]$Json,
|
|
||||||
[switch]$RequireTasks,
|
|
||||||
[switch]$IncludeTasks,
|
|
||||||
[switch]$PathsOnly,
|
|
||||||
[switch]$Help
|
|
||||||
)
|
|
||||||
|
|
||||||
$ErrorActionPreference = 'Stop'
|
|
||||||
|
|
||||||
# Show help if requested
|
|
||||||
if ($Help) {
|
|
||||||
Write-Output @"
|
|
||||||
Usage: check-prerequisites.ps1 [OPTIONS]
|
|
||||||
|
|
||||||
Consolidated prerequisite checking for Spec-Driven Development workflow.
|
|
||||||
|
|
||||||
OPTIONS:
|
|
||||||
-Json Output in JSON format
|
|
||||||
-RequireTasks Require tasks.md to exist (for implementation phase)
|
|
||||||
-IncludeTasks Include tasks.md in AVAILABLE_DOCS list
|
|
||||||
-PathsOnly Only output path variables (no prerequisite validation)
|
|
||||||
-Help, -h Show this help message
|
|
||||||
|
|
||||||
EXAMPLES:
|
|
||||||
# Check task prerequisites (plan.md required)
|
|
||||||
.\check-prerequisites.ps1 -Json
|
|
||||||
|
|
||||||
# Check implementation prerequisites (plan.md + tasks.md required)
|
|
||||||
.\check-prerequisites.ps1 -Json -RequireTasks -IncludeTasks
|
|
||||||
|
|
||||||
# Get feature paths only (no validation)
|
|
||||||
.\check-prerequisites.ps1 -PathsOnly
|
|
||||||
|
|
||||||
"@
|
|
||||||
exit 0
|
|
||||||
}
|
|
||||||
|
|
||||||
# Source common functions
|
|
||||||
. "$PSScriptRoot/common.ps1"
|
|
||||||
|
|
||||||
# Get feature paths and validate branch
|
|
||||||
$paths = Get-FeaturePathsEnv
|
|
||||||
|
|
||||||
if (-not (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH -HasGit:$paths.HAS_GIT)) {
|
|
||||||
exit 1
|
|
||||||
}
|
|
||||||
|
|
||||||
# If paths-only mode, output paths and exit (support combined -Json -PathsOnly)
|
|
||||||
if ($PathsOnly) {
|
|
||||||
if ($Json) {
|
|
||||||
[PSCustomObject]@{
|
|
||||||
REPO_ROOT = $paths.REPO_ROOT
|
|
||||||
BRANCH = $paths.CURRENT_BRANCH
|
|
||||||
FEATURE_DIR = $paths.FEATURE_DIR
|
|
||||||
FEATURE_SPEC = $paths.FEATURE_SPEC
|
|
||||||
IMPL_PLAN = $paths.IMPL_PLAN
|
|
||||||
TASKS = $paths.TASKS
|
|
||||||
} | ConvertTo-Json -Compress
|
|
||||||
} else {
|
|
||||||
Write-Output "REPO_ROOT: $($paths.REPO_ROOT)"
|
|
||||||
Write-Output "BRANCH: $($paths.CURRENT_BRANCH)"
|
|
||||||
Write-Output "FEATURE_DIR: $($paths.FEATURE_DIR)"
|
|
||||||
Write-Output "FEATURE_SPEC: $($paths.FEATURE_SPEC)"
|
|
||||||
Write-Output "IMPL_PLAN: $($paths.IMPL_PLAN)"
|
|
||||||
Write-Output "TASKS: $($paths.TASKS)"
|
|
||||||
}
|
|
||||||
exit 0
|
|
||||||
}
|
|
||||||
|
|
||||||
# Validate required directories and files
|
|
||||||
if (-not (Test-Path $paths.FEATURE_DIR -PathType Container)) {
|
|
||||||
Write-Output "ERROR: Feature directory not found: $($paths.FEATURE_DIR)"
|
|
||||||
Write-Output "Run /specify first to create the feature structure."
|
|
||||||
exit 1
|
|
||||||
}
|
|
||||||
|
|
||||||
if (-not (Test-Path $paths.IMPL_PLAN -PathType Leaf)) {
|
|
||||||
Write-Output "ERROR: plan.md not found in $($paths.FEATURE_DIR)"
|
|
||||||
Write-Output "Run /plan first to create the implementation plan."
|
|
||||||
exit 1
|
|
||||||
}
|
|
||||||
|
|
||||||
# Check for tasks.md if required
|
|
||||||
if ($RequireTasks -and -not (Test-Path $paths.TASKS -PathType Leaf)) {
|
|
||||||
Write-Output "ERROR: tasks.md not found in $($paths.FEATURE_DIR)"
|
|
||||||
Write-Output "Run /tasks first to create the task list."
|
|
||||||
exit 1
|
|
||||||
}
|
|
||||||
|
|
||||||
# Build list of available documents
|
|
||||||
$docs = @()
|
|
||||||
|
|
||||||
# Always check these optional docs
|
|
||||||
if (Test-Path $paths.RESEARCH) { $docs += 'research.md' }
|
|
||||||
if (Test-Path $paths.DATA_MODEL) { $docs += 'data-model.md' }
|
|
||||||
|
|
||||||
# Check contracts directory (only if it exists and has files)
|
|
||||||
if ((Test-Path $paths.CONTRACTS_DIR) -and (Get-ChildItem -Path $paths.CONTRACTS_DIR -ErrorAction SilentlyContinue | Select-Object -First 1)) {
|
|
||||||
$docs += 'contracts/'
|
|
||||||
}
|
|
||||||
|
|
||||||
if (Test-Path $paths.QUICKSTART) { $docs += 'quickstart.md' }
|
|
||||||
|
|
||||||
# Include tasks.md if requested and it exists
|
|
||||||
if ($IncludeTasks -and (Test-Path $paths.TASKS)) {
|
|
||||||
$docs += 'tasks.md'
|
|
||||||
}
|
|
||||||
|
|
||||||
# Output results
|
|
||||||
if ($Json) {
|
|
||||||
# JSON output
|
|
||||||
[PSCustomObject]@{
|
|
||||||
FEATURE_DIR = $paths.FEATURE_DIR
|
|
||||||
AVAILABLE_DOCS = $docs
|
|
||||||
} | ConvertTo-Json -Compress
|
|
||||||
} else {
|
|
||||||
# Text output
|
|
||||||
Write-Output "FEATURE_DIR:$($paths.FEATURE_DIR)"
|
|
||||||
Write-Output "AVAILABLE_DOCS:"
|
|
||||||
|
|
||||||
# Show status of each potential document
|
|
||||||
Test-FileExists -Path $paths.RESEARCH -Description 'research.md' | Out-Null
|
|
||||||
Test-FileExists -Path $paths.DATA_MODEL -Description 'data-model.md' | Out-Null
|
|
||||||
Test-DirHasFiles -Path $paths.CONTRACTS_DIR -Description 'contracts/' | Out-Null
|
|
||||||
Test-FileExists -Path $paths.QUICKSTART -Description 'quickstart.md' | Out-Null
|
|
||||||
|
|
||||||
if ($IncludeTasks) {
|
|
||||||
Test-FileExists -Path $paths.TASKS -Description 'tasks.md' | Out-Null
|
|
||||||
}
|
|
||||||
}
|
|
||||||
35
scripts/powershell/check-task-prerequisites.ps1
Normal file
35
scripts/powershell/check-task-prerequisites.ps1
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
#!/usr/bin/env pwsh
|
||||||
|
[CmdletBinding()]
|
||||||
|
param([switch]$Json)
|
||||||
|
$ErrorActionPreference = 'Stop'
|
||||||
|
. "$PSScriptRoot/common.ps1"
|
||||||
|
|
||||||
|
$paths = Get-FeaturePathsEnv
|
||||||
|
if (-not (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH)) { exit 1 }
|
||||||
|
|
||||||
|
if (-not (Test-Path $paths.FEATURE_DIR -PathType Container)) {
|
||||||
|
Write-Output "ERROR: Feature directory not found: $($paths.FEATURE_DIR)"
|
||||||
|
Write-Output "Run /specify first to create the feature structure."
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
if (-not (Test-Path $paths.IMPL_PLAN -PathType Leaf)) {
|
||||||
|
Write-Output "ERROR: plan.md not found in $($paths.FEATURE_DIR)"
|
||||||
|
Write-Output "Run /plan first to create the plan."
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if ($Json) {
|
||||||
|
$docs = @()
|
||||||
|
if (Test-Path $paths.RESEARCH) { $docs += 'research.md' }
|
||||||
|
if (Test-Path $paths.DATA_MODEL) { $docs += 'data-model.md' }
|
||||||
|
if ((Test-Path $paths.CONTRACTS_DIR) -and (Get-ChildItem -Path $paths.CONTRACTS_DIR -ErrorAction SilentlyContinue | Select-Object -First 1)) { $docs += 'contracts/' }
|
||||||
|
if (Test-Path $paths.QUICKSTART) { $docs += 'quickstart.md' }
|
||||||
|
[PSCustomObject]@{ FEATURE_DIR=$paths.FEATURE_DIR; AVAILABLE_DOCS=$docs } | ConvertTo-Json -Compress
|
||||||
|
} else {
|
||||||
|
Write-Output "FEATURE_DIR:$($paths.FEATURE_DIR)"
|
||||||
|
Write-Output "AVAILABLE_DOCS:"
|
||||||
|
Test-FileExists -Path $paths.RESEARCH -Description 'research.md' | Out-Null
|
||||||
|
Test-FileExists -Path $paths.DATA_MODEL -Description 'data-model.md' | Out-Null
|
||||||
|
Test-DirHasFiles -Path $paths.CONTRACTS_DIR -Description 'contracts/' | Out-Null
|
||||||
|
Test-FileExists -Path $paths.QUICKSTART -Description 'quickstart.md' | Out-Null
|
||||||
|
}
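
An illustrative run of the new check-task-prerequisites.ps1, assuming PowerShell 7 is available as pwsh; the JSON shape follows from the PSCustomObject built above, and the values shown are placeholders:

pwsh -File scripts/powershell/check-task-prerequisites.ps1 -Json
# e.g. {"FEATURE_DIR":"/path/to/specs/001-example","AVAILABLE_DOCS":["research.md","data-model.md"]}
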
|
||||||
@@ -1,84 +1,16 @@
|
|||||||
#!/usr/bin/env pwsh
|
#!/usr/bin/env pwsh
|
||||||
# Common PowerShell functions analogous to common.sh
|
# Common PowerShell functions analogous to common.sh (moved to powershell/)
|
||||||
|
|
||||||
function Get-RepoRoot {
|
function Get-RepoRoot {
|
||||||
try {
|
git rev-parse --show-toplevel
|
||||||
$result = git rev-parse --show-toplevel 2>$null
|
|
||||||
if ($LASTEXITCODE -eq 0) {
|
|
||||||
return $result
|
|
||||||
}
|
|
||||||
} catch {
|
|
||||||
# Git command failed
|
|
||||||
}
|
|
||||||
|
|
||||||
# Fall back to script location for non-git repos
|
|
||||||
return (Resolve-Path (Join-Path $PSScriptRoot "../../..")).Path
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function Get-CurrentBranch {
|
function Get-CurrentBranch {
|
||||||
# First check if SPECIFY_FEATURE environment variable is set
|
git rev-parse --abbrev-ref HEAD
|
||||||
if ($env:SPECIFY_FEATURE) {
|
|
||||||
return $env:SPECIFY_FEATURE
|
|
||||||
}
|
|
||||||
|
|
||||||
# Then check git if available
|
|
||||||
try {
|
|
||||||
$result = git rev-parse --abbrev-ref HEAD 2>$null
|
|
||||||
if ($LASTEXITCODE -eq 0) {
|
|
||||||
return $result
|
|
||||||
}
|
|
||||||
} catch {
|
|
||||||
# Git command failed
|
|
||||||
}
|
|
||||||
|
|
||||||
# For non-git repos, try to find the latest feature directory
|
|
||||||
$repoRoot = Get-RepoRoot
|
|
||||||
$specsDir = Join-Path $repoRoot "specs"
|
|
||||||
|
|
||||||
if (Test-Path $specsDir) {
|
|
||||||
$latestFeature = ""
|
|
||||||
$highest = 0
|
|
||||||
|
|
||||||
Get-ChildItem -Path $specsDir -Directory | ForEach-Object {
|
|
||||||
if ($_.Name -match '^(\d{3})-') {
|
|
||||||
$num = [int]$matches[1]
|
|
||||||
if ($num -gt $highest) {
|
|
||||||
$highest = $num
|
|
||||||
$latestFeature = $_.Name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if ($latestFeature) {
|
|
||||||
return $latestFeature
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# Final fallback
|
|
||||||
return "main"
|
|
||||||
}
|
|
||||||
|
|
||||||
function Test-HasGit {
|
|
||||||
try {
|
|
||||||
git rev-parse --show-toplevel 2>$null | Out-Null
|
|
||||||
return ($LASTEXITCODE -eq 0)
|
|
||||||
} catch {
|
|
||||||
return $false
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function Test-FeatureBranch {
|
function Test-FeatureBranch {
|
||||||
param(
|
param([string]$Branch)
|
||||||
[string]$Branch,
|
|
||||||
[bool]$HasGit = $true
|
|
||||||
)
|
|
||||||
|
|
||||||
# For non-git repos, we can't enforce branch naming but still provide output
|
|
||||||
if (-not $HasGit) {
|
|
||||||
Write-Warning "[specify] Warning: Git repository not detected; skipped branch validation"
|
|
||||||
return $true
|
|
||||||
}
|
|
||||||
|
|
||||||
if ($Branch -notmatch '^[0-9]{3}-') {
|
if ($Branch -notmatch '^[0-9]{3}-') {
|
||||||
Write-Output "ERROR: Not on a feature branch. Current branch: $Branch"
|
Write-Output "ERROR: Not on a feature branch. Current branch: $Branch"
|
||||||
Write-Output "Feature branches should be named like: 001-feature-name"
|
Write-Output "Feature branches should be named like: 001-feature-name"
|
||||||
@@ -95,20 +27,17 @@ function Get-FeatureDir {
|
|||||||
function Get-FeaturePathsEnv {
|
function Get-FeaturePathsEnv {
|
||||||
$repoRoot = Get-RepoRoot
|
$repoRoot = Get-RepoRoot
|
||||||
$currentBranch = Get-CurrentBranch
|
$currentBranch = Get-CurrentBranch
|
||||||
$hasGit = Test-HasGit
|
|
||||||
$featureDir = Get-FeatureDir -RepoRoot $repoRoot -Branch $currentBranch
|
$featureDir = Get-FeatureDir -RepoRoot $repoRoot -Branch $currentBranch
|
||||||
|
|
||||||
[PSCustomObject]@{
|
[PSCustomObject]@{
|
||||||
REPO_ROOT = $repoRoot
|
REPO_ROOT = $repoRoot
|
||||||
CURRENT_BRANCH = $currentBranch
|
CURRENT_BRANCH = $currentBranch
|
||||||
HAS_GIT = $hasGit
|
FEATURE_DIR = $featureDir
|
||||||
FEATURE_DIR = $featureDir
|
FEATURE_SPEC = Join-Path $featureDir 'spec.md'
|
||||||
FEATURE_SPEC = Join-Path $featureDir 'spec.md'
|
IMPL_PLAN = Join-Path $featureDir 'plan.md'
|
||||||
IMPL_PLAN = Join-Path $featureDir 'plan.md'
|
TASKS = Join-Path $featureDir 'tasks.md'
|
||||||
TASKS = Join-Path $featureDir 'tasks.md'
|
RESEARCH = Join-Path $featureDir 'research.md'
|
||||||
RESEARCH = Join-Path $featureDir 'research.md'
|
DATA_MODEL = Join-Path $featureDir 'data-model.md'
|
||||||
DATA_MODEL = Join-Path $featureDir 'data-model.md'
|
QUICKSTART = Join-Path $featureDir 'quickstart.md'
|
||||||
QUICKSTART = Join-Path $featureDir 'quickstart.md'
|
|
||||||
CONTRACTS_DIR = Join-Path $featureDir 'contracts'
|
CONTRACTS_DIR = Join-Path $featureDir 'contracts'
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env pwsh
|
#!/usr/bin/env pwsh
|
||||||
# Create a new feature
|
# Create a new feature (moved to powershell/)
|
||||||
[CmdletBinding()]
|
[CmdletBinding()]
|
||||||
param(
|
param(
|
||||||
[switch]$Json,
|
[switch]$Json,
|
||||||
@@ -9,54 +9,11 @@ param(
|
|||||||
$ErrorActionPreference = 'Stop'
|
$ErrorActionPreference = 'Stop'
|
||||||
|
|
||||||
if (-not $FeatureDescription -or $FeatureDescription.Count -eq 0) {
|
if (-not $FeatureDescription -or $FeatureDescription.Count -eq 0) {
|
||||||
Write-Error "Usage: ./create-new-feature.ps1 [-Json] <feature description>"
|
Write-Error "Usage: ./create-new-feature.ps1 [-Json] <feature description>"; exit 1
|
||||||
exit 1
|
|
||||||
}
|
}
|
||||||
$featureDesc = ($FeatureDescription -join ' ').Trim()
|
$featureDesc = ($FeatureDescription -join ' ').Trim()
|
||||||
|
|
||||||
# Resolve repository root. Prefer git information when available, but fall back
|
$repoRoot = git rev-parse --show-toplevel
|
||||||
# to searching for repository markers so the workflow still functions in repositories that
|
|
||||||
# were initialised with --no-git.
|
|
||||||
function Find-RepositoryRoot {
|
|
||||||
param(
|
|
||||||
[string]$StartDir,
|
|
||||||
[string[]]$Markers = @('.git', '.specify')
|
|
||||||
)
|
|
||||||
$current = Resolve-Path $StartDir
|
|
||||||
while ($true) {
|
|
||||||
foreach ($marker in $Markers) {
|
|
||||||
if (Test-Path (Join-Path $current $marker)) {
|
|
||||||
return $current
|
|
||||||
}
|
|
||||||
}
|
|
||||||
$parent = Split-Path $current -Parent
|
|
||||||
if ($parent -eq $current) {
|
|
||||||
# Reached filesystem root without finding markers
|
|
||||||
return $null
|
|
||||||
}
|
|
||||||
$current = $parent
|
|
||||||
}
|
|
||||||
}
|
|
||||||
$fallbackRoot = (Find-RepositoryRoot -StartDir $PSScriptRoot)
|
|
||||||
if (-not $fallbackRoot) {
|
|
||||||
Write-Error "Error: Could not determine repository root. Please run this script from within the repository."
|
|
||||||
exit 1
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
$repoRoot = git rev-parse --show-toplevel 2>$null
|
|
||||||
if ($LASTEXITCODE -eq 0) {
|
|
||||||
$hasGit = $true
|
|
||||||
} else {
|
|
||||||
throw "Git not available"
|
|
||||||
}
|
|
||||||
} catch {
|
|
||||||
$repoRoot = $fallbackRoot
|
|
||||||
$hasGit = $false
|
|
||||||
}
|
|
||||||
|
|
||||||
Set-Location $repoRoot
|
|
||||||
|
|
||||||
$specsDir = Join-Path $repoRoot 'specs'
|
$specsDir = Join-Path $repoRoot 'specs'
|
||||||
New-Item -ItemType Directory -Path $specsDir -Force | Out-Null
|
New-Item -ItemType Directory -Path $specsDir -Force | Out-Null
|
||||||
|
|
||||||
@@ -76,42 +33,20 @@ $branchName = $featureDesc.ToLower() -replace '[^a-z0-9]', '-' -replace '-{2,}',
|
|||||||
$words = ($branchName -split '-') | Where-Object { $_ } | Select-Object -First 3
|
$words = ($branchName -split '-') | Where-Object { $_ } | Select-Object -First 3
|
||||||
$branchName = "$featureNum-$([string]::Join('-', $words))"
|
$branchName = "$featureNum-$([string]::Join('-', $words))"
|
||||||
|
|
||||||
if ($hasGit) {
|
git checkout -b $branchName | Out-Null
|
||||||
try {
|
|
||||||
git checkout -b $branchName | Out-Null
|
|
||||||
} catch {
|
|
||||||
Write-Warning "Failed to create git branch: $branchName"
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Write-Warning "[specify] Warning: Git repository not detected; skipped branch creation for $branchName"
|
|
||||||
}
|
|
||||||
|
|
||||||
$featureDir = Join-Path $specsDir $branchName
|
$featureDir = Join-Path $specsDir $branchName
|
||||||
New-Item -ItemType Directory -Path $featureDir -Force | Out-Null
|
New-Item -ItemType Directory -Path $featureDir -Force | Out-Null
|
||||||
|
|
||||||
$template = Join-Path $repoRoot '.specify/templates/spec-template.md'
|
$template = Join-Path $repoRoot 'templates/spec-template.md'
|
||||||
$specFile = Join-Path $featureDir 'spec.md'
|
$specFile = Join-Path $featureDir 'spec.md'
|
||||||
if (Test-Path $template) {
|
if (Test-Path $template) { Copy-Item $template $specFile -Force } else { New-Item -ItemType File -Path $specFile | Out-Null }
|
||||||
Copy-Item $template $specFile -Force
|
|
||||||
} else {
|
|
||||||
New-Item -ItemType File -Path $specFile | Out-Null
|
|
||||||
}
|
|
||||||
|
|
||||||
# Set the SPECIFY_FEATURE environment variable for the current session
|
|
||||||
$env:SPECIFY_FEATURE = $branchName
|
|
||||||
|
|
||||||
if ($Json) {
|
if ($Json) {
|
||||||
$obj = [PSCustomObject]@{
|
$obj = [PSCustomObject]@{ BRANCH_NAME = $branchName; SPEC_FILE = $specFile; FEATURE_NUM = $featureNum }
|
||||||
BRANCH_NAME = $branchName
|
|
||||||
SPEC_FILE = $specFile
|
|
||||||
FEATURE_NUM = $featureNum
|
|
||||||
HAS_GIT = $hasGit
|
|
||||||
}
|
|
||||||
$obj | ConvertTo-Json -Compress
|
$obj | ConvertTo-Json -Compress
|
||||||
} else {
|
} else {
|
||||||
Write-Output "BRANCH_NAME: $branchName"
|
Write-Output "BRANCH_NAME: $branchName"
|
||||||
Write-Output "SPEC_FILE: $specFile"
|
Write-Output "SPEC_FILE: $specFile"
|
||||||
Write-Output "FEATURE_NUM: $featureNum"
|
Write-Output "FEATURE_NUM: $featureNum"
|
||||||
Write-Output "HAS_GIT: $hasGit"
|
|
||||||
Write-Output "SPECIFY_FEATURE environment variable set to: $branchName"
|
|
||||||
}
|
}
|
||||||
|
|||||||
15
scripts/powershell/get-feature-paths.ps1
Normal file
15
scripts/powershell/get-feature-paths.ps1
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
#!/usr/bin/env pwsh
|
||||||
|
param()
|
||||||
|
$ErrorActionPreference = 'Stop'
|
||||||
|
|
||||||
|
. "$PSScriptRoot/common.ps1"
|
||||||
|
|
||||||
|
$paths = Get-FeaturePathsEnv
|
||||||
|
if (-not (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH)) { exit 1 }
|
||||||
|
|
||||||
|
Write-Output "REPO_ROOT: $($paths.REPO_ROOT)"
|
||||||
|
Write-Output "BRANCH: $($paths.CURRENT_BRANCH)"
|
||||||
|
Write-Output "FEATURE_DIR: $($paths.FEATURE_DIR)"
|
||||||
|
Write-Output "FEATURE_SPEC: $($paths.FEATURE_SPEC)"
|
||||||
|
Write-Output "IMPL_PLAN: $($paths.IMPL_PLAN)"
|
||||||
|
Write-Output "TASKS: $($paths.TASKS)"
|
||||||
@@ -1,61 +1,21 @@
|
|||||||
#!/usr/bin/env pwsh
|
#!/usr/bin/env pwsh
|
||||||
# Setup implementation plan for a feature
|
|
||||||
|
|
||||||
[CmdletBinding()]
|
[CmdletBinding()]
|
||||||
param(
|
param([switch]$Json)
|
||||||
[switch]$Json,
|
|
||||||
[switch]$Help
|
|
||||||
)
|
|
||||||
|
|
||||||
$ErrorActionPreference = 'Stop'
|
$ErrorActionPreference = 'Stop'
|
||||||
|
|
||||||
# Show help if requested
|
|
||||||
if ($Help) {
|
|
||||||
Write-Output "Usage: ./setup-plan.ps1 [-Json] [-Help]"
|
|
||||||
Write-Output " -Json Output results in JSON format"
|
|
||||||
Write-Output " -Help Show this help message"
|
|
||||||
exit 0
|
|
||||||
}
|
|
||||||
|
|
||||||
# Load common functions
|
|
||||||
. "$PSScriptRoot/common.ps1"
|
. "$PSScriptRoot/common.ps1"
|
||||||
|
|
||||||
# Get all paths and variables from common functions
|
|
||||||
$paths = Get-FeaturePathsEnv
|
$paths = Get-FeaturePathsEnv
|
||||||
|
if (-not (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH)) { exit 1 }
|
||||||
|
|
||||||
# Check if we're on a proper feature branch (only for git repos)
|
|
||||||
if (-not (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH -HasGit $paths.HAS_GIT)) {
|
|
||||||
exit 1
|
|
||||||
}
|
|
||||||
|
|
||||||
# Ensure the feature directory exists
|
|
||||||
New-Item -ItemType Directory -Path $paths.FEATURE_DIR -Force | Out-Null
|
New-Item -ItemType Directory -Path $paths.FEATURE_DIR -Force | Out-Null
|
||||||
|
$template = Join-Path $paths.REPO_ROOT 'templates/plan-template.md'
|
||||||
|
if (Test-Path $template) { Copy-Item $template $paths.IMPL_PLAN -Force }
|
||||||
|
|
||||||
# Copy plan template if it exists, otherwise note it or create empty file
|
|
||||||
$template = Join-Path $paths.REPO_ROOT '.specify/templates/plan-template.md'
|
|
||||||
if (Test-Path $template) {
|
|
||||||
Copy-Item $template $paths.IMPL_PLAN -Force
|
|
||||||
Write-Output "Copied plan template to $($paths.IMPL_PLAN)"
|
|
||||||
} else {
|
|
||||||
Write-Warning "Plan template not found at $template"
|
|
||||||
# Create a basic plan file if template doesn't exist
|
|
||||||
New-Item -ItemType File -Path $paths.IMPL_PLAN -Force | Out-Null
|
|
||||||
}
|
|
||||||
|
|
||||||
# Output results
|
|
||||||
if ($Json) {
|
if ($Json) {
|
||||||
$result = [PSCustomObject]@{
|
[PSCustomObject]@{ FEATURE_SPEC=$paths.FEATURE_SPEC; IMPL_PLAN=$paths.IMPL_PLAN; SPECS_DIR=$paths.FEATURE_DIR; BRANCH=$paths.CURRENT_BRANCH } | ConvertTo-Json -Compress
|
||||||
FEATURE_SPEC = $paths.FEATURE_SPEC
|
|
||||||
IMPL_PLAN = $paths.IMPL_PLAN
|
|
||||||
SPECS_DIR = $paths.FEATURE_DIR
|
|
||||||
BRANCH = $paths.CURRENT_BRANCH
|
|
||||||
HAS_GIT = $paths.HAS_GIT
|
|
||||||
}
|
|
||||||
$result | ConvertTo-Json -Compress
|
|
||||||
} else {
|
} else {
|
||||||
Write-Output "FEATURE_SPEC: $($paths.FEATURE_SPEC)"
|
Write-Output "FEATURE_SPEC: $($paths.FEATURE_SPEC)"
|
||||||
Write-Output "IMPL_PLAN: $($paths.IMPL_PLAN)"
|
Write-Output "IMPL_PLAN: $($paths.IMPL_PLAN)"
|
||||||
Write-Output "SPECS_DIR: $($paths.FEATURE_DIR)"
|
Write-Output "SPECS_DIR: $($paths.FEATURE_DIR)"
|
||||||
Write-Output "BRANCH: $($paths.CURRENT_BRANCH)"
|
Write-Output "BRANCH: $($paths.CURRENT_BRANCH)"
|
||||||
Write-Output "HAS_GIT: $($paths.HAS_GIT)"
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,430 +1,91 @@
|
|||||||
#!/usr/bin/env pwsh
|
#!/usr/bin/env pwsh
|
||||||
<#!
|
[CmdletBinding()]
|
||||||
.SYNOPSIS
|
param([string]$AgentType)
|
||||||
Update agent context files with information from plan.md (PowerShell version)
|
|
||||||
|
|
||||||
.DESCRIPTION
|
|
||||||
Mirrors the behavior of scripts/bash/update-agent-context.sh:
|
|
||||||
1. Environment Validation
|
|
||||||
2. Plan Data Extraction
|
|
||||||
3. Agent File Management (create from template or update existing)
|
|
||||||
4. Content Generation (technology stack, recent changes, timestamp)
|
|
||||||
5. Multi-Agent Support (claude, gemini, copilot, cursor, qwen, opencode, codex, windsurf)
|
|
||||||
|
|
||||||
.PARAMETER AgentType
|
|
||||||
Optional agent key to update a single agent. If omitted, updates all existing agent files (creating a default Claude file if none exist).
|
|
||||||
|
|
||||||
.EXAMPLE
|
|
||||||
./update-agent-context.ps1 -AgentType claude
|
|
||||||
|
|
||||||
.EXAMPLE
|
|
||||||
./update-agent-context.ps1 # Updates all existing agent files
|
|
||||||
|
|
||||||
.NOTES
|
|
||||||
Relies on common helper functions in common.ps1
|
|
||||||
#>
|
|
||||||
param(
|
|
||||||
[Parameter(Position=0)]
|
|
||||||
[ValidateSet('claude','gemini','copilot','cursor','qwen','opencode','codex','windsurf','kilocode','auggie','roo')]
|
|
||||||
[string]$AgentType
|
|
||||||
)
|
|
||||||
|
|
||||||
$ErrorActionPreference = 'Stop'
|
$ErrorActionPreference = 'Stop'
|
||||||
|
|
||||||
# Import common helpers
|
$repoRoot = git rev-parse --show-toplevel
|
||||||
$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
|
$currentBranch = git rev-parse --abbrev-ref HEAD
|
||||||
. (Join-Path $ScriptDir 'common.ps1')
|
$featureDir = Join-Path $repoRoot "specs/$currentBranch"
|
||||||
|
$newPlan = Join-Path $featureDir 'plan.md'
|
||||||
|
if (-not (Test-Path $newPlan)) { Write-Error "ERROR: No plan.md found at $newPlan"; exit 1 }
|
||||||
|
|
||||||
# Acquire environment paths
|
$claudeFile = Join-Path $repoRoot 'CLAUDE.md'
|
||||||
$envData = Get-FeaturePathsEnv
|
$geminiFile = Join-Path $repoRoot 'GEMINI.md'
|
||||||
$REPO_ROOT = $envData.REPO_ROOT
|
$copilotFile = Join-Path $repoRoot '.github/copilot-instructions.md'
|
||||||
$CURRENT_BRANCH = $envData.CURRENT_BRANCH
|
|
||||||
$HAS_GIT = $envData.HAS_GIT
|
|
||||||
$IMPL_PLAN = $envData.IMPL_PLAN
|
|
||||||
$NEW_PLAN = $IMPL_PLAN
|
|
||||||
|
|
||||||
# Agent file paths
|
Write-Output "=== Updating agent context files for feature $currentBranch ==="
|
||||||
$CLAUDE_FILE = Join-Path $REPO_ROOT 'CLAUDE.md'
|
|
||||||
$GEMINI_FILE = Join-Path $REPO_ROOT 'GEMINI.md'
|
|
||||||
$COPILOT_FILE = Join-Path $REPO_ROOT '.github/copilot-instructions.md'
|
|
||||||
$CURSOR_FILE = Join-Path $REPO_ROOT '.cursor/rules/specify-rules.mdc'
|
|
||||||
$QWEN_FILE = Join-Path $REPO_ROOT 'QWEN.md'
|
|
||||||
$AGENTS_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
|
|
||||||
$WINDSURF_FILE = Join-Path $REPO_ROOT '.windsurf/rules/specify-rules.md'
|
|
||||||
$KILOCODE_FILE = Join-Path $REPO_ROOT '.kilocode/rules/specify-rules.md'
|
|
||||||
$AUGGIE_FILE = Join-Path $REPO_ROOT '.augment/rules/specify-rules.md'
|
|
||||||
$ROO_FILE = Join-Path $REPO_ROOT '.roo/rules/specify-rules.md'
|
|
||||||
|
|
||||||
$TEMPLATE_FILE = Join-Path $REPO_ROOT '.specify/templates/agent-file-template.md'
|
function Get-PlanValue($pattern) {
|
||||||
|
if (-not (Test-Path $newPlan)) { return '' }
|
||||||
# Parsed plan data placeholders
|
$line = Select-String -Path $newPlan -Pattern $pattern | Select-Object -First 1
|
||||||
$script:NEW_LANG = ''
|
if ($line) { return ($line.Line -replace "^\*\*$pattern\*\*: ", '') }
|
||||||
$script:NEW_FRAMEWORK = ''
|
return ''
|
||||||
$script:NEW_DB = ''
|
|
||||||
$script:NEW_PROJECT_TYPE = ''
|
|
||||||
|
|
||||||
function Write-Info {
|
|
||||||
param(
|
|
||||||
[Parameter(Mandatory=$true)]
|
|
||||||
[string]$Message
|
|
||||||
)
|
|
||||||
Write-Host "INFO: $Message"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function Write-Success {
|
$newLang = Get-PlanValue 'Language/Version'
|
||||||
param(
|
$newFramework = Get-PlanValue 'Primary Dependencies'
|
||||||
[Parameter(Mandatory=$true)]
|
$newTesting = Get-PlanValue 'Testing'
|
||||||
[string]$Message
|
$newDb = Get-PlanValue 'Storage'
|
||||||
)
|
$newProjectType = Get-PlanValue 'Project Type'
|
||||||
Write-Host "$([char]0x2713) $Message"
|
|
||||||
|
function Initialize-AgentFile($targetFile, $agentName) {
|
||||||
|
if (Test-Path $targetFile) { return }
|
||||||
|
$template = Join-Path $repoRoot 'templates/agent-file-template.md'
|
||||||
|
if (-not (Test-Path $template)) { Write-Error "Template not found: $template"; return }
|
||||||
|
$content = Get-Content $template -Raw
|
||||||
|
$content = $content.Replace('[PROJECT NAME]', (Split-Path $repoRoot -Leaf))
|
||||||
|
$content = $content.Replace('[DATE]', (Get-Date -Format 'yyyy-MM-dd'))
|
||||||
|
$content = $content.Replace('[EXTRACTED FROM ALL PLAN.MD FILES]', "- $newLang + $newFramework ($currentBranch)")
|
||||||
|
if ($newProjectType -match 'web') { $structure = "backend/`nfrontend/`ntests/" } else { $structure = "src/`ntests/" }
|
||||||
|
$content = $content.Replace('[ACTUAL STRUCTURE FROM PLANS]', $structure)
|
||||||
|
if ($newLang -match 'Python') { $commands = 'cd src && pytest && ruff check .' }
|
||||||
|
elseif ($newLang -match 'Rust') { $commands = 'cargo test && cargo clippy' }
|
||||||
|
elseif ($newLang -match 'JavaScript|TypeScript') { $commands = 'npm test && npm run lint' }
|
||||||
|
else { $commands = "# Add commands for $newLang" }
|
||||||
|
$content = $content.Replace('[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES]', $commands)
|
||||||
|
$content = $content.Replace('[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE]', "${newLang}: Follow standard conventions")
|
||||||
|
$content = $content.Replace('[LAST 3 FEATURES AND WHAT THEY ADDED]', "- ${currentBranch}: Added ${newLang} + ${newFramework}")
|
||||||
|
$content | Set-Content $targetFile -Encoding UTF8
|
||||||
}
|
}
|
||||||
|
|
||||||
function Write-WarningMsg {
|
function Update-AgentFile($targetFile, $agentName) {
|
||||||
param(
|
if (-not (Test-Path $targetFile)) { Initialize-AgentFile $targetFile $agentName; return }
|
||||||
[Parameter(Mandatory=$true)]
|
$content = Get-Content $targetFile -Raw
|
||||||
[string]$Message
|
if ($newLang -and ($content -notmatch [regex]::Escape($newLang))) { $content = $content -replace '(## Active Technologies\n)', "`$1- $newLang + $newFramework ($currentBranch)`n" }
|
||||||
)
|
if ($newDb -and $newDb -ne 'N/A' -and ($content -notmatch [regex]::Escape($newDb))) { $content = $content -replace '(## Active Technologies\n)', "`$1- $newDb ($currentBranch)`n" }
|
||||||
Write-Warning $Message
|
if ($content -match '## Recent Changes\n([\s\S]*?)(\n\n|$)') {
|
||||||
}
|
$changesBlock = $matches[1].Trim().Split("`n")
|
||||||
|
$changesBlock = ,"- $currentBranch: Added $newLang + $newFramework" + $changesBlock
|
||||||
function Write-Err {
|
$changesBlock = $changesBlock | Where-Object { $_ } | Select-Object -First 3
|
||||||
param(
|
$joined = ($changesBlock -join "`n")
|
||||||
[Parameter(Mandatory=$true)]
|
$content = [regex]::Replace($content, '## Recent Changes\n([\s\S]*?)(\n\n|$)', "## Recent Changes`n$joined`n`n")
|
||||||
[string]$Message
|
|
||||||
)
|
|
||||||
Write-Host "ERROR: $Message" -ForegroundColor Red
|
|
||||||
}
|
|
||||||
|
|
||||||
function Validate-Environment {
|
|
||||||
if (-not $CURRENT_BRANCH) {
|
|
||||||
Write-Err 'Unable to determine current feature'
|
|
||||||
if ($HAS_GIT) { Write-Info "Make sure you're on a feature branch" } else { Write-Info 'Set SPECIFY_FEATURE environment variable or create a feature first' }
|
|
||||||
exit 1
|
|
||||||
}
|
|
||||||
if (-not (Test-Path $NEW_PLAN)) {
|
|
||||||
Write-Err "No plan.md found at $NEW_PLAN"
|
|
||||||
Write-Info 'Ensure you are working on a feature with a corresponding spec directory'
|
|
||||||
if (-not $HAS_GIT) { Write-Info 'Use: $env:SPECIFY_FEATURE=your-feature-name or create a new feature first' }
|
|
||||||
exit 1
|
|
||||||
}
|
|
||||||
if (-not (Test-Path $TEMPLATE_FILE)) {
|
|
||||||
Write-Err "Template file not found at $TEMPLATE_FILE"
|
|
||||||
Write-Info 'Run specify init to scaffold .specify/templates, or add agent-file-template.md there.'
|
|
||||||
exit 1
|
|
||||||
}
|
}
|
||||||
|
$content = [regex]::Replace($content, 'Last updated: \d{4}-\d{2}-\d{2}', "Last updated: $(Get-Date -Format 'yyyy-MM-dd')")
|
||||||
|
$content | Set-Content $targetFile -Encoding UTF8
|
||||||
|
Write-Output "✅ $agentName context file updated successfully"
|
||||||
}
|
}
|
||||||
|
|
||||||
function Extract-PlanField {
|
switch ($AgentType) {
|
||||||
param(
|
'claude' { Update-AgentFile $claudeFile 'Claude Code' }
|
||||||
[Parameter(Mandatory=$true)]
|
'gemini' { Update-AgentFile $geminiFile 'Gemini CLI' }
|
||||||
[string]$FieldPattern,
|
'copilot' { Update-AgentFile $copilotFile 'GitHub Copilot' }
|
||||||
[Parameter(Mandatory=$true)]
|
'' {
|
||||||
[string]$PlanFile
|
foreach ($pair in @(@{file=$claudeFile; name='Claude Code'}, @{file=$geminiFile; name='Gemini CLI'}, @{file=$copilotFile; name='GitHub Copilot'})) {
|
||||||
)
|
if (Test-Path $pair.file) { Update-AgentFile $pair.file $pair.name }
|
||||||
if (-not (Test-Path $PlanFile)) { return '' }
|
|
||||||
# Lines like **Language/Version**: Python 3.12
|
|
||||||
$regex = "^\*\*$([Regex]::Escape($FieldPattern))\*\*: (.+)$"
|
|
||||||
Get-Content -LiteralPath $PlanFile -Encoding utf8 | ForEach-Object {
|
|
||||||
if ($_ -match $regex) {
|
|
||||||
$val = $Matches[1].Trim()
|
|
||||||
if ($val -notin @('NEEDS CLARIFICATION','N/A')) { return $val }
|
|
||||||
}
|
}
|
||||||
} | Select-Object -First 1
|
if (-not (Test-Path $claudeFile) -and -not (Test-Path $geminiFile) -and -not (Test-Path $copilotFile)) {
|
||||||
}
|
Write-Output 'No agent context files found. Creating Claude Code context file by default.'
|
||||||
|
Update-AgentFile $claudeFile 'Claude Code'
|
||||||
function Parse-PlanData {
|
|
||||||
param(
|
|
||||||
[Parameter(Mandatory=$true)]
|
|
||||||
[string]$PlanFile
|
|
||||||
)
|
|
||||||
if (-not (Test-Path $PlanFile)) { Write-Err "Plan file not found: $PlanFile"; return $false }
|
|
||||||
Write-Info "Parsing plan data from $PlanFile"
|
|
||||||
$script:NEW_LANG = Extract-PlanField -FieldPattern 'Language/Version' -PlanFile $PlanFile
|
|
||||||
$script:NEW_FRAMEWORK = Extract-PlanField -FieldPattern 'Primary Dependencies' -PlanFile $PlanFile
|
|
||||||
$script:NEW_DB = Extract-PlanField -FieldPattern 'Storage' -PlanFile $PlanFile
|
|
||||||
$script:NEW_PROJECT_TYPE = Extract-PlanField -FieldPattern 'Project Type' -PlanFile $PlanFile
|
|
||||||
|
|
||||||
if ($NEW_LANG) { Write-Info "Found language: $NEW_LANG" } else { Write-WarningMsg 'No language information found in plan' }
|
|
||||||
if ($NEW_FRAMEWORK) { Write-Info "Found framework: $NEW_FRAMEWORK" }
|
|
||||||
if ($NEW_DB -and $NEW_DB -ne 'N/A') { Write-Info "Found database: $NEW_DB" }
|
|
||||||
if ($NEW_PROJECT_TYPE) { Write-Info "Found project type: $NEW_PROJECT_TYPE" }
|
|
||||||
return $true
|
|
||||||
}
|
|
||||||
|
|
||||||
function Format-TechnologyStack {
|
|
||||||
param(
|
|
||||||
[Parameter(Mandatory=$false)]
|
|
||||||
[string]$Lang,
|
|
||||||
[Parameter(Mandatory=$false)]
|
|
||||||
[string]$Framework
|
|
||||||
)
|
|
||||||
$parts = @()
|
|
||||||
if ($Lang -and $Lang -ne 'NEEDS CLARIFICATION') { $parts += $Lang }
|
|
||||||
if ($Framework -and $Framework -notin @('NEEDS CLARIFICATION','N/A')) { $parts += $Framework }
|
|
||||||
if (-not $parts) { return '' }
|
|
||||||
return ($parts -join ' + ')
|
|
||||||
}
|
|
||||||
|
|
||||||
function Get-ProjectStructure {
|
|
||||||
param(
|
|
||||||
[Parameter(Mandatory=$false)]
|
|
||||||
[string]$ProjectType
|
|
||||||
)
|
|
||||||
if ($ProjectType -match 'web') { return "backend/`nfrontend/`ntests/" } else { return "src/`ntests/" }
|
|
||||||
}
|
|
||||||
|
|
||||||
function Get-CommandsForLanguage {
|
|
||||||
param(
|
|
||||||
[Parameter(Mandatory=$false)]
|
|
||||||
[string]$Lang
|
|
||||||
)
|
|
||||||
switch -Regex ($Lang) {
|
|
||||||
'Python' { return "cd src; pytest; ruff check ." }
|
|
||||||
'Rust' { return "cargo test; cargo clippy" }
|
|
||||||
'JavaScript|TypeScript' { return "npm test; npm run lint" }
|
|
||||||
default { return "# Add commands for $Lang" }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
function Get-LanguageConventions {
|
|
||||||
param(
|
|
||||||
[Parameter(Mandatory=$false)]
|
|
||||||
[string]$Lang
|
|
||||||
)
|
|
||||||
if ($Lang) { "${Lang}: Follow standard conventions" } else { 'General: Follow standard conventions' }
|
|
||||||
}
|
|
||||||
|
|
||||||
function New-AgentFile {
|
|
||||||
param(
|
|
||||||
[Parameter(Mandatory=$true)]
|
|
||||||
[string]$TargetFile,
|
|
||||||
[Parameter(Mandatory=$true)]
|
|
||||||
[string]$ProjectName,
|
|
||||||
[Parameter(Mandatory=$true)]
|
|
||||||
[datetime]$Date
|
|
||||||
)
|
|
||||||
if (-not (Test-Path $TEMPLATE_FILE)) { Write-Err "Template not found at $TEMPLATE_FILE"; return $false }
|
|
||||||
$temp = New-TemporaryFile
|
|
||||||
Copy-Item -LiteralPath $TEMPLATE_FILE -Destination $temp -Force
|
|
||||||
|
|
||||||
$projectStructure = Get-ProjectStructure -ProjectType $NEW_PROJECT_TYPE
|
|
||||||
$commands = Get-CommandsForLanguage -Lang $NEW_LANG
|
|
||||||
$languageConventions = Get-LanguageConventions -Lang $NEW_LANG
|
|
||||||
|
|
||||||
$escaped_lang = $NEW_LANG
|
|
||||||
$escaped_framework = $NEW_FRAMEWORK
|
|
||||||
$escaped_branch = $CURRENT_BRANCH
|
|
||||||
|
|
||||||
$content = Get-Content -LiteralPath $temp -Raw -Encoding utf8
|
|
||||||
$content = $content -replace '\[PROJECT NAME\]',$ProjectName
|
|
||||||
$content = $content -replace '\[DATE\]',$Date.ToString('yyyy-MM-dd')
|
|
||||||
|
|
||||||
# Build the technology stack string safely
|
|
||||||
$techStackForTemplate = ""
|
|
||||||
if ($escaped_lang -and $escaped_framework) {
|
|
||||||
$techStackForTemplate = "- $escaped_lang + $escaped_framework ($escaped_branch)"
|
|
||||||
} elseif ($escaped_lang) {
|
|
||||||
$techStackForTemplate = "- $escaped_lang ($escaped_branch)"
|
|
||||||
} elseif ($escaped_framework) {
|
|
||||||
$techStackForTemplate = "- $escaped_framework ($escaped_branch)"
|
|
||||||
}
|
|
||||||
|
|
||||||
$content = $content -replace '\[EXTRACTED FROM ALL PLAN.MD FILES\]',$techStackForTemplate
|
|
||||||
# For project structure we manually embed (keep newlines)
|
|
||||||
$escapedStructure = [Regex]::Escape($projectStructure)
|
|
||||||
$content = $content -replace '\[ACTUAL STRUCTURE FROM PLANS\]',$escapedStructure
|
|
||||||
# Replace escaped newlines placeholder after all replacements
|
|
||||||
$content = $content -replace '\[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES\]',$commands
|
|
||||||
$content = $content -replace '\[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE\]',$languageConventions
|
|
||||||
|
|
||||||
# Build the recent changes string safely
|
|
||||||
$recentChangesForTemplate = ""
|
|
||||||
if ($escaped_lang -and $escaped_framework) {
|
|
||||||
$recentChangesForTemplate = "- ${escaped_branch}: Added ${escaped_lang} + ${escaped_framework}"
|
|
||||||
} elseif ($escaped_lang) {
|
|
||||||
$recentChangesForTemplate = "- ${escaped_branch}: Added ${escaped_lang}"
|
|
||||||
} elseif ($escaped_framework) {
|
|
||||||
$recentChangesForTemplate = "- ${escaped_branch}: Added ${escaped_framework}"
|
|
||||||
}
|
|
||||||
|
|
||||||
$content = $content -replace '\[LAST 3 FEATURES AND WHAT THEY ADDED\]',$recentChangesForTemplate
|
|
||||||
# Convert literal \n sequences introduced by Escape to real newlines
|
|
||||||
$content = $content -replace '\\n',[Environment]::NewLine
|
|
||||||
|
|
||||||
$parent = Split-Path -Parent $TargetFile
|
|
||||||
if (-not (Test-Path $parent)) { New-Item -ItemType Directory -Path $parent | Out-Null }
|
|
||||||
Set-Content -LiteralPath $TargetFile -Value $content -NoNewline -Encoding utf8
|
|
||||||
Remove-Item $temp -Force
|
|
||||||
return $true
|
|
||||||
}
|
|
||||||
|
|
||||||
function Update-ExistingAgentFile {
|
|
||||||
param(
|
|
||||||
[Parameter(Mandatory=$true)]
|
|
||||||
[string]$TargetFile,
|
|
||||||
[Parameter(Mandatory=$true)]
|
|
||||||
[datetime]$Date
|
|
||||||
)
|
|
||||||
if (-not (Test-Path $TargetFile)) { return (New-AgentFile -TargetFile $TargetFile -ProjectName (Split-Path $REPO_ROOT -Leaf) -Date $Date) }
|
|
||||||
|
|
||||||
$techStack = Format-TechnologyStack -Lang $NEW_LANG -Framework $NEW_FRAMEWORK
|
|
||||||
$newTechEntries = @()
|
|
||||||
if ($techStack) {
|
|
||||||
$escapedTechStack = [Regex]::Escape($techStack)
|
|
||||||
if (-not (Select-String -Pattern $escapedTechStack -Path $TargetFile -Quiet)) {
|
|
||||||
$newTechEntries += "- $techStack ($CURRENT_BRANCH)"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if ($NEW_DB -and $NEW_DB -notin @('N/A','NEEDS CLARIFICATION')) {
|
Default { Write-Error "ERROR: Unknown agent type '$AgentType'. Use: claude, gemini, copilot, or leave empty for all."; exit 1 }
|
||||||
$escapedDB = [Regex]::Escape($NEW_DB)
|
|
||||||
if (-not (Select-String -Pattern $escapedDB -Path $TargetFile -Quiet)) {
|
|
||||||
$newTechEntries += "- $NEW_DB ($CURRENT_BRANCH)"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
$newChangeEntry = ''
|
|
||||||
if ($techStack) { $newChangeEntry = "- ${CURRENT_BRANCH}: Added ${techStack}" }
|
|
||||||
elseif ($NEW_DB -and $NEW_DB -notin @('N/A','NEEDS CLARIFICATION')) { $newChangeEntry = "- ${CURRENT_BRANCH}: Added ${NEW_DB}" }
|
|
||||||
|
|
||||||
$lines = Get-Content -LiteralPath $TargetFile -Encoding utf8
|
|
||||||
$output = New-Object System.Collections.Generic.List[string]
|
|
||||||
$inTech = $false; $inChanges = $false; $techAdded = $false; $changeAdded = $false; $existingChanges = 0
|
|
||||||
|
|
||||||
for ($i=0; $i -lt $lines.Count; $i++) {
|
|
||||||
$line = $lines[$i]
|
|
||||||
if ($line -eq '## Active Technologies') {
|
|
||||||
$output.Add($line)
|
|
||||||
$inTech = $true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if ($inTech -and $line -match '^##\s') {
|
|
||||||
if (-not $techAdded -and $newTechEntries.Count -gt 0) { $newTechEntries | ForEach-Object { $output.Add($_) }; $techAdded = $true }
|
|
||||||
$output.Add($line); $inTech = $false; continue
|
|
||||||
}
|
|
||||||
if ($inTech -and [string]::IsNullOrWhiteSpace($line)) {
|
|
||||||
if (-not $techAdded -and $newTechEntries.Count -gt 0) { $newTechEntries | ForEach-Object { $output.Add($_) }; $techAdded = $true }
|
|
||||||
$output.Add($line); continue
|
|
||||||
}
|
|
||||||
if ($line -eq '## Recent Changes') {
|
|
||||||
$output.Add($line)
|
|
||||||
if ($newChangeEntry) { $output.Add($newChangeEntry); $changeAdded = $true }
|
|
||||||
$inChanges = $true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if ($inChanges -and $line -match '^##\s') { $output.Add($line); $inChanges = $false; continue }
|
|
||||||
if ($inChanges -and $line -match '^- ') {
|
|
||||||
if ($existingChanges -lt 2) { $output.Add($line); $existingChanges++ }
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if ($line -match '\*\*Last updated\*\*: .*\d{4}-\d{2}-\d{2}') {
|
|
||||||
$output.Add(($line -replace '\d{4}-\d{2}-\d{2}',$Date.ToString('yyyy-MM-dd')))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
$output.Add($line)
|
|
||||||
}
|
|
||||||
|
|
||||||
# Post-loop check: if we're still in the Active Technologies section and haven't added new entries
|
|
||||||
if ($inTech -and -not $techAdded -and $newTechEntries.Count -gt 0) {
|
|
||||||
$newTechEntries | ForEach-Object { $output.Add($_) }
|
|
||||||
}
|
|
||||||
|
|
||||||
Set-Content -LiteralPath $TargetFile -Value ($output -join [Environment]::NewLine) -Encoding utf8
|
|
||||||
return $true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function Update-AgentFile {
|
Write-Output ''
|
||||||
param(
|
Write-Output 'Summary of changes:'
|
||||||
[Parameter(Mandatory=$true)]
|
if ($newLang) { Write-Output "- Added language: $newLang" }
|
||||||
[string]$TargetFile,
|
if ($newFramework) { Write-Output "- Added framework: $newFramework" }
|
||||||
[Parameter(Mandatory=$true)]
|
if ($newDb -and $newDb -ne 'N/A') { Write-Output "- Added database: $newDb" }
|
||||||
[string]$AgentName
|
|
||||||
)
|
|
||||||
if (-not $TargetFile -or -not $AgentName) { Write-Err 'Update-AgentFile requires TargetFile and AgentName'; return $false }
|
|
||||||
Write-Info "Updating $AgentName context file: $TargetFile"
|
|
||||||
$projectName = Split-Path $REPO_ROOT -Leaf
|
|
||||||
$date = Get-Date
|
|
||||||
|
|
||||||
$dir = Split-Path -Parent $TargetFile
|
Write-Output ''
|
||||||
if (-not (Test-Path $dir)) { New-Item -ItemType Directory -Path $dir | Out-Null }
|
Write-Output 'Usage: ./update-agent-context.ps1 [claude|gemini|copilot]'
|
||||||
|
|
||||||
if (-not (Test-Path $TargetFile)) {
|
|
||||||
if (New-AgentFile -TargetFile $TargetFile -ProjectName $projectName -Date $date) { Write-Success "Created new $AgentName context file" } else { Write-Err 'Failed to create new agent file'; return $false }
|
|
||||||
} else {
|
|
||||||
try {
|
|
||||||
if (Update-ExistingAgentFile -TargetFile $TargetFile -Date $date) { Write-Success "Updated existing $AgentName context file" } else { Write-Err 'Failed to update agent file'; return $false }
|
|
||||||
} catch {
|
|
||||||
Write-Err "Cannot access or update existing file: $TargetFile. $_"
|
|
||||||
return $false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return $true
|
|
||||||
}
|
|
||||||
|
|
||||||
function Update-SpecificAgent {
|
|
||||||
param(
|
|
||||||
[Parameter(Mandatory=$true)]
|
|
||||||
[string]$Type
|
|
||||||
)
|
|
||||||
switch ($Type) {
|
|
||||||
'claude' { Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code' }
|
|
||||||
'gemini' { Update-AgentFile -TargetFile $GEMINI_FILE -AgentName 'Gemini CLI' }
|
|
||||||
'copilot' { Update-AgentFile -TargetFile $COPILOT_FILE -AgentName 'GitHub Copilot' }
|
|
||||||
'cursor' { Update-AgentFile -TargetFile $CURSOR_FILE -AgentName 'Cursor IDE' }
|
|
||||||
'qwen' { Update-AgentFile -TargetFile $QWEN_FILE -AgentName 'Qwen Code' }
|
|
||||||
'opencode' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'opencode' }
|
|
||||||
'codex' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Codex CLI' }
|
|
||||||
'windsurf' { Update-AgentFile -TargetFile $WINDSURF_FILE -AgentName 'Windsurf' }
|
|
||||||
'kilocode' { Update-AgentFile -TargetFile $KILOCODE_FILE -AgentName 'Kilo Code' }
|
|
||||||
'auggie' { Update-AgentFile -TargetFile $AUGGIE_FILE -AgentName 'Auggie CLI' }
|
|
||||||
'roo' { Update-AgentFile -TargetFile $ROO_FILE -AgentName 'Roo Code' }
|
|
||||||
default { Write-Err "Unknown agent type '$Type'"; Write-Err 'Expected: claude|gemini|copilot|cursor|qwen|opencode|codex|windsurf|kilocode|auggie|roo'; return $false }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
function Update-AllExistingAgents {
|
|
||||||
$found = $false
|
|
||||||
$ok = $true
|
|
||||||
if (Test-Path $CLAUDE_FILE) { if (-not (Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code')) { $ok = $false }; $found = $true }
|
|
||||||
if (Test-Path $GEMINI_FILE) { if (-not (Update-AgentFile -TargetFile $GEMINI_FILE -AgentName 'Gemini CLI')) { $ok = $false }; $found = $true }
|
|
||||||
if (Test-Path $COPILOT_FILE) { if (-not (Update-AgentFile -TargetFile $COPILOT_FILE -AgentName 'GitHub Copilot')) { $ok = $false }; $found = $true }
|
|
||||||
if (Test-Path $CURSOR_FILE) { if (-not (Update-AgentFile -TargetFile $CURSOR_FILE -AgentName 'Cursor IDE')) { $ok = $false }; $found = $true }
|
|
||||||
if (Test-Path $QWEN_FILE) { if (-not (Update-AgentFile -TargetFile $QWEN_FILE -AgentName 'Qwen Code')) { $ok = $false }; $found = $true }
|
|
||||||
if (Test-Path $AGENTS_FILE) { if (-not (Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Codex/opencode')) { $ok = $false }; $found = $true }
|
|
||||||
if (Test-Path $WINDSURF_FILE) { if (-not (Update-AgentFile -TargetFile $WINDSURF_FILE -AgentName 'Windsurf')) { $ok = $false }; $found = $true }
|
|
||||||
if (Test-Path $KILOCODE_FILE) { if (-not (Update-AgentFile -TargetFile $KILOCODE_FILE -AgentName 'Kilo Code')) { $ok = $false }; $found = $true }
|
|
||||||
if (Test-Path $AUGGIE_FILE) { if (-not (Update-AgentFile -TargetFile $AUGGIE_FILE -AgentName 'Auggie CLI')) { $ok = $false }; $found = $true }
|
|
||||||
if (Test-Path $ROO_FILE) { if (-not (Update-AgentFile -TargetFile $ROO_FILE -AgentName 'Roo Code')) { $ok = $false }; $found = $true }
|
|
||||||
if (-not $found) {
|
|
||||||
Write-Info 'No existing agent files found, creating default Claude file...'
|
|
||||||
if (-not (Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code')) { $ok = $false }
|
|
||||||
}
|
|
||||||
return $ok
|
|
||||||
}
|
|
||||||
|
|
||||||
function Print-Summary {
|
|
||||||
Write-Host ''
|
|
||||||
Write-Info 'Summary of changes:'
|
|
||||||
if ($NEW_LANG) { Write-Host " - Added language: $NEW_LANG" }
|
|
||||||
if ($NEW_FRAMEWORK) { Write-Host " - Added framework: $NEW_FRAMEWORK" }
|
|
||||||
if ($NEW_DB -and $NEW_DB -ne 'N/A') { Write-Host " - Added database: $NEW_DB" }
|
|
||||||
Write-Host ''
|
|
||||||
Write-Info 'Usage: ./update-agent-context.ps1 [-AgentType claude|gemini|copilot|cursor|qwen|opencode|codex|windsurf|kilocode|auggie|roo]'
|
|
||||||
}
|
|
||||||
|
|
||||||
function Main {
|
|
||||||
Validate-Environment
|
|
||||||
Write-Info "=== Updating agent context files for feature $CURRENT_BRANCH ==="
|
|
||||||
if (-not (Parse-PlanData -PlanFile $NEW_PLAN)) { Write-Err 'Failed to parse plan data'; exit 1 }
|
|
||||||
$success = $true
|
|
||||||
if ($AgentType) {
|
|
||||||
Write-Info "Updating specific agent: $AgentType"
|
|
||||||
if (-not (Update-SpecificAgent -Type $AgentType)) { $success = $false }
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
Write-Info 'No agent specified, updating all existing agent files...'
|
|
||||||
if (-not (Update-AllExistingAgents)) { $success = $false }
|
|
||||||
}
|
|
||||||
Print-Summary
|
|
||||||
if ($success) { Write-Success 'Agent context update completed successfully'; exit 0 } else { Write-Err 'Agent context update completed with errors'; exit 1 }
|
|
||||||
}
|
|
||||||
|
|
||||||
Main
|
|
||||||
|
|||||||
@@ -2,15 +2,15 @@
|
|||||||
|
|
||||||
## The Power Inversion
|
||||||
|
|
||||||
For decades, code has been king. Specifications served code—they were the scaffolding we built and then discarded once the "real work" of coding began. We wrote PRDs to guide development, created design docs to inform implementation, drew diagrams to visualize architecture. But these were always subordinate to the code itself. Code was truth. Everything else was, at best, good intentions. Code was the source of truth, and as it moved forward, specs rarely kept pace. As the asset (code) and the implementation are one, it's not easy to have a parallel implementation without trying to build from the code.
|
For decades, code has been king. Specifications served code—they were the scaffolding we built and then discarded once the "real work" of coding began. We wrote PRDs to guide development, created design docs to inform implementation, drew diagrams to visualize architecture. But these were always subordinate to the code itself. Code was truth. Everything else was, at best, good intentions. Code was the source of truth, as it moved forward, and spec's rarely kept pace. As the asset (code) and the implementation are one, it's not easy to have a parallel implementation without trying to build from the code.
|
||||||
|
|
||||||
Spec-Driven Development (SDD) inverts this power structure. Specifications don't serve code—code serves specifications. The Product Requirements Document (PRD) isn't a guide for implementation; it's the source that generates implementation. Technical plans aren't documents that inform coding; they're precise definitions that produce code. This isn't an incremental improvement to how we build software. It's a fundamental rethinking of what drives development.
|
Spec-Driven Development (SDD) inverts this power structure. Specifications don't serve code—code serves specifications. The (Product Requirements Document-Specification) PRD isn't a guide for implementation; it's the source that generates implementation. Technical plans aren't documents that inform coding; they're precise definitions that produce code. This isn't an incremental improvement to how we build software. It's a fundamental rethinking of what drives development.
|
||||||
|
|
||||||
The gap between specification and implementation has plagued software development since its inception. We've tried to bridge it with better documentation, more detailed requirements, stricter processes. These approaches fail because they accept the gap as inevitable. They try to narrow it but never eliminate it. SDD eliminates the gap by making specifications, and the concrete implementation plans born from them, executable. When specifications and implementation plans generate code, there is no gap—only transformation.
|
The gap between specification and implementation has plagued software development since its inception. We've tried to bridge it with better documentation, more detailed requirements, stricter processes. These approaches fail because they accept the gap as inevitable. They try to narrow it but never eliminate it. SDD eliminates the gap by making specifications or and their concrete implementation plans born from the specification executable. When specifications to implementation plans generate code, there is no gap—only transformation.
|
||||||
|
|
||||||
This transformation is now possible because AI can understand and implement complex specifications, and create detailed implementation plans. But raw AI generation without structure produces chaos. SDD provides that structure through specifications and subsequent implementation plans that are precise, complete, and unambiguous enough to generate working systems. The specification becomes the primary artifact. Code becomes its expression (as an implementation from the implementation plan) in a particular language and framework.
|
||||||
|
|
||||||
In this new world, maintaining software means evolving specifications. The intent of the development team is expressed in natural language ("**intent-driven development**"), design assets, core principles and other guidelines. The **lingua franca** of development moves to a higher level, and code is the last-mile approach.
|
In this new world, maintaining software means evolving specifications. The intent of the development team is expressed in natural language ("**intent-driven development**"), design assets, core principles and other guidelines . The **lingua franca** of development moves to a higher-level, and code is the last-mile approach.
|
||||||
|
|
||||||
Debugging means fixing specifications and their implementation plans that generate incorrect code. Refactoring means restructuring for clarity. The entire development workflow reorganizes around specifications as the central source of truth, with implementation plans and code as the continuously regenerated output. Updating apps with new features, or creating a new parallel implementation because we are creative beings, means revisiting the specification and creating new implementation plans. This process is therefore a 0 -> 1, (1', ..), 2, 3, N.
|
||||||
|
|
||||||
@@ -18,7 +18,7 @@ The development team focuses in on their creativity, experimentation, their crit
|
|||||||
|
|
||||||
## The SDD Workflow in Practice
|
||||||
|
|
||||||
The workflow begins with an idea—often vague and incomplete. Through iterative dialogue with AI, this idea becomes a comprehensive PRD. The AI asks clarifying questions, identifies edge cases, and helps define precise acceptance criteria. What might take days of meetings and documentation in traditional development happens in hours of focused specification work. This transforms the traditional SDLC—requirements and design become continuous activities rather than discrete phases. This is supportive of a **team process**, where team-reviewed specifications are expressed and versioned, created in branches, and merged.
|
The workflow begins with an idea—often vague and incomplete. Through iterative dialogue with AI, this idea becomes a comprehensive PRD. The AI asks clarifying questions, identifies edge cases, and helps define precise acceptance criteria. What might take days of meetings and documentation in traditional development happens in hours of focused specification work. This transforms the traditional SDLC—requirements and design become continuous activities rather than discrete phases. This is supportive of a **team process**, that's team reviewed-specifications are expressed and versioned, created in branches, and merged.
|
||||||
|
|
||||||
When a product manager updates acceptance criteria, implementation plans automatically flag affected technical decisions. When an architect discovers a better pattern, the PRD updates to reflect new possibilities.
|
||||||
|
|
||||||
@@ -34,13 +34,13 @@ The feedback loop extends beyond initial development. Production metrics and inc
|
|||||||
|
|
||||||
Three trends make SDD not just possible but necessary:
|
||||||
|
|
||||||
First, AI capabilities have reached a threshold where natural language specifications can reliably generate working code. This isn't about replacing developers—it's about amplifying their effectiveness by automating the mechanical translation from specification to implementation. It can amplify exploration and creativity, support "start-over" easily, and support addition, subtraction, and critical thinking.
|
First, AI capabilities have reached a threshold where natural language specifications can reliably generate working code. This isn't about replacing developers—it's about amplifying their effectiveness by automating the mechanical translation from specification to implementation. It can amplify exploration and creativity, it can support "start-over" easily, it supports addition subtraction and critical thinking.
|
||||||
|
|
||||||
Second, software complexity continues to grow exponentially. Modern systems integrate dozens of services, frameworks, and dependencies. Keeping all these pieces aligned with original intent through manual processes becomes increasingly difficult. SDD provides systematic alignment through specification-driven generation. Frameworks may evolve to provide AI-first support, not human-first support, or architect around reusable components.
|
||||||
|
|
||||||
Third, the pace of change accelerates. Requirements change far more rapidly today than ever before. Pivoting is no longer exceptional—it's expected. Modern product development demands rapid iteration based on user feedback, market conditions, and competitive pressures. Traditional development treats these changes as disruptions. Each pivot requires manually propagating changes through documentation, design, and code. The result is either slow, careful updates that limit velocity, or fast, reckless changes that accumulate technical debt.
|
||||||
|
|
||||||
SDD can support what-if/simulation experiments: "If we need to re-implement or change the application to promote a business need to sell more T-shirts, how would we implement and experiment for that?"
|
SDD can support what-if/simulation experiments, "If we need to re-implement or change the application to promote a business need to sell more T-shirts, how would we implement and experiment for that?".
|
||||||
|
|
||||||
SDD transforms requirement changes from obstacles into normal workflow. When specifications drive implementation, pivots become systematic regenerations rather than manual rewrites. Change a core requirement in the PRD, and affected implementation plans update automatically. Modify a user story, and corresponding API endpoints regenerate. This isn't just about initial development—it's about maintaining engineering velocity through inevitable changes.
|
||||||
|
|
||||||
|
|||||||
5
src/apm_cli/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
"""APM-CLI package."""
|
||||||
|
|
||||||
|
from .version import get_version
|
||||||
|
|
||||||
|
__version__ = get_version()
|
||||||
1
src/apm_cli/adapters/__init__.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
"""Adapters package."""
|
||||||
1
src/apm_cli/adapters/client/__init__.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
"""Client adapters package."""
|
||||||
39
src/apm_cli/adapters/client/base.py
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
"""Base adapter interface for MCP clients."""
|
||||||
|
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
|
||||||
|
|
||||||
|
class MCPClientAdapter(ABC):
|
||||||
|
"""Base adapter for MCP clients."""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def get_config_path(self):
|
||||||
|
"""Get the path to the MCP configuration file."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def update_config(self, config_updates):
|
||||||
|
"""Update the MCP configuration."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def get_current_config(self):
|
||||||
|
"""Get the current MCP configuration."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def configure_mcp_server(self, server_url, server_name=None, enabled=True, env_overrides=None, server_info_cache=None, runtime_vars=None):
|
||||||
|
"""Configure an MCP server in the client configuration.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
server_url (str): URL of the MCP server.
|
||||||
|
server_name (str, optional): Name of the server. Defaults to None.
|
||||||
|
enabled (bool, optional): Whether to enable the server. Defaults to True.
|
||||||
|
env_overrides (dict, optional): Environment variable overrides. Defaults to None.
|
||||||
|
server_info_cache (dict, optional): Pre-fetched server info to avoid duplicate registry calls.
|
||||||
|
runtime_vars (dict, optional): Runtime variable values. Defaults to None.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if successful, False otherwise.
|
||||||
|
"""
|
||||||
|
pass
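To make the contract concrete, here is a minimal sketch of an adapter implementing this interface. It assumes a simple JSON file as the backing store; the file name, merge behaviour and server entry shape are illustrative only, and the real adapters below define their own paths and formats.

```python
import json
from pathlib import Path

# Minimal illustrative adapter for the MCPClientAdapter interface defined above.
class JsonFileClientAdapter(MCPClientAdapter):
    def __init__(self, config_file="mcp-demo.json"):  # hypothetical file name
        self._path = Path(config_file)

    def get_config_path(self):
        return str(self._path)

    def get_current_config(self):
        try:
            return json.loads(self._path.read_text())
        except (FileNotFoundError, json.JSONDecodeError):
            return {}

    def update_config(self, config_updates):
        # Shallow-merge updates into the existing configuration and write it back.
        config = self.get_current_config()
        config.update(config_updates)
        self._path.write_text(json.dumps(config, indent=2))

    def configure_mcp_server(self, server_url, server_name=None, enabled=True,
                             env_overrides=None, server_info_cache=None, runtime_vars=None):
        if not server_url:
            return False
        name = server_name or server_url.split("/")[-1]
        self.update_config({name: {"url": server_url, "enabled": enabled}})
        return True
```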
|
||||||
528
src/apm_cli/adapters/client/codex.py
Normal file
@@ -0,0 +1,528 @@
|
|||||||
|
"""OpenAI Codex CLI implementation of MCP client adapter.
|
||||||
|
|
||||||
|
This adapter implements the Codex CLI-specific handling of MCP server configuration,
|
||||||
|
targeting the global ~/.codex/config.toml file as specified in the MCP installation
|
||||||
|
architecture specification.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import toml
|
||||||
|
from pathlib import Path
|
||||||
|
from .base import MCPClientAdapter
|
||||||
|
from ...registry.client import SimpleRegistryClient
|
||||||
|
from ...registry.integration import RegistryIntegration
|
||||||
|
|
||||||
|
|
||||||
|
class CodexClientAdapter(MCPClientAdapter):
|
||||||
|
"""Codex CLI implementation of MCP client adapter.
|
||||||
|
|
||||||
|
This adapter handles Codex CLI-specific configuration for MCP servers using
|
||||||
|
a global ~/.codex/config.toml file, following the TOML format for
|
||||||
|
MCP server configuration.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, registry_url=None):
|
||||||
|
"""Initialize the Codex CLI client adapter.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
registry_url (str, optional): URL of the MCP registry.
|
||||||
|
If not provided, uses the MCP_REGISTRY_URL environment variable
|
||||||
|
or falls back to the default GitHub registry.
|
||||||
|
"""
|
||||||
|
self.registry_client = SimpleRegistryClient(registry_url)
|
||||||
|
self.registry_integration = RegistryIntegration(registry_url)
|
||||||
|
|
||||||
|
def get_config_path(self):
|
||||||
|
"""Get the path to the Codex CLI MCP configuration file.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Path to ~/.codex/config.toml
|
||||||
|
"""
|
||||||
|
codex_dir = Path.home() / ".codex"
|
||||||
|
return str(codex_dir / "config.toml")
|
||||||
|
|
||||||
|
def update_config(self, config_updates):
|
||||||
|
"""Update the Codex CLI MCP configuration.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config_updates (dict): Configuration updates to apply.
|
||||||
|
"""
|
||||||
|
current_config = self.get_current_config()
|
||||||
|
|
||||||
|
# Ensure mcp_servers section exists
|
||||||
|
if "mcp_servers" not in current_config:
|
||||||
|
current_config["mcp_servers"] = {}
|
||||||
|
|
||||||
|
# Apply updates to mcp_servers section
|
||||||
|
current_config["mcp_servers"].update(config_updates)
|
||||||
|
|
||||||
|
# Write back to file
|
||||||
|
config_path = Path(self.get_config_path())
|
||||||
|
|
||||||
|
# Ensure directory exists
|
||||||
|
config_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
with open(config_path, 'w') as f:
|
||||||
|
toml.dump(current_config, f)
|
||||||
|
|
||||||
|
def get_current_config(self):
|
||||||
|
"""Get the current Codex CLI MCP configuration.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
dict: Current configuration, or empty dict if file doesn't exist.
|
||||||
|
"""
|
||||||
|
config_path = self.get_config_path()
|
||||||
|
|
||||||
|
if not os.path.exists(config_path):
|
||||||
|
return {}
|
||||||
|
|
||||||
|
try:
|
||||||
|
with open(config_path, 'r') as f:
|
||||||
|
return toml.load(f)
|
||||||
|
except (toml.TomlDecodeError, IOError):
|
||||||
|
return {}
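As a rough sketch of what these read/write helpers produce, the snippet below renders one hypothetical mcp_servers entry with the same toml library. The server name, npm package and values are invented for illustration rather than taken from the registry.

```python
import toml

sample = {
    "mcp_servers": {
        "example-mcp-server": {                     # hypothetical server name
            "command": "npx",
            "args": ["-y", "@example/mcp-server"],  # hypothetical npm package
            "env": {"GITHUB_TOOLSETS": "context"},
        }
    }
}
# Prints a [mcp_servers.example-mcp-server] table with command, args and env keys,
# which is the shape update_config() merges into ~/.codex/config.toml.
print(toml.dumps(sample))
```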
|
||||||
|
|
||||||
|
def configure_mcp_server(self, server_url, server_name=None, enabled=True, env_overrides=None, server_info_cache=None, runtime_vars=None):
|
||||||
|
"""Configure an MCP server in Codex CLI configuration.
|
||||||
|
|
||||||
|
This method follows the Codex CLI MCP configuration format with
|
||||||
|
mcp_servers sections in the TOML configuration.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
server_url (str): URL or identifier of the MCP server.
|
||||||
|
server_name (str, optional): Name of the server. Defaults to None.
|
||||||
|
enabled (bool, optional): Ignored parameter, kept for API compatibility.
|
||||||
|
env_overrides (dict, optional): Pre-collected environment variable overrides.
|
||||||
|
server_info_cache (dict, optional): Pre-fetched server info to avoid duplicate registry calls.
|
||||||
|
runtime_vars (dict, optional): Runtime variable values. Defaults to None.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if successful, False otherwise.
|
||||||
|
"""
|
||||||
|
if not server_url:
|
||||||
|
print("Error: server_url cannot be empty")
|
||||||
|
return False
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Use cached server info if available, otherwise fetch from registry
|
||||||
|
if server_info_cache and server_url in server_info_cache:
|
||||||
|
server_info = server_info_cache[server_url]
|
||||||
|
else:
|
||||||
|
# Fallback to registry lookup if not cached
|
||||||
|
server_info = self.registry_client.find_server_by_reference(server_url)
|
||||||
|
|
||||||
|
# Fail if server is not found in registry - security requirement
|
||||||
|
if not server_info:
|
||||||
|
print(f"Error: MCP server '{server_url}' not found in registry")
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Check for remote servers early - Codex doesn't support remote/SSE servers
|
||||||
|
remotes = server_info.get("remotes", [])
|
||||||
|
packages = server_info.get("packages", [])
|
||||||
|
|
||||||
|
# If server has only remote endpoints and no packages, it's a remote-only server
|
||||||
|
if remotes and not packages:
|
||||||
|
print(f"⚠️ Warning: MCP server '{server_url}' is a remote server (SSE type)")
|
||||||
|
print(" Codex CLI only supports local servers with command/args configuration")
|
||||||
|
print(" Remote servers are not supported by Codex CLI")
|
||||||
|
print(" Skipping installation for Codex CLI")
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Determine the server name for configuration key
|
||||||
|
if server_name:
|
||||||
|
# Use explicitly provided server name
|
||||||
|
config_key = server_name
|
||||||
|
else:
|
||||||
|
# Extract name from server_url (part after last slash)
|
||||||
|
# For URLs like "microsoft/azure-devops-mcp" -> "azure-devops-mcp"
|
||||||
|
# For URLs like "github/github-mcp-server" -> "github-mcp-server"
|
||||||
|
if '/' in server_url:
|
||||||
|
config_key = server_url.split('/')[-1]
|
||||||
|
else:
|
||||||
|
# Fallback to full server_url if no slash
|
||||||
|
config_key = server_url
|
||||||
|
|
||||||
|
# Generate server configuration with environment variable resolution
|
||||||
|
server_config = self._format_server_config(server_info, env_overrides, runtime_vars)
|
||||||
|
|
||||||
|
# Update configuration using the chosen key
|
||||||
|
self.update_config({config_key: server_config})
|
||||||
|
|
||||||
|
print(f"Successfully configured MCP server '{config_key}' for Codex CLI")
|
||||||
|
return True
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error configuring MCP server: {e}")
|
||||||
|
return False
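A minimal usage sketch, assuming a registry entry referenced as github/github-mcp-server (a reference of the form the comments above describe) and a token passed in via env_overrides; the variable name and value are placeholders.

```python
adapter = CodexClientAdapter()

ok = adapter.configure_mcp_server(
    "github/github-mcp-server",  # assumed registry reference
    env_overrides={"GITHUB_PERSONAL_ACCESS_TOKEN": "ghp-placeholder"},  # placeholder
)
if not ok:
    print("Codex CLI installation skipped or failed")
```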
|
||||||
|
|
||||||
|
def _format_server_config(self, server_info, env_overrides=None, runtime_vars=None):
|
||||||
|
"""Format server information into Codex CLI MCP configuration format.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
server_info (dict): Server information from registry.
|
||||||
|
env_overrides (dict, optional): Pre-collected environment variable overrides.
|
||||||
|
runtime_vars (dict, optional): Runtime variable values.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
dict: Formatted server configuration for Codex CLI.
|
||||||
|
"""
|
||||||
|
# Default configuration structure with registry ID for conflict detection
|
||||||
|
config = {
|
||||||
|
"command": "unknown",
|
||||||
|
"args": [],
|
||||||
|
"env": {},
|
||||||
|
"id": server_info.get("id", "") # Add registry UUID for conflict detection
|
||||||
|
}
|
||||||
|
|
||||||
|
# Note: Remote servers (SSE type) are handled in configure_mcp_server and rejected early
|
||||||
|
# This method only handles local servers with packages
|
||||||
|
|
||||||
|
# Get packages from server info
|
||||||
|
packages = server_info.get("packages", [])
|
||||||
|
|
||||||
|
if not packages:
|
||||||
|
# If no packages are available, this indicates incomplete server configuration
|
||||||
|
# This should fail installation with a clear error message
|
||||||
|
raise ValueError(f"MCP server has no package information available in registry. "
|
||||||
|
f"This appears to be a temporary registry issue or the server is remote-only. "
|
||||||
|
f"Server: {server_info.get('name', 'unknown')}")
|
||||||
|
|
||||||
|
if packages:
|
||||||
|
# Use the first package for configuration (prioritize npm, then docker, then others)
|
||||||
|
package = self._select_best_package(packages)
|
||||||
|
|
||||||
|
if package:
|
||||||
|
registry_name = package.get("registry_name", "")
|
||||||
|
package_name = package.get("name", "")
|
||||||
|
runtime_hint = package.get("runtime_hint", "")
|
||||||
|
runtime_arguments = package.get("runtime_arguments", [])
|
||||||
|
package_arguments = package.get("package_arguments", [])
|
||||||
|
env_vars = package.get("environment_variables", [])
|
||||||
|
|
||||||
|
# Resolve environment variables first
|
||||||
|
resolved_env = self._process_environment_variables(env_vars, env_overrides)
|
||||||
|
|
||||||
|
# Process arguments to extract simple string values
|
||||||
|
processed_runtime_args = self._process_arguments(runtime_arguments, resolved_env, runtime_vars)
|
||||||
|
processed_package_args = self._process_arguments(package_arguments, resolved_env, runtime_vars)
|
||||||
|
|
||||||
|
# Generate command and args based on package type
|
||||||
|
if registry_name == "npm":
|
||||||
|
config["command"] = runtime_hint or "npx"
|
||||||
|
# For npm packages, use runtime_arguments directly as they contain the complete npx command
|
||||||
|
config["args"] = processed_runtime_args + processed_package_args
|
||||||
|
# For NPM packages, also use env block for environment variables
|
||||||
|
if resolved_env:
|
||||||
|
config["env"] = resolved_env
|
||||||
|
elif registry_name == "docker":
|
||||||
|
config["command"] = "docker"
|
||||||
|
|
||||||
|
# For Docker packages in Codex TOML format:
|
||||||
|
# - Ensure all environment variables from resolved_env are represented as -e flags in args
|
||||||
|
# - Put actual environment variable values in separate [env] section
|
||||||
|
config["args"] = self._ensure_docker_env_flags(processed_runtime_args + processed_package_args, resolved_env)
|
||||||
|
|
||||||
|
# Environment variables go in separate env section for Codex TOML format
|
||||||
|
if resolved_env:
|
||||||
|
config["env"] = resolved_env
|
||||||
|
elif registry_name == "pypi":
|
||||||
|
config["command"] = runtime_hint or "uvx"
|
||||||
|
config["args"] = [package_name] + processed_runtime_args + processed_package_args
|
||||||
|
# For PyPI packages, use env block for environment variables
|
||||||
|
if resolved_env:
|
||||||
|
config["env"] = resolved_env
|
||||||
|
elif registry_name == "homebrew":
|
||||||
|
# For homebrew packages, assume the binary name is the command
|
||||||
|
config["command"] = package_name.split('/')[-1] if '/' in package_name else package_name
|
||||||
|
config["args"] = processed_runtime_args + processed_package_args
|
||||||
|
# For Homebrew packages, use env block for environment variables
|
||||||
|
if resolved_env:
|
||||||
|
config["env"] = resolved_env
|
||||||
|
else:
|
||||||
|
# Generic package handling
|
||||||
|
config["command"] = runtime_hint or package_name
|
||||||
|
config["args"] = processed_runtime_args + processed_package_args
|
||||||
|
# For generic packages, use env block for environment variables
|
||||||
|
if resolved_env:
|
||||||
|
config["env"] = resolved_env
|
||||||
|
|
||||||
|
return config
|
||||||
|
|
||||||
|
def _process_arguments(self, arguments, resolved_env=None, runtime_vars=None):
|
||||||
|
"""Process argument objects to extract simple string values with environment resolution.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
arguments (list): List of argument objects from registry.
|
||||||
|
resolved_env (dict): Resolved environment variables.
|
||||||
|
runtime_vars (dict): Runtime variable values.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
list: List of processed argument strings.
|
||||||
|
"""
|
||||||
|
if resolved_env is None:
|
||||||
|
resolved_env = {}
|
||||||
|
if runtime_vars is None:
|
||||||
|
runtime_vars = {}
|
||||||
|
|
||||||
|
processed = []
|
||||||
|
|
||||||
|
for arg in arguments:
|
||||||
|
if isinstance(arg, dict):
|
||||||
|
# Extract value from argument object
|
||||||
|
arg_type = arg.get("type", "")
|
||||||
|
if arg_type == "positional":
|
||||||
|
value = arg.get("value", arg.get("default", ""))
|
||||||
|
if value:
|
||||||
|
# Resolve both environment and runtime variable placeholders with actual values
|
||||||
|
processed_value = self._resolve_variable_placeholders(str(value), resolved_env, runtime_vars)
|
||||||
|
processed.append(processed_value)
|
||||||
|
elif arg_type == "named":
|
||||||
|
# For named arguments, the flag name is in the "value" field
|
||||||
|
flag_name = arg.get("value", "")
|
||||||
|
if flag_name:
|
||||||
|
processed.append(flag_name)
|
||||||
|
# Some named arguments might have additional values (rare)
|
||||||
|
additional_value = arg.get("name", "")
|
||||||
|
if additional_value and additional_value != flag_name and not additional_value.startswith("-"):
|
||||||
|
processed_value = self._resolve_variable_placeholders(str(additional_value), resolved_env, runtime_vars)
|
||||||
|
processed.append(processed_value)
|
||||||
|
elif isinstance(arg, str):
|
||||||
|
# Already a string, use as-is but resolve variable placeholders
|
||||||
|
processed_value = self._resolve_variable_placeholders(arg, resolved_env, runtime_vars)
|
||||||
|
processed.append(processed_value)
|
||||||
|
|
||||||
|
return processed
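For illustration, given argument objects shaped like the ones handled above (the flag name and placeholder are invented), the method flattens them into plain strings and substitutes known values:

```python
args = [
    {"type": "positional", "value": "run"},
    {"type": "named", "value": "--toolsets"},            # hypothetical flag
    {"type": "positional", "value": "<GITHUB_TOOLSETS>"},
]
resolved_env = {"GITHUB_TOOLSETS": "context"}

# adapter is a CodexClientAdapter instance
print(adapter._process_arguments(args, resolved_env, runtime_vars={}))
# ['run', '--toolsets', 'context']
```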
|
||||||
|
|
||||||
|
def _process_environment_variables(self, env_vars, env_overrides=None):
|
||||||
|
"""Process environment variable definitions and resolve actual values.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
env_vars (list): List of environment variable definitions.
|
||||||
|
env_overrides (dict, optional): Pre-collected environment variable overrides.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
dict: Dictionary of resolved environment variable values.
|
||||||
|
"""
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from rich.prompt import Prompt
|
||||||
|
|
||||||
|
resolved = {}
|
||||||
|
env_overrides = env_overrides or {}
|
||||||
|
|
||||||
|
# If env_overrides is provided, it means the CLI has already handled environment variable collection
|
||||||
|
# In this case, we should NEVER prompt for additional variables
|
||||||
|
skip_prompting = bool(env_overrides)
|
||||||
|
|
||||||
|
# Check for CI/automated environment via APM_E2E_TESTS flag (more reliable than TTY detection)
|
||||||
|
if os.getenv('APM_E2E_TESTS') == '1':
|
||||||
|
skip_prompting = True
|
||||||
|
print(f"💡 APM_E2E_TESTS detected, will skip environment variable prompts")
|
||||||
|
|
||||||
|
# Also skip prompting if we're in a non-interactive environment (fallback)
|
||||||
|
is_interactive = sys.stdin.isatty() and sys.stdout.isatty()
|
||||||
|
if not is_interactive:
|
||||||
|
skip_prompting = True
|
||||||
|
|
||||||
|
# Add default GitHub MCP server environment variables for essential functionality first
|
||||||
|
# This ensures variables have defaults when user provides empty values or they're optional
|
||||||
|
default_github_env = {
|
||||||
|
"GITHUB_TOOLSETS": "context",
|
||||||
|
"GITHUB_DYNAMIC_TOOLSETS": "1"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Track which variables were explicitly provided with empty values (user wants defaults)
|
||||||
|
empty_value_vars = set()
|
||||||
|
if env_overrides:
|
||||||
|
for key, value in env_overrides.items():
|
||||||
|
if key in env_overrides and (not value or not value.strip()):
|
||||||
|
empty_value_vars.add(key)
|
||||||
|
|
||||||
|
for env_var in env_vars:
|
||||||
|
if isinstance(env_var, dict):
|
||||||
|
name = env_var.get("name", "")
|
||||||
|
description = env_var.get("description", "")
|
||||||
|
required = env_var.get("required", True)
|
||||||
|
|
||||||
|
if name:
|
||||||
|
# First check overrides, then environment
|
||||||
|
value = env_overrides.get(name) or os.getenv(name)
|
||||||
|
|
||||||
|
# Only prompt if not provided in overrides or environment AND it's required AND we're not in managed override mode
|
||||||
|
if not value and required and not skip_prompting:
|
||||||
|
# Only prompt if not provided in overrides
|
||||||
|
prompt_text = f"Enter value for {name}"
|
||||||
|
if description:
|
||||||
|
prompt_text += f" ({description})"
|
||||||
|
value = Prompt.ask(prompt_text, password=True if "token" in name.lower() or "key" in name.lower() else False)
|
||||||
|
|
||||||
|
# Add variable if it has a value OR if user explicitly provided empty and we have a default
|
||||||
|
if value and value.strip():
|
||||||
|
resolved[name] = value
|
||||||
|
elif name in empty_value_vars and name in default_github_env:
|
||||||
|
# User provided empty value and we have a default - use default
|
||||||
|
resolved[name] = default_github_env[name]
|
||||||
|
elif not required and name in default_github_env:
|
||||||
|
# Variable is optional and we have a default - use default
|
||||||
|
resolved[name] = default_github_env[name]
|
||||||
|
elif skip_prompting and name in default_github_env:
|
||||||
|
# Non-interactive environment and we have a default - use default
|
||||||
|
resolved[name] = default_github_env[name]
|
||||||
|
|
||||||
|
return resolved
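A small sketch of the resolution rules, with an invented variable definition list and assuming GITHUB_TOOLSETS is not set in the calling environment:

```python
env_vars = [
    {"name": "GITHUB_PERSONAL_ACCESS_TOKEN", "description": "GitHub token", "required": True},
    {"name": "GITHUB_TOOLSETS", "required": False},
]
overrides = {"GITHUB_PERSONAL_ACCESS_TOKEN": "ghp-placeholder"}  # placeholder value

# Overrides are present, so no interactive prompting happens; the optional
# variable falls back to the default_github_env value defined above.
print(adapter._process_environment_variables(env_vars, overrides))
# {'GITHUB_PERSONAL_ACCESS_TOKEN': 'ghp-placeholder', 'GITHUB_TOOLSETS': 'context'}
```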
|
||||||
|
|
||||||
|
def _resolve_variable_placeholders(self, value, resolved_env, runtime_vars):
|
||||||
|
"""Resolve both environment and runtime variable placeholders in values.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
value (str): Value that may contain placeholders like <TOKEN_NAME> or {runtime_var}
|
||||||
|
resolved_env (dict): Dictionary of resolved environment variables.
|
||||||
|
runtime_vars (dict): Dictionary of resolved runtime variables.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Processed value with actual variable values.
|
||||||
|
"""
|
||||||
|
import re
|
||||||
|
|
||||||
|
if not value:
|
||||||
|
return value
|
||||||
|
|
||||||
|
processed = str(value)
|
||||||
|
|
||||||
|
# Replace <TOKEN_NAME> with actual values from resolved_env (for Docker env vars)
|
||||||
|
env_pattern = r'<([A-Z_][A-Z0-9_]*)>'
|
||||||
|
|
||||||
|
def replace_env_var(match):
|
||||||
|
env_name = match.group(1)
|
||||||
|
return resolved_env.get(env_name, match.group(0)) # Return original if not found
|
||||||
|
|
||||||
|
processed = re.sub(env_pattern, replace_env_var, processed)
|
||||||
|
|
||||||
|
# Replace {runtime_var} with actual values from runtime_vars
|
||||||
|
runtime_pattern = r'\{([a-zA-Z_][a-zA-Z0-9_]*)\}'
|
||||||
|
|
||||||
|
def replace_runtime_var(match):
|
||||||
|
var_name = match.group(1)
|
||||||
|
return runtime_vars.get(var_name, match.group(0)) # Return original if not found
|
||||||
|
|
||||||
|
processed = re.sub(runtime_pattern, replace_runtime_var, processed)
|
||||||
|
|
||||||
|
return processed
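For instance, with invented values, both placeholder styles are substituted in a single pass:

```python
value = "ghcr.io/example/server:<IMAGE_TAG> --workspace {workspace}"  # hypothetical
resolved_env = {"IMAGE_TAG": "latest"}
runtime_vars = {"workspace": "/tmp/demo"}

print(adapter._resolve_variable_placeholders(value, resolved_env, runtime_vars))
# ghcr.io/example/server:latest --workspace /tmp/demo
```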
|
||||||
|
|
||||||
|
def _resolve_env_placeholders(self, value, resolved_env):
|
||||||
|
"""Legacy method for backward compatibility. Use _resolve_variable_placeholders instead."""
|
||||||
|
return self._resolve_variable_placeholders(value, resolved_env, {})
|
||||||
|
|
||||||
|
def _ensure_docker_env_flags(self, base_args, env_vars):
|
||||||
|
"""Ensure all environment variables are represented as -e flags in Docker args.
|
||||||
|
|
||||||
|
For Codex TOML format, Docker args should contain -e flags for ALL environment variables
|
||||||
|
that will be available to the container, while actual values go in the [env] section.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
base_args (list): Base Docker arguments from registry.
|
||||||
|
env_vars (dict): All environment variables that should be available.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
list: Docker arguments with -e flags for all environment variables.
|
||||||
|
"""
|
||||||
|
if not env_vars:
|
||||||
|
return base_args
|
||||||
|
|
||||||
|
result = []
|
||||||
|
existing_env_vars = set()
|
||||||
|
|
||||||
|
# First pass: collect existing -e flags and build result with existing args
|
||||||
|
i = 0
|
||||||
|
while i < len(base_args):
|
||||||
|
arg = base_args[i]
|
||||||
|
result.append(arg)
|
||||||
|
|
||||||
|
# Track existing -e flags
|
||||||
|
if arg == "-e" and i + 1 < len(base_args):
|
||||||
|
env_var_name = base_args[i + 1]
|
||||||
|
existing_env_vars.add(env_var_name)
|
||||||
|
result.append(env_var_name)
|
||||||
|
i += 2
|
||||||
|
else:
|
||||||
|
i += 1
|
||||||
|
|
||||||
|
# Second pass: add -e flags for any environment variables not already present
|
||||||
|
# Insert them after "run" but before the image name (last argument)
|
||||||
|
image_name = result[-1] if result else ""
|
||||||
|
if image_name and not image_name.startswith("-"):
|
||||||
|
# Remove image name temporarily
|
||||||
|
result.pop()
|
||||||
|
|
||||||
|
# Add missing environment variable flags
|
||||||
|
for env_name in sorted(env_vars.keys()):
|
||||||
|
if env_name not in existing_env_vars:
|
||||||
|
result.extend(["-e", env_name])
|
||||||
|
|
||||||
|
# Add image name back
|
||||||
|
result.append(image_name)
|
||||||
|
else:
|
||||||
|
# If we can't identify image name, just append at the end
|
||||||
|
for env_name in sorted(env_vars.keys()):
|
||||||
|
if env_name not in existing_env_vars:
|
||||||
|
result.extend(["-e", env_name])
|
||||||
|
|
||||||
|
return result
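A quick sketch of the intended transformation, with invented argument and image names:

```python
base_args = ["run", "-i", "--rm", "-e", "GITHUB_TOOLSETS", "ghcr.io/example/server"]
env_vars = {"GITHUB_TOOLSETS": "context", "GITHUB_DYNAMIC_TOOLSETS": "1"}

# The existing -e flag is kept; the missing variable gets a -e flag inserted
# before the image name, while the actual values stay in the separate env table.
print(adapter._ensure_docker_env_flags(base_args, env_vars))
# ['run', '-i', '--rm', '-e', 'GITHUB_TOOLSETS',
#  '-e', 'GITHUB_DYNAMIC_TOOLSETS', 'ghcr.io/example/server']
```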
|
||||||
|
|
||||||
|
def _inject_docker_env_vars(self, args, env_vars):
|
||||||
|
"""Inject environment variables into Docker arguments as -e flags.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
args (list): Original Docker arguments.
|
||||||
|
env_vars (dict): Environment variables to inject.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
list: Updated arguments with environment variables injected as -e flags.
|
||||||
|
"""
|
||||||
|
if not env_vars:
|
||||||
|
return args
|
||||||
|
|
||||||
|
result = []
|
||||||
|
existing_env_vars = set()
|
||||||
|
|
||||||
|
# First pass: collect existing -e flags to avoid duplicates
|
||||||
|
i = 0
|
||||||
|
while i < len(args):
|
||||||
|
if args[i] == "-e" and i + 1 < len(args):
|
||||||
|
existing_env_vars.add(args[i + 1])
|
||||||
|
i += 2
|
||||||
|
else:
|
||||||
|
i += 1
|
||||||
|
|
||||||
|
# Second pass: build the result with new env vars injected after "run"
|
||||||
|
for i, arg in enumerate(args):
|
||||||
|
result.append(arg)
|
||||||
|
# If this is a docker run command, inject new environment variables after "run"
|
||||||
|
if arg == "run":
|
||||||
|
for env_name in env_vars.keys():
|
||||||
|
if env_name not in existing_env_vars:
|
||||||
|
result.extend(["-e", env_name])
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
def _select_best_package(self, packages):
|
||||||
|
"""Select the best package for installation from available packages.
|
||||||
|
|
||||||
|
Prioritizes packages in order: npm, docker, pypi, homebrew, others.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
packages (list): List of package dictionaries.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
dict: Best package to use, or None if no suitable package found.
|
||||||
|
"""
|
||||||
|
priority_order = ["npm", "docker", "pypi", "homebrew"]
|
||||||
|
|
||||||
|
# Sort packages by priority
|
||||||
|
for registry_name in priority_order:
|
||||||
|
for package in packages:
|
||||||
|
if package.get("registry_name") == registry_name:
|
||||||
|
return package
|
||||||
|
|
||||||
|
# If no priority package found, return the first one
|
||||||
|
return packages[0] if packages else None
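For example, with two abbreviated package records, the npm distribution is preferred over the docker one:

```python
packages = [
    {"registry_name": "docker", "name": "ghcr.io/example/server"},  # invented
    {"registry_name": "npm", "name": "@example/mcp-server"},        # invented
]
print(adapter._select_best_package(packages)["registry_name"])
# npm
```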
|
||||||
311
src/apm_cli/adapters/client/vscode.py
Normal file
@@ -0,0 +1,311 @@
|
|||||||
|
"""VSCode implementation of MCP client adapter.
|
||||||
|
|
||||||
|
This adapter implements the VSCode-specific handling of MCP server configuration,
|
||||||
|
following the official documentation at:
|
||||||
|
https://code.visualstudio.com/docs/copilot/chat/mcp-servers
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
from .base import MCPClientAdapter
|
||||||
|
from ...registry.client import SimpleRegistryClient
|
||||||
|
from ...registry.integration import RegistryIntegration
|
||||||
|
|
||||||
|
|
||||||
|
class VSCodeClientAdapter(MCPClientAdapter):
|
||||||
|
"""VSCode implementation of MCP client adapter.
|
||||||
|
|
||||||
|
This adapter handles VSCode-specific configuration for MCP servers using
|
||||||
|
a repository-level .vscode/mcp.json file, following the format specified
|
||||||
|
in the VSCode documentation.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, registry_url=None):
|
||||||
|
"""Initialize the VSCode client adapter.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
registry_url (str, optional): URL of the MCP registry.
|
||||||
|
If not provided, uses the MCP_REGISTRY_URL environment variable
|
||||||
|
or falls back to the default demo registry.
|
||||||
|
"""
|
||||||
|
self.registry_client = SimpleRegistryClient(registry_url)
|
||||||
|
self.registry_integration = RegistryIntegration(registry_url)
|
||||||
|
|
||||||
|
def get_config_path(self):
|
||||||
|
"""Get the path to the VSCode MCP configuration file in the repository.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Path to the .vscode/mcp.json file.
|
||||||
|
"""
|
||||||
|
# Use the current working directory as the repository root
|
||||||
|
repo_root = Path(os.getcwd())
|
||||||
|
|
||||||
|
# Path to .vscode/mcp.json in the repository
|
||||||
|
vscode_dir = repo_root / ".vscode"
|
||||||
|
mcp_config_path = vscode_dir / "mcp.json"
|
||||||
|
|
||||||
|
# Create the .vscode directory if it doesn't exist
|
||||||
|
try:
|
||||||
|
if not vscode_dir.exists():
|
||||||
|
vscode_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Warning: Could not create .vscode directory: {e}")
|
||||||
|
|
||||||
|
return str(mcp_config_path)
|
||||||
|
|
||||||
|
def update_config(self, new_config):
|
||||||
|
"""Update the VSCode MCP configuration with new values.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
new_config (dict): Complete configuration object to write.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if successful, False otherwise.
|
||||||
|
"""
|
||||||
|
config_path = self.get_config_path()
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Write the updated config
|
||||||
|
with open(config_path, "w", encoding="utf-8") as f:
|
||||||
|
json.dump(new_config, f, indent=2)
|
||||||
|
|
||||||
|
return True
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error updating VSCode MCP configuration: {e}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
def get_current_config(self):
|
||||||
|
"""Get the current VSCode MCP configuration.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
dict: Current VSCode MCP configuration from the local .vscode/mcp.json file.
|
||||||
|
"""
|
||||||
|
config_path = self.get_config_path()
|
||||||
|
|
||||||
|
try:
|
||||||
|
try:
|
||||||
|
with open(config_path, "r", encoding="utf-8") as f:
|
||||||
|
return json.load(f)
|
||||||
|
except (FileNotFoundError, json.JSONDecodeError):
|
||||||
|
return {}
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error reading VSCode MCP configuration: {e}")
|
||||||
|
return {}
|
||||||
|
|
||||||
|
def configure_mcp_server(self, server_url, server_name=None, enabled=True, env_overrides=None, server_info_cache=None, runtime_vars=None):
|
||||||
|
"""Configure an MCP server in VS Code mcp.json file.
|
||||||
|
|
||||||
|
This method updates the .vscode/mcp.json file to add or update
|
||||||
|
an MCP server configuration.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
server_url (str): URL or identifier of the MCP server.
|
||||||
|
server_name (str, optional): Name of the server. Defaults to None.
|
||||||
|
enabled (bool, optional): Whether to enable the server. Defaults to True.
|
||||||
|
env_overrides (dict, optional): Environment variable overrides. Defaults to None.
|
||||||
|
server_info_cache (dict, optional): Pre-fetched server info to avoid duplicate registry calls.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if successful, False otherwise.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If server is not found in registry.
|
||||||
|
"""
|
||||||
|
if not server_url:
|
||||||
|
print("Error: server_url cannot be empty")
|
||||||
|
return False
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Use cached server info if available, otherwise fetch from registry
|
||||||
|
if server_info_cache and server_url in server_info_cache:
|
||||||
|
server_info = server_info_cache[server_url]
|
||||||
|
else:
|
||||||
|
# Fallback to registry lookup if not cached
|
||||||
|
server_info = self.registry_client.find_server_by_reference(server_url)
|
||||||
|
|
||||||
|
# Fail if server is not found in registry - security requirement
|
||||||
|
# This raises ValueError as expected by tests
|
||||||
|
if not server_info:
|
||||||
|
raise ValueError(f"Failed to retrieve server details for '{server_url}'. Server not found in registry.")
|
||||||
|
|
||||||
|
# Generate server configuration
|
||||||
|
server_config, input_vars = self._format_server_config(server_info)
|
||||||
|
|
||||||
|
if not server_config:
|
||||||
|
print(f"Unable to configure server: {server_url}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Use provided server name or fallback to server_url
|
||||||
|
config_key = server_name or server_url
|
||||||
|
|
||||||
|
# Get current config
|
||||||
|
current_config = self.get_current_config()
|
||||||
|
|
||||||
|
# Ensure servers and inputs sections exist
|
||||||
|
if "servers" not in current_config:
|
||||||
|
current_config["servers"] = {}
|
||||||
|
if "inputs" not in current_config:
|
||||||
|
current_config["inputs"] = []
|
||||||
|
|
||||||
|
# Add the server configuration
|
||||||
|
current_config["servers"][config_key] = server_config
|
||||||
|
|
||||||
|
# Add input variables (avoiding duplicates)
|
||||||
|
existing_input_ids = {var.get("id") for var in current_config["inputs"] if isinstance(var, dict)}
|
||||||
|
for var in input_vars:
|
||||||
|
if var.get("id") not in existing_input_ids:
|
||||||
|
current_config["inputs"].append(var)
|
||||||
|
existing_input_ids.add(var.get("id"))
|
||||||
|
|
||||||
|
# Update the configuration
|
||||||
|
result = self.update_config(current_config)
|
||||||
|
|
||||||
|
if result:
|
||||||
|
print(f"Successfully configured MCP server '{config_key}' for VS Code")
|
||||||
|
return result
|
||||||
|
|
||||||
|
except ValueError:
|
||||||
|
# Re-raise ValueError for registry errors
|
||||||
|
raise
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error configuring MCP server: {e}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
def _format_server_config(self, server_info):
|
||||||
|
"""Format server details into VSCode mcp.json compatible format.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
server_info (dict): Server information from registry.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
tuple: (server_config, input_vars) where:
|
||||||
|
- server_config is the formatted server configuration for mcp.json
|
||||||
|
- input_vars is a list of input variable definitions
|
||||||
|
"""
|
||||||
|
# Initialize the base config structure
|
||||||
|
server_config = {}
|
||||||
|
input_vars = []
|
||||||
|
|
||||||
|
# Check for packages information
|
||||||
|
if "packages" in server_info and server_info["packages"]:
|
||||||
|
package = server_info["packages"][0]
|
||||||
|
runtime_hint = package.get("runtime_hint", "")
|
||||||
|
|
||||||
|
# Handle npm packages
|
||||||
|
if runtime_hint == "npx" or "npm" in package.get("registry_name", "").lower():
|
||||||
|
# Get args directly from runtime_arguments
|
||||||
|
args = []
|
||||||
|
if "runtime_arguments" in package and package["runtime_arguments"]:
|
||||||
|
for arg in package["runtime_arguments"]:
|
||||||
|
if arg.get("is_required", False) and arg.get("value_hint"):
|
||||||
|
args.append(arg.get("value_hint"))
|
||||||
|
|
||||||
|
# Fallback if no runtime_arguments are provided
|
||||||
|
if not args and package.get("name"):
|
||||||
|
args = [package.get("name")]
|
||||||
|
|
||||||
|
server_config = {
|
||||||
|
"type": "stdio",
|
||||||
|
"command": "npx",
|
||||||
|
"args": args
|
||||||
|
}
|
||||||
|
|
||||||
|
# Handle docker packages
|
||||||
|
elif runtime_hint == "docker":
|
||||||
|
# Get args directly from runtime_arguments
|
||||||
|
args = []
|
||||||
|
if "runtime_arguments" in package and package["runtime_arguments"]:
|
||||||
|
for arg in package["runtime_arguments"]:
|
||||||
|
if arg.get("is_required", False) and arg.get("value_hint"):
|
||||||
|
args.append(arg.get("value_hint"))
|
||||||
|
|
||||||
|
# Fallback if no runtime_arguments are provided - use standard docker run command
|
||||||
|
if not args:
|
||||||
|
args = ["run", "-i", "--rm", package.get("name")]
|
||||||
|
|
||||||
|
server_config = {
|
||||||
|
"type": "stdio",
|
||||||
|
"command": "docker",
|
||||||
|
"args": args
|
||||||
|
}
|
||||||
|
|
||||||
|
# Handle Python packages
|
||||||
|
elif runtime_hint in ["uvx", "pip", "python"] or "python" in runtime_hint or package.get("registry_name", "").lower() == "pypi":
|
||||||
|
# Determine the command based on runtime_hint
|
||||||
|
if runtime_hint == "uvx":
|
||||||
|
command = "uvx"
|
||||||
|
elif "python" in runtime_hint:
|
||||||
|
# Use the specified Python path if it's a full path, otherwise default to python3
|
||||||
|
command = "python3" if runtime_hint in ["python", "pip"] else runtime_hint
|
||||||
|
else:
|
||||||
|
command = "python3"
|
||||||
|
|
||||||
|
# Get args directly from runtime_arguments
|
||||||
|
args = []
|
||||||
|
if "runtime_arguments" in package and package["runtime_arguments"]:
|
||||||
|
for arg in package["runtime_arguments"]:
|
||||||
|
if arg.get("is_required", False) and arg.get("value_hint"):
|
||||||
|
args.append(arg.get("value_hint"))
|
||||||
|
|
||||||
|
# Fallback if no runtime_arguments are provided
|
||||||
|
if not args:
|
||||||
|
if runtime_hint == "uvx":
|
||||||
|
module_name = package.get("name", "").replace("mcp-server-", "")
|
||||||
|
args = [f"mcp-server-{module_name}"]
|
||||||
|
else:
|
||||||
|
module_name = package.get("name", "").replace("mcp-server-", "").replace("-", "_")
|
||||||
|
args = ["-m", f"mcp_server_{module_name}"]
|
||||||
|
|
||||||
|
server_config = {
|
||||||
|
"type": "stdio",
|
||||||
|
"command": command,
|
||||||
|
"args": args
|
||||||
|
}
|
||||||
|
|
||||||
|
# Add environment variables if present
|
||||||
|
if "environment_variables" in package and package["environment_variables"]:
|
||||||
|
server_config["env"] = {}
|
||||||
|
for env_var in package["environment_variables"]:
|
||||||
|
if "name" in env_var:
|
||||||
|
# Convert variable name to lowercase and replace underscores with hyphens for VS Code convention
|
||||||
|
input_var_name = env_var["name"].lower().replace("_", "-")
|
||||||
|
|
||||||
|
# Create the input variable reference
|
||||||
|
server_config["env"][env_var["name"]] = f"${{input:{input_var_name}}}"
|
||||||
|
|
||||||
|
# Create the input variable definition
|
||||||
|
input_var_def = {
|
||||||
|
"type": "promptString",
|
||||||
|
"id": input_var_name,
|
||||||
|
"description": env_var.get("description", f"{env_var['name']} for MCP server"),
|
||||||
|
"password": True # Default to True for security
|
||||||
|
}
|
||||||
|
input_vars.append(input_var_def)
|
||||||
|
|
||||||
|
# If no server config was created from packages, check for other server types
|
||||||
|
if not server_config:
|
||||||
|
# Check for SSE endpoints
|
||||||
|
if "sse_endpoint" in server_info:
|
||||||
|
server_config = {
|
||||||
|
"type": "sse",
|
||||||
|
"url": server_info["sse_endpoint"],
|
||||||
|
"headers": server_info.get("sse_headers", {})
|
||||||
|
}
|
||||||
|
# Check for remotes (similar to Copilot adapter)
|
||||||
|
elif "remotes" in server_info and server_info["remotes"]:
|
||||||
|
remotes = server_info["remotes"]
|
||||||
|
remote = remotes[0] # Take the first remote
|
||||||
|
if remote.get("transport_type") == "sse":
|
||||||
|
server_config = {
|
||||||
|
"type": "sse",
|
||||||
|
"url": remote.get("url", ""),
|
||||||
|
"headers": remote.get("headers", {})
|
||||||
|
}
|
||||||
|
# If no packages AND no endpoints/remotes, fail with clear error
|
||||||
|
else:
|
||||||
|
raise ValueError(f"MCP server has incomplete configuration in registry - no package information or remote endpoints available. "
|
||||||
|
f"This appears to be a temporary registry issue. "
|
||||||
|
f"Server: {server_info.get('name', 'unknown')}")
|
||||||
|
|
||||||
|
return server_config, input_vars
|
||||||
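Illustration (not part of this commit): the rough shape of the `.vscode/mcp.json` that `configure_mcp_server` writes for a hypothetical npm-packaged server with one environment variable, expressed as a small Python snippet. The server and package names are assumptions; the input-variable naming follows the lowercase, underscore-to-hyphen convention shown in `_format_server_config`.

```python
# Illustration only: the structure written to .vscode/mcp.json.
import json

config = {
    "servers": {
        "github": {                                  # config_key: server_name or server_url
            "type": "stdio",
            "command": "npx",
            "args": ["@example/github-mcp-server"],  # hypothetical package name
            "env": {"GITHUB_TOKEN": "${input:github-token}"},
        }
    },
    "inputs": [
        {
            "type": "promptString",
            "id": "github-token",                    # lowercased, underscores -> hyphens
            "description": "GITHUB_TOKEN for MCP server",
            "password": True,
        }
    ],
}
print(json.dumps(config, indent=2))
```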
1 src/apm_cli/adapters/package_manager/__init__.py Normal file
@@ -0,0 +1 @@
"""Package manager adapters package."""
27 src/apm_cli/adapters/package_manager/base.py Normal file
@@ -0,0 +1,27 @@
"""Base adapter interface for MCP package managers."""

from abc import ABC, abstractmethod


class MCPPackageManagerAdapter(ABC):
    """Base adapter for MCP package managers."""

    @abstractmethod
    def install(self, package_name, version=None):
        """Install an MCP package."""
        pass

    @abstractmethod
    def uninstall(self, package_name):
        """Uninstall an MCP package."""
        pass

    @abstractmethod
    def list_installed(self):
        """List all installed MCP packages."""
        pass

    @abstractmethod
    def search(self, query):
        """Search for MCP packages."""
        pass
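Illustration (not part of this commit): a minimal concrete subclass of the interface above, e.g. an in-memory stub that could be used in tests; the import path assumes the package is importable as `apm_cli`.

```python
# Sketch only: an in-memory implementation of the abstract package manager.
from apm_cli.adapters.package_manager.base import MCPPackageManagerAdapter


class InMemoryPackageManager(MCPPackageManagerAdapter):
    def __init__(self):
        self._installed = {}

    def install(self, package_name, version=None):
        self._installed[package_name] = version or "latest"
        return True

    def uninstall(self, package_name):
        return self._installed.pop(package_name, None) is not None

    def list_installed(self):
        return list(self._installed)

    def search(self, query):
        return [name for name in self._installed if query in name]
```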
123 src/apm_cli/adapters/package_manager/default_manager.py Normal file
@@ -0,0 +1,123 @@
"""Implementation of the default MCP package manager."""

from .base import MCPPackageManagerAdapter
from ...config import get_default_client
from ...registry.integration import RegistryIntegration


class DefaultMCPPackageManager(MCPPackageManagerAdapter):
    """Implementation of the default MCP package manager."""

    def install(self, package_name, version=None):
        """Install an MCP package.

        Args:
            package_name (str): Name of the package to install.
            version (str, optional): Version of the package to install.

        Returns:
            bool: True if successful, False otherwise.
        """

        try:
            # Import here to avoid circular import
            from ...factory import ClientFactory

            client_type = get_default_client()
            client_adapter = ClientFactory.create_client(client_type)

            # For VSCode, configure MCP server in mcp.json
            result = client_adapter.configure_mcp_server(package_name, package_name, True)

            if result:
                print(f"Successfully installed {package_name}")
            return result
        except Exception as e:
            print(f"Error installing package {package_name}: {e}")
            return False

    def uninstall(self, package_name):
        """Uninstall an MCP package.

        Args:
            package_name (str): Name of the package to uninstall.

        Returns:
            bool: True if successful, False otherwise.
        """

        try:
            # Import here to avoid circular import
            from ...factory import ClientFactory

            client_type = get_default_client()
            client_adapter = ClientFactory.create_client(client_type)
            config = client_adapter.get_current_config()

            # For VSCode, remove the server from mcp.json
            if "servers" in config and package_name in config["servers"]:
                servers = config["servers"]
                servers.pop(package_name, None)
                result = client_adapter.update_config({"servers": servers})

                if result:
                    print(f"Successfully uninstalled {package_name}")
                return result
            else:
                print(f"Package {package_name} not found in configuration")
                return False

        except Exception as e:
            print(f"Error uninstalling package {package_name}: {e}")
            return False

    def list_installed(self):
        """List all installed MCP packages.

        Returns:
            list: List of installed packages.
        """

        try:
            # Import here to avoid circular import
            from ...factory import ClientFactory

            # Get client type from configuration (default is vscode)
            client_type = get_default_client()

            # Create client adapter
            client_adapter = ClientFactory.create_client(client_type)

            # Get config from local .vscode/mcp.json file
            config = client_adapter.get_current_config()

            # Extract server names from the config
            servers = config.get("servers", {})

            # Return the list of server names
            return list(servers.keys())
        except Exception as e:
            print(f"Error retrieving installed MCP servers: {e}")
            return []

    def search(self, query):
        """Search for MCP packages.

        Args:
            query (str): Search query.

        Returns:
            list: List of packages matching the query.
        """

        try:
            # Use the registry integration to search for packages
            registry = RegistryIntegration()
            packages = registry.search_packages(query)

            # Return the list of package IDs/names
            return [pkg.get("id", pkg.get("name", "Unknown")) for pkg in packages] if packages else []

        except Exception as e:
            print(f"Error searching for packages: {e}")
            return []
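Illustration (not part of this commit): a sketch of how the default package manager is expected to be used. It delegates to whichever client adapter `get_default_client()` selects (VS Code by default), so the server name below is hypothetical and the outcome depends on the registry lookup.

```python
# Sketch only: installing a server via the default package manager.
from apm_cli.adapters.package_manager.default_manager import DefaultMCPPackageManager

pm = DefaultMCPPackageManager()
if pm.install("github"):           # writes an entry into .vscode/mcp.json
    print(pm.list_installed())     # e.g. ['github']
```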
2555 src/apm_cli/cli.py Normal file
File diff suppressed because it is too large
5 src/apm_cli/commands/__init__.py Normal file
@@ -0,0 +1,5 @@
"""Commands package for APM CLI."""

from .deps import deps

__all__ = ['deps']
656 src/apm_cli/commands/deps.py Normal file
@@ -0,0 +1,656 @@
|
|||||||
|
"""APM dependency management commands."""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import shutil
|
||||||
|
import click
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Optional, Dict, Any
|
||||||
|
|
||||||
|
# Import existing APM components
|
||||||
|
from ..models.apm_package import APMPackage, ValidationResult, validate_apm_package
|
||||||
|
from ..utils.console import _rich_success, _rich_error, _rich_info, _rich_warning
|
||||||
|
|
||||||
|
# Import APM dependency system components (with fallback)
|
||||||
|
from ..deps.github_downloader import GitHubPackageDownloader
|
||||||
|
from ..deps.apm_resolver import APMDependencyResolver
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@click.group(help="🔗 Manage APM package dependencies")
|
||||||
|
def deps():
|
||||||
|
"""APM dependency management commands."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
@deps.command(name="list", help="📋 List installed APM dependencies")
|
||||||
|
def list_packages():
|
||||||
|
"""Show all installed APM dependencies with context files and agent workflows."""
|
||||||
|
try:
|
||||||
|
# Import Rich components with fallback
|
||||||
|
from rich.table import Table
|
||||||
|
from rich.console import Console
|
||||||
|
console = Console()
|
||||||
|
has_rich = True
|
||||||
|
except ImportError:
|
||||||
|
has_rich = False
|
||||||
|
console = None
|
||||||
|
|
||||||
|
try:
|
||||||
|
project_root = Path(".")
|
||||||
|
apm_modules_path = project_root / "apm_modules"
|
||||||
|
|
||||||
|
# Check if apm_modules exists
|
||||||
|
if not apm_modules_path.exists():
|
||||||
|
if has_rich:
|
||||||
|
console.print("💡 No APM dependencies installed yet", style="cyan")
|
||||||
|
console.print("Run 'specify apm install' to install dependencies from apm.yml", style="dim")
|
||||||
|
else:
|
||||||
|
click.echo("💡 No APM dependencies installed yet")
|
||||||
|
click.echo("Run 'specify apm install' to install dependencies from apm.yml")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Load project dependencies to check for orphaned packages
|
||||||
|
declared_deps = set()
|
||||||
|
try:
|
||||||
|
apm_yml_path = project_root / "apm.yml"
|
||||||
|
if apm_yml_path.exists():
|
||||||
|
project_package = APMPackage.from_apm_yml(apm_yml_path)
|
||||||
|
for dep in project_package.get_apm_dependencies():
|
||||||
|
declared_deps.add(dep.repo_url)
|
||||||
|
except Exception:
|
||||||
|
pass # Continue without orphan detection if apm.yml parsing fails
|
||||||
|
|
||||||
|
# Scan for installed packages in org-namespaced structure
|
||||||
|
installed_packages = []
|
||||||
|
orphaned_packages = []
|
||||||
|
for org_dir in apm_modules_path.iterdir():
|
||||||
|
if org_dir.is_dir() and not org_dir.name.startswith('.'):
|
||||||
|
for package_dir in org_dir.iterdir():
|
||||||
|
if package_dir.is_dir() and not package_dir.name.startswith('.'):
|
||||||
|
try:
|
||||||
|
# org/repo format
|
||||||
|
org_repo_name = f"{org_dir.name}/{package_dir.name}"
|
||||||
|
|
||||||
|
# Try to load package metadata
|
||||||
|
apm_yml_path = package_dir / "apm.yml"
|
||||||
|
if apm_yml_path.exists():
|
||||||
|
package = APMPackage.from_apm_yml(apm_yml_path)
|
||||||
|
# Count context files and workflows separately
|
||||||
|
context_count, workflow_count = _count_package_files(package_dir)
|
||||||
|
|
||||||
|
# Check if this package is orphaned
|
||||||
|
is_orphaned = org_repo_name not in declared_deps
|
||||||
|
if is_orphaned:
|
||||||
|
orphaned_packages.append(org_repo_name)
|
||||||
|
|
||||||
|
installed_packages.append({
|
||||||
|
'name': org_repo_name,
|
||||||
|
'version': package.version or 'unknown',
|
||||||
|
'source': 'orphaned' if is_orphaned else 'github',
|
||||||
|
'context': context_count,
|
||||||
|
'workflows': workflow_count,
|
||||||
|
'path': str(package_dir),
|
||||||
|
'is_orphaned': is_orphaned
|
||||||
|
})
|
||||||
|
else:
|
||||||
|
# Package without apm.yml - show basic info
|
||||||
|
context_count, workflow_count = _count_package_files(package_dir)
|
||||||
|
is_orphaned = True # Assume orphaned if no apm.yml
|
||||||
|
orphaned_packages.append(org_repo_name)
|
||||||
|
|
||||||
|
installed_packages.append({
|
||||||
|
'name': org_repo_name,
|
||||||
|
'version': 'unknown',
|
||||||
|
'source': 'orphaned',
|
||||||
|
'context': context_count,
|
||||||
|
'workflows': workflow_count,
|
||||||
|
'path': str(package_dir),
|
||||||
|
'is_orphaned': is_orphaned
|
||||||
|
})
|
||||||
|
except Exception as e:
|
||||||
|
click.echo(f"⚠️ Warning: Failed to read package {org_dir.name}/{package_dir.name}: {e}")
|
||||||
|
|
||||||
|
if not installed_packages:
|
||||||
|
if has_rich:
|
||||||
|
console.print("💡 apm_modules/ directory exists but contains no valid packages", style="cyan")
|
||||||
|
else:
|
||||||
|
click.echo("💡 apm_modules/ directory exists but contains no valid packages")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Display packages in table format
|
||||||
|
if has_rich:
|
||||||
|
table = Table(title="📋 APM Dependencies", show_header=True, header_style="bold cyan")
|
||||||
|
table.add_column("Package", style="bold white")
|
||||||
|
table.add_column("Version", style="yellow")
|
||||||
|
table.add_column("Source", style="blue")
|
||||||
|
table.add_column("Context", style="green")
|
||||||
|
table.add_column("Workflows", style="magenta")
|
||||||
|
|
||||||
|
for pkg in installed_packages:
|
||||||
|
table.add_row(
|
||||||
|
pkg['name'],
|
||||||
|
pkg['version'],
|
||||||
|
pkg['source'],
|
||||||
|
f"{pkg['context']} files",
|
||||||
|
f"{pkg['workflows']} workflows"
|
||||||
|
)
|
||||||
|
|
||||||
|
console.print(table)
|
||||||
|
|
||||||
|
# Show orphaned packages warning
|
||||||
|
if orphaned_packages:
|
||||||
|
console.print(f"\n⚠️ {len(orphaned_packages)} orphaned package(s) found (not in apm.yml):", style="yellow")
|
||||||
|
for pkg in orphaned_packages:
|
||||||
|
console.print(f" • {pkg}", style="dim yellow")
|
||||||
|
console.print("\n💡 Run 'specify apm prune' to remove orphaned packages", style="cyan")
|
||||||
|
else:
|
||||||
|
# Fallback text table
|
||||||
|
click.echo("📋 APM Dependencies:")
|
||||||
|
click.echo("┌─────────────────────┬─────────┬──────────────┬─────────────┬─────────────┐")
|
||||||
|
click.echo("│ Package │ Version │ Source │ Context │ Workflows │")
|
||||||
|
click.echo("├─────────────────────┼─────────┼──────────────┼─────────────┼─────────────┤")
|
||||||
|
|
||||||
|
for pkg in installed_packages:
|
||||||
|
name = pkg['name'][:19].ljust(19)
|
||||||
|
version = pkg['version'][:7].ljust(7)
|
||||||
|
source = pkg['source'][:12].ljust(12)
|
||||||
|
context = f"{pkg['context']} files".ljust(11)
|
||||||
|
workflows = f"{pkg['workflows']} wf".ljust(11)
|
||||||
|
click.echo(f"│ {name} │ {version} │ {source} │ {context} │ {workflows} │")
|
||||||
|
|
||||||
|
click.echo("└─────────────────────┴─────────┴──────────────┴─────────────┴─────────────┘")
|
||||||
|
|
||||||
|
# Show orphaned packages warning
|
||||||
|
if orphaned_packages:
|
||||||
|
click.echo(f"\n⚠️ {len(orphaned_packages)} orphaned package(s) found (not in apm.yml):")
|
||||||
|
for pkg in orphaned_packages:
|
||||||
|
click.echo(f" • {pkg}")
|
||||||
|
click.echo("\n💡 Run 'specify apm prune' to remove orphaned packages")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
_rich_error(f"Error listing dependencies: {e}")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
@deps.command(help="🌳 Show dependency tree structure")
|
||||||
|
def tree():
|
||||||
|
"""Display dependencies in hierarchical tree format showing context and workflows."""
|
||||||
|
try:
|
||||||
|
# Import Rich components with fallback
|
||||||
|
from rich.tree import Tree
|
||||||
|
from rich.console import Console
|
||||||
|
console = Console()
|
||||||
|
has_rich = True
|
||||||
|
except ImportError:
|
||||||
|
has_rich = False
|
||||||
|
console = None
|
||||||
|
|
||||||
|
try:
|
||||||
|
project_root = Path(".")
|
||||||
|
apm_modules_path = project_root / "apm_modules"
|
||||||
|
|
||||||
|
# Load project info
|
||||||
|
project_name = "my-project"
|
||||||
|
try:
|
||||||
|
apm_yml_path = project_root / "apm.yml"
|
||||||
|
if apm_yml_path.exists():
|
||||||
|
root_package = APMPackage.from_apm_yml(apm_yml_path)
|
||||||
|
project_name = root_package.name
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
if has_rich:
|
||||||
|
# Create Rich tree
|
||||||
|
root_tree = Tree(f"[bold cyan]{project_name}[/bold cyan] (local)")
|
||||||
|
|
||||||
|
# Check if apm_modules exists
|
||||||
|
if not apm_modules_path.exists():
|
||||||
|
root_tree.add("[dim]No dependencies installed[/dim]")
|
||||||
|
else:
|
||||||
|
# Add each dependency as a branch
|
||||||
|
for package_dir in apm_modules_path.iterdir():
|
||||||
|
if package_dir.is_dir():
|
||||||
|
try:
|
||||||
|
package_info = _get_package_display_info(package_dir)
|
||||||
|
branch = root_tree.add(f"[green]{package_info['display_name']}[/green]")
|
||||||
|
|
||||||
|
# Add context files and workflows as sub-items
|
||||||
|
context_files = _get_detailed_context_counts(package_dir)
|
||||||
|
workflow_count = _count_workflows(package_dir)
|
||||||
|
|
||||||
|
# Show context files by type
|
||||||
|
for context_type, count in context_files.items():
|
||||||
|
if count > 0:
|
||||||
|
branch.add(f"[dim]{count} {context_type}[/dim]")
|
||||||
|
|
||||||
|
# Show workflows
|
||||||
|
if workflow_count > 0:
|
||||||
|
branch.add(f"[bold magenta]{workflow_count} agent workflows[/bold magenta]")
|
||||||
|
|
||||||
|
if not any(count > 0 for count in context_files.values()) and workflow_count == 0:
|
||||||
|
branch.add("[dim]no context or workflows[/dim]")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
branch = root_tree.add(f"[red]{package_dir.name}[/red] [dim](error loading)[/dim]")
|
||||||
|
|
||||||
|
console.print(root_tree)
|
||||||
|
|
||||||
|
else:
|
||||||
|
# Fallback text tree
|
||||||
|
click.echo(f"{project_name} (local)")
|
||||||
|
|
||||||
|
if not apm_modules_path.exists():
|
||||||
|
click.echo("└── No dependencies installed")
|
||||||
|
return
|
||||||
|
|
||||||
|
package_dirs = [d for d in apm_modules_path.iterdir() if d.is_dir()]
|
||||||
|
|
||||||
|
for i, package_dir in enumerate(package_dirs):
|
||||||
|
is_last = i == len(package_dirs) - 1
|
||||||
|
prefix = "└── " if is_last else "├── "
|
||||||
|
|
||||||
|
try:
|
||||||
|
package_info = _get_package_display_info(package_dir)
|
||||||
|
click.echo(f"{prefix}{package_info['display_name']}")
|
||||||
|
|
||||||
|
# Add context files and workflows
|
||||||
|
context_files = _get_detailed_context_counts(package_dir)
|
||||||
|
workflow_count = _count_workflows(package_dir)
|
||||||
|
sub_prefix = " " if is_last else "│ "
|
||||||
|
|
||||||
|
items_shown = False
|
||||||
|
for context_type, count in context_files.items():
|
||||||
|
if count > 0:
|
||||||
|
click.echo(f"{sub_prefix}├── {count} {context_type}")
|
||||||
|
items_shown = True
|
||||||
|
|
||||||
|
if workflow_count > 0:
|
||||||
|
click.echo(f"{sub_prefix}├── {workflow_count} agent workflows")
|
||||||
|
items_shown = True
|
||||||
|
|
||||||
|
if not items_shown:
|
||||||
|
click.echo(f"{sub_prefix}└── no context or workflows")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
click.echo(f"{prefix}{package_dir.name} (error loading)")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
_rich_error(f"Error showing dependency tree: {e}")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
@deps.command(help="🧹 Remove all APM dependencies")
|
||||||
|
def clean():
|
||||||
|
"""Remove entire apm_modules/ directory."""
|
||||||
|
project_root = Path(".")
|
||||||
|
apm_modules_path = project_root / "apm_modules"
|
||||||
|
|
||||||
|
if not apm_modules_path.exists():
|
||||||
|
_rich_info("No apm_modules/ directory found - already clean")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Show what will be removed
|
||||||
|
package_count = len([d for d in apm_modules_path.iterdir() if d.is_dir()])
|
||||||
|
|
||||||
|
_rich_warning(f"This will remove the entire apm_modules/ directory ({package_count} packages)")
|
||||||
|
|
||||||
|
# Confirmation prompt
|
||||||
|
try:
|
||||||
|
from rich.prompt import Confirm
|
||||||
|
confirm = Confirm.ask("Continue?")
|
||||||
|
except ImportError:
|
||||||
|
confirm = click.confirm("Continue?")
|
||||||
|
|
||||||
|
if not confirm:
|
||||||
|
_rich_info("Operation cancelled")
|
||||||
|
return
|
||||||
|
|
||||||
|
try:
|
||||||
|
shutil.rmtree(apm_modules_path)
|
||||||
|
_rich_success("Successfully removed apm_modules/ directory")
|
||||||
|
except Exception as e:
|
||||||
|
_rich_error(f"Error removing apm_modules/: {e}")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
@deps.command(help="🔄 Update APM dependencies")
|
||||||
|
@click.argument('package', required=False)
|
||||||
|
def update(package: Optional[str]):
|
||||||
|
"""Update specific package or all if no package specified."""
|
||||||
|
|
||||||
|
project_root = Path(".")
|
||||||
|
apm_modules_path = project_root / "apm_modules"
|
||||||
|
|
||||||
|
if not apm_modules_path.exists():
|
||||||
|
_rich_info("No apm_modules/ directory found - no packages to update")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Get project dependencies to validate updates
|
||||||
|
try:
|
||||||
|
apm_yml_path = project_root / "apm.yml"
|
||||||
|
if not apm_yml_path.exists():
|
||||||
|
_rich_error("No apm.yml found in current directory")
|
||||||
|
return
|
||||||
|
|
||||||
|
project_package = APMPackage.from_apm_yml(apm_yml_path)
|
||||||
|
project_deps = project_package.get_apm_dependencies()
|
||||||
|
|
||||||
|
if not project_deps:
|
||||||
|
_rich_info("No APM dependencies defined in apm.yml")
|
||||||
|
return
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
_rich_error(f"Error reading apm.yml: {e}")
|
||||||
|
return
|
||||||
|
|
||||||
|
if package:
|
||||||
|
# Update specific package
|
||||||
|
_update_single_package(package, project_deps, apm_modules_path)
|
||||||
|
else:
|
||||||
|
# Update all packages
|
||||||
|
_update_all_packages(project_deps, apm_modules_path)
|
||||||
|
|
||||||
|
|
||||||
|
@deps.command(help="ℹ️ Show detailed package information")
|
||||||
|
@click.argument('package', required=True)
|
||||||
|
def info(package: str):
|
||||||
|
"""Show detailed information about a specific package including context files and workflows."""
|
||||||
|
project_root = Path(".")
|
||||||
|
apm_modules_path = project_root / "apm_modules"
|
||||||
|
|
||||||
|
if not apm_modules_path.exists():
|
||||||
|
_rich_error("No apm_modules/ directory found")
|
||||||
|
_rich_info("Run 'specify apm install' to install dependencies first")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# Find the package directory
|
||||||
|
package_path = None
|
||||||
|
for package_dir in apm_modules_path.iterdir():
|
||||||
|
if package_dir.is_dir() and package_dir.name == package:
|
||||||
|
package_path = package_dir
|
||||||
|
break
|
||||||
|
|
||||||
|
if not package_path:
|
||||||
|
_rich_error(f"Package '{package}' not found in apm_modules/")
|
||||||
|
_rich_info("Available packages:")
|
||||||
|
|
||||||
|
for package_dir in apm_modules_path.iterdir():
|
||||||
|
if package_dir.is_dir():
|
||||||
|
click.echo(f" - {package_dir.name}")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Load package information
|
||||||
|
package_info = _get_detailed_package_info(package_path)
|
||||||
|
|
||||||
|
# Display with Rich panel if available
|
||||||
|
try:
|
||||||
|
from rich.panel import Panel
|
||||||
|
from rich.console import Console
|
||||||
|
from rich.text import Text
|
||||||
|
console = Console()
|
||||||
|
|
||||||
|
content_lines = []
|
||||||
|
content_lines.append(f"[bold]Name:[/bold] {package_info['name']}")
|
||||||
|
content_lines.append(f"[bold]Version:[/bold] {package_info['version']}")
|
||||||
|
content_lines.append(f"[bold]Description:[/bold] {package_info['description']}")
|
||||||
|
content_lines.append(f"[bold]Author:[/bold] {package_info['author']}")
|
||||||
|
content_lines.append(f"[bold]Source:[/bold] {package_info['source']}")
|
||||||
|
content_lines.append(f"[bold]Install Path:[/bold] {package_info['install_path']}")
|
||||||
|
content_lines.append("")
|
||||||
|
content_lines.append("[bold]Context Files:[/bold]")
|
||||||
|
|
||||||
|
for context_type, count in package_info['context_files'].items():
|
||||||
|
if count > 0:
|
||||||
|
content_lines.append(f" • {count} {context_type}")
|
||||||
|
|
||||||
|
if not any(count > 0 for count in package_info['context_files'].values()):
|
||||||
|
content_lines.append(" • No context files found")
|
||||||
|
|
||||||
|
content_lines.append("")
|
||||||
|
content_lines.append("[bold]Agent Workflows:[/bold]")
|
||||||
|
if package_info['workflows'] > 0:
|
||||||
|
content_lines.append(f" • {package_info['workflows']} executable workflows")
|
||||||
|
else:
|
||||||
|
content_lines.append(" • No agent workflows found")
|
||||||
|
|
||||||
|
content = "\n".join(content_lines)
|
||||||
|
panel = Panel(content, title=f"ℹ️ Package Info: {package}", border_style="cyan")
|
||||||
|
console.print(panel)
|
||||||
|
|
||||||
|
except ImportError:
|
||||||
|
# Fallback text display
|
||||||
|
click.echo(f"ℹ️ Package Info: {package}")
|
||||||
|
click.echo("=" * 40)
|
||||||
|
click.echo(f"Name: {package_info['name']}")
|
||||||
|
click.echo(f"Version: {package_info['version']}")
|
||||||
|
click.echo(f"Description: {package_info['description']}")
|
||||||
|
click.echo(f"Author: {package_info['author']}")
|
||||||
|
click.echo(f"Source: {package_info['source']}")
|
||||||
|
click.echo(f"Install Path: {package_info['install_path']}")
|
||||||
|
click.echo("")
|
||||||
|
click.echo("Context Files:")
|
||||||
|
|
||||||
|
for context_type, count in package_info['context_files'].items():
|
||||||
|
if count > 0:
|
||||||
|
click.echo(f" • {count} {context_type}")
|
||||||
|
|
||||||
|
if not any(count > 0 for count in package_info['context_files'].values()):
|
||||||
|
click.echo(" • No context files found")
|
||||||
|
|
||||||
|
click.echo("")
|
||||||
|
click.echo("Agent Workflows:")
|
||||||
|
if package_info['workflows'] > 0:
|
||||||
|
click.echo(f" • {package_info['workflows']} executable workflows")
|
||||||
|
else:
|
||||||
|
click.echo(" • No agent workflows found")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
_rich_error(f"Error reading package information: {e}")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
# Helper functions
|
||||||
|
|
||||||
|
def _count_package_files(package_path: Path) -> tuple[int, int]:
|
||||||
|
"""Count context files and workflows in a package.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
tuple: (context_count, workflow_count)
|
||||||
|
"""
|
||||||
|
apm_dir = package_path / ".apm"
|
||||||
|
if not apm_dir.exists():
|
||||||
|
# Also check root directory for .prompt.md files
|
||||||
|
workflow_count = len(list(package_path.glob("*.prompt.md")))
|
||||||
|
return 0, workflow_count
|
||||||
|
|
||||||
|
context_count = 0
|
||||||
|
context_dirs = ['instructions', 'chatmodes', 'contexts']
|
||||||
|
|
||||||
|
for context_dir in context_dirs:
|
||||||
|
context_path = apm_dir / context_dir
|
||||||
|
if context_path.exists() and context_path.is_dir():
|
||||||
|
context_count += len(list(context_path.glob("*.md")))
|
||||||
|
|
||||||
|
# Count workflows in both .apm/prompts and root directory
|
||||||
|
workflow_count = 0
|
||||||
|
prompts_path = apm_dir / "prompts"
|
||||||
|
if prompts_path.exists() and prompts_path.is_dir():
|
||||||
|
workflow_count += len(list(prompts_path.glob("*.prompt.md")))
|
||||||
|
|
||||||
|
# Also check root directory for .prompt.md files
|
||||||
|
workflow_count += len(list(package_path.glob("*.prompt.md")))
|
||||||
|
|
||||||
|
return context_count, workflow_count
|
||||||
|
|
||||||
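Illustration (not part of this commit): a sketch of the package layout the counting helpers above assume, built in a temporary directory and checked against `_count_package_files`. The file names are made up, and the import path assumes the package is importable as `apm_cli`.

```python
# Illustration only: context files live under .apm/, workflows are *.prompt.md.
import tempfile
from pathlib import Path

from apm_cli.commands.deps import _count_package_files  # assumed import path

pkg = Path(tempfile.mkdtemp()) / "example-pkg"
for rel in [
    ".apm/instructions/style.md",
    ".apm/chatmodes/reviewer.md",
    ".apm/contexts/architecture.md",
    ".apm/prompts/release.prompt.md",
    "deploy.prompt.md",
]:
    path = pkg / rel
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text("# placeholder\n")

# 3 context files (.apm/instructions, .apm/chatmodes, .apm/contexts)
# and 2 workflows (.apm/prompts/*.prompt.md plus root *.prompt.md)
assert _count_package_files(pkg) == (3, 2)
```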
|
|
||||||
|
def _count_workflows(package_path: Path) -> int:
|
||||||
|
"""Count agent workflows (.prompt.md files) in a package."""
|
||||||
|
_, workflow_count = _count_package_files(package_path)
|
||||||
|
return workflow_count
|
||||||
|
|
||||||
|
|
||||||
|
def _get_detailed_context_counts(package_path: Path) -> Dict[str, int]:
|
||||||
|
"""Get detailed context file counts by type."""
|
||||||
|
apm_dir = package_path / ".apm"
|
||||||
|
if not apm_dir.exists():
|
||||||
|
return {'instructions': 0, 'chatmodes': 0, 'contexts': 0}
|
||||||
|
|
||||||
|
counts = {}
|
||||||
|
context_types = {
|
||||||
|
'instructions': ['instructions.md'],
|
||||||
|
'chatmodes': ['chatmode.md'],
|
||||||
|
'contexts': ['context.md', 'memory.md']
|
||||||
|
}
|
||||||
|
|
||||||
|
for context_type, extensions in context_types.items():
|
||||||
|
count = 0
|
||||||
|
context_path = apm_dir / context_type
|
||||||
|
if context_path.exists() and context_path.is_dir():
|
||||||
|
for ext in extensions:
|
||||||
|
count += len(list(context_path.glob(f"*.{ext}")))
|
||||||
|
counts[context_type] = count
|
||||||
|
|
||||||
|
return counts
|
||||||
|
|
||||||
|
|
||||||
|
def _get_package_display_info(package_path: Path) -> Dict[str, str]:
|
||||||
|
"""Get package display information."""
|
||||||
|
try:
|
||||||
|
apm_yml_path = package_path / "apm.yml"
|
||||||
|
if apm_yml_path.exists():
|
||||||
|
package = APMPackage.from_apm_yml(apm_yml_path)
|
||||||
|
version_info = f"@{package.version}" if package.version else "@unknown"
|
||||||
|
return {
|
||||||
|
'display_name': f"{package.name}{version_info}",
|
||||||
|
'name': package.name,
|
||||||
|
'version': package.version or 'unknown'
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
return {
|
||||||
|
'display_name': f"{package_path.name}@unknown",
|
||||||
|
'name': package_path.name,
|
||||||
|
'version': 'unknown'
|
||||||
|
}
|
||||||
|
except Exception:
|
||||||
|
return {
|
||||||
|
'display_name': f"{package_path.name}@error",
|
||||||
|
'name': package_path.name,
|
||||||
|
'version': 'error'
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _get_detailed_package_info(package_path: Path) -> Dict[str, Any]:
|
||||||
|
"""Get detailed package information for the info command."""
|
||||||
|
try:
|
||||||
|
apm_yml_path = package_path / "apm.yml"
|
||||||
|
if apm_yml_path.exists():
|
||||||
|
package = APMPackage.from_apm_yml(apm_yml_path)
|
||||||
|
context_count, workflow_count = _count_package_files(package_path)
|
||||||
|
return {
|
||||||
|
'name': package.name,
|
||||||
|
'version': package.version or 'unknown',
|
||||||
|
'description': package.description or 'No description',
|
||||||
|
'author': package.author or 'Unknown',
|
||||||
|
'source': package.source or 'local',
|
||||||
|
'install_path': str(package_path.resolve()),
|
||||||
|
'context_files': _get_detailed_context_counts(package_path),
|
||||||
|
'workflows': workflow_count
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
context_count, workflow_count = _count_package_files(package_path)
|
||||||
|
return {
|
||||||
|
'name': package_path.name,
|
||||||
|
'version': 'unknown',
|
||||||
|
'description': 'No apm.yml found',
|
||||||
|
'author': 'Unknown',
|
||||||
|
'source': 'unknown',
|
||||||
|
'install_path': str(package_path.resolve()),
|
||||||
|
'context_files': _get_detailed_context_counts(package_path),
|
||||||
|
'workflows': workflow_count
|
||||||
|
}
|
||||||
|
except Exception as e:
|
||||||
|
return {
|
||||||
|
'name': package_path.name,
|
||||||
|
'version': 'error',
|
||||||
|
'description': f'Error loading package: {e}',
|
||||||
|
'author': 'Unknown',
|
||||||
|
'source': 'unknown',
|
||||||
|
'install_path': str(package_path.resolve()),
|
||||||
|
'context_files': {'instructions': 0, 'chatmodes': 0, 'contexts': 0},
|
||||||
|
'workflows': 0
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _update_single_package(package_name: str, project_deps: List, apm_modules_path: Path):
|
||||||
|
"""Update a specific package."""
|
||||||
|
# Find the dependency reference for this package
|
||||||
|
target_dep = None
|
||||||
|
for dep in project_deps:
|
||||||
|
if dep.get_display_name() == package_name or dep.repo_url.split('/')[-1] == package_name:
|
||||||
|
target_dep = dep
|
||||||
|
break
|
||||||
|
|
||||||
|
if not target_dep:
|
||||||
|
_rich_error(f"Package '{package_name}' not found in apm.yml dependencies")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Find the installed package directory
|
||||||
|
package_dir = None
|
||||||
|
if target_dep.alias:
|
||||||
|
package_dir = apm_modules_path / target_dep.alias
|
||||||
|
else:
|
||||||
|
package_dir = apm_modules_path / package_name
|
||||||
|
|
||||||
|
if not package_dir.exists():
|
||||||
|
_rich_error(f"Package '{package_name}' not installed in apm_modules/")
|
||||||
|
_rich_info(f"Run 'apm install' to install it first")
|
||||||
|
return
|
||||||
|
|
||||||
|
try:
|
||||||
|
downloader = GitHubPackageDownloader()
|
||||||
|
_rich_info(f"Updating {target_dep.repo_url}...")
|
||||||
|
|
||||||
|
# Download latest version
|
||||||
|
package_info = downloader.download_package(str(target_dep), package_dir)
|
||||||
|
|
||||||
|
_rich_success(f"✅ Updated {target_dep.repo_url}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
_rich_error(f"Failed to update {package_name}: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
def _update_all_packages(project_deps: List, apm_modules_path: Path):
|
||||||
|
"""Update all packages."""
|
||||||
|
if not project_deps:
|
||||||
|
_rich_info("No APM dependencies to update")
|
||||||
|
return
|
||||||
|
|
||||||
|
_rich_info(f"Updating {len(project_deps)} APM dependencies...")
|
||||||
|
|
||||||
|
downloader = GitHubPackageDownloader()
|
||||||
|
updated_count = 0
|
||||||
|
|
||||||
|
for dep in project_deps:
|
||||||
|
# Determine package directory
|
||||||
|
if dep.alias:
|
||||||
|
package_dir = apm_modules_path / dep.alias
|
||||||
|
else:
|
||||||
|
package_dir = apm_modules_path / dep.repo_url.split('/')[-1]
|
||||||
|
|
||||||
|
if not package_dir.exists():
|
||||||
|
_rich_warning(f"⚠️ {dep.repo_url} not installed - skipping")
|
||||||
|
continue
|
||||||
|
|
||||||
|
try:
|
||||||
|
_rich_info(f" Updating {dep.repo_url}...")
|
||||||
|
package_info = downloader.download_package(str(dep), package_dir)
|
||||||
|
updated_count += 1
|
||||||
|
_rich_success(f" ✅ {dep.repo_url}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
_rich_error(f" ❌ Failed to update {dep.repo_url}: {e}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
_rich_success(f"Updated {updated_count} of {len(project_deps)} packages")
|
||||||
|
|
||||||
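Illustration (not part of this commit): how the update helpers above resolve a dependency's install directory, preferring the alias and otherwise the last path segment of the repo URL; the dependency values are made up and the `Dep` class stands in for the real dependency object.

```python
# Illustration only: target directory resolution used by the update helpers.
from pathlib import Path

apm_modules = Path("apm_modules")


class Dep:  # stand-in for the real dependency object (repo_url, alias attributes)
    def __init__(self, repo_url, alias=None):
        self.repo_url, self.alias = repo_url, alias


for dep in [Dep("octo-org/design-system"), Dep("octo-org/workflows", alias="wf")]:
    target = apm_modules / (dep.alias or dep.repo_url.split("/")[-1])
    print(dep.repo_url, "->", target)   # apm_modules/design-system, apm_modules/wf
```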
29 src/apm_cli/compilation/__init__.py Normal file
@@ -0,0 +1,29 @@
"""APM compilation module for generating AGENTS.md files."""

from .agents_compiler import AgentsCompiler, compile_agents_md, CompilationConfig, CompilationResult
from .template_builder import (
    build_conditional_sections,
    TemplateData,
    find_chatmode_by_name
)
from .link_resolver import (
    resolve_markdown_links,
    validate_link_targets
)

__all__ = [
    # Main compilation interface
    'AgentsCompiler',
    'compile_agents_md',
    'CompilationConfig',
    'CompilationResult',

    # Template building
    'build_conditional_sections',
    'TemplateData',
    'find_chatmode_by_name',

    # Link resolution
    'resolve_markdown_links',
    'validate_link_targets'
]
630 src/apm_cli/compilation/agents_compiler.py Normal file
@@ -0,0 +1,630 @@
|
|||||||
|
"""Main compilation orchestration for AGENTS.md generation.
|
||||||
|
|
||||||
|
Timestamp generation removed in favor of deterministic Build ID handled after
|
||||||
|
full content assembly. This keeps repeated compiles byte-identical when source
|
||||||
|
primitives & constitution are unchanged.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Optional, Dict, Any
|
||||||
|
from ..primitives.models import PrimitiveCollection
|
||||||
|
from ..primitives.discovery import discover_primitives
|
||||||
|
from ..version import get_version
|
||||||
|
from .template_builder import (
|
||||||
|
build_conditional_sections,
|
||||||
|
generate_agents_md_template,
|
||||||
|
TemplateData,
|
||||||
|
find_chatmode_by_name
|
||||||
|
)
|
||||||
|
from .link_resolver import resolve_markdown_links, validate_link_targets
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class CompilationConfig:
|
||||||
|
"""Configuration for AGENTS.md compilation."""
|
||||||
|
output_path: str = "AGENTS.md"
|
||||||
|
chatmode: Optional[str] = None
|
||||||
|
resolve_links: bool = True
|
||||||
|
dry_run: bool = False
|
||||||
|
with_constitution: bool = True # Phase 0 feature flag
|
||||||
|
|
||||||
|
# Distributed compilation settings (Task 7)
|
||||||
|
strategy: str = "distributed" # "distributed" or "single-file"
|
||||||
|
single_agents: bool = False # Force single-file mode
|
||||||
|
trace: bool = False # Show source attribution and conflicts
|
||||||
|
local_only: bool = False # Ignore dependencies, compile only local primitives
|
||||||
|
debug: bool = False # Show context optimizer analysis and metrics
|
||||||
|
min_instructions_per_file: int = 1 # Minimum instructions per AGENTS.md file (Minimal Context Principle)
|
||||||
|
source_attribution: bool = True # Include source file comments
|
||||||
|
clean_orphaned: bool = False # Remove orphaned AGENTS.md files
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
"""Handle CLI flag precedence after initialization."""
|
||||||
|
if self.single_agents:
|
||||||
|
self.strategy = "single-file"
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_apm_yml(cls, **overrides) -> 'CompilationConfig':
|
||||||
|
"""Create configuration from apm.yml with command-line overrides.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
**overrides: Command-line arguments that override config file values.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
CompilationConfig: Configuration with apm.yml values and overrides applied.
|
||||||
|
"""
|
||||||
|
config = cls()
|
||||||
|
|
||||||
|
# Try to load from apm.yml
|
||||||
|
try:
|
||||||
|
from pathlib import Path
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
if Path('apm.yml').exists():
|
||||||
|
with open('apm.yml', 'r') as f:
|
||||||
|
apm_config = yaml.safe_load(f) or {}
|
||||||
|
|
||||||
|
# Look for compilation section
|
||||||
|
compilation_config = apm_config.get('compilation', {})
|
||||||
|
|
||||||
|
# Apply config file values
|
||||||
|
if 'output' in compilation_config:
|
||||||
|
config.output_path = compilation_config['output']
|
||||||
|
if 'chatmode' in compilation_config:
|
||||||
|
config.chatmode = compilation_config['chatmode']
|
||||||
|
if 'resolve_links' in compilation_config:
|
||||||
|
config.resolve_links = compilation_config['resolve_links']
|
||||||
|
|
||||||
|
# Distributed compilation settings (Task 7)
|
||||||
|
if 'strategy' in compilation_config:
|
||||||
|
config.strategy = compilation_config['strategy']
|
||||||
|
if 'single_file' in compilation_config:
|
||||||
|
# Legacy config support - if single_file is True, override strategy
|
||||||
|
if compilation_config['single_file']:
|
||||||
|
config.strategy = "single-file"
|
||||||
|
config.single_agents = True
|
||||||
|
|
||||||
|
# Placement settings
|
||||||
|
placement_config = compilation_config.get('placement', {})
|
||||||
|
if 'min_instructions_per_file' in placement_config:
|
||||||
|
config.min_instructions_per_file = placement_config['min_instructions_per_file']
|
||||||
|
|
||||||
|
# Source attribution
|
||||||
|
if 'source_attribution' in compilation_config:
|
||||||
|
config.source_attribution = compilation_config['source_attribution']
|
||||||
|
|
||||||
|
except Exception:
|
||||||
|
# If config loading fails, use defaults
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Apply command-line overrides (highest priority)
|
||||||
|
for key, value in overrides.items():
|
||||||
|
if value is not None: # Only override if explicitly provided
|
||||||
|
setattr(config, key, value)
|
||||||
|
|
||||||
|
# Handle CLI flag precedence
|
||||||
|
if config.single_agents:
|
||||||
|
config.strategy = "single-file"
|
||||||
|
|
||||||
|
return config
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class CompilationResult:
|
||||||
|
"""Result of AGENTS.md compilation."""
|
||||||
|
success: bool
|
||||||
|
output_path: str
|
||||||
|
content: str
|
||||||
|
warnings: List[str]
|
||||||
|
errors: List[str]
|
||||||
|
stats: Dict[str, Any]
|
||||||
|
|
||||||
|
|
||||||
|
class AgentsCompiler:
|
||||||
|
"""Main compiler for generating AGENTS.md files."""
|
||||||
|
|
||||||
|
def __init__(self, base_dir: str = "."):
|
||||||
|
"""Initialize the compiler.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
base_dir (str): Base directory for compilation. Defaults to current directory.
|
||||||
|
"""
|
||||||
|
self.base_dir = Path(base_dir)
|
||||||
|
self.warnings: List[str] = []
|
||||||
|
self.errors: List[str] = []
|
||||||
|
|
||||||
|
def compile(self, config: CompilationConfig, primitives: Optional[PrimitiveCollection] = None) -> CompilationResult:
|
||||||
|
"""Compile AGENTS.md with the given configuration.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config (CompilationConfig): Compilation configuration.
|
||||||
|
primitives (Optional[PrimitiveCollection]): Primitives to use, or None to discover.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
CompilationResult: Result of the compilation.
|
||||||
|
"""
|
||||||
|
self.warnings.clear()
|
||||||
|
self.errors.clear()
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Use provided primitives or discover them (with dependency support)
|
||||||
|
if primitives is None:
|
||||||
|
if config.local_only:
|
||||||
|
# Use basic discovery for local-only mode
|
||||||
|
primitives = discover_primitives(str(self.base_dir))
|
||||||
|
else:
|
||||||
|
# Use enhanced discovery with dependencies (Task 4 integration)
|
||||||
|
from ..primitives.discovery import discover_primitives_with_dependencies
|
||||||
|
primitives = discover_primitives_with_dependencies(str(self.base_dir))
|
||||||
|
|
||||||
|
# Handle distributed compilation (Task 7 - new default behavior)
|
||||||
|
if config.strategy == "distributed" and not config.single_agents:
|
||||||
|
return self._compile_distributed(config, primitives)
|
||||||
|
else:
|
||||||
|
# Traditional single-file compilation (backward compatibility)
|
||||||
|
return self._compile_single_file(config, primitives)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
self.errors.append(f"Compilation failed: {str(e)}")
|
||||||
|
return CompilationResult(
|
||||||
|
success=False,
|
||||||
|
output_path="",
|
||||||
|
content="",
|
||||||
|
warnings=self.warnings.copy(),
|
||||||
|
errors=self.errors.copy(),
|
||||||
|
stats={}
|
||||||
|
)
|
||||||
|
|
||||||
|
def _compile_distributed(self, config: CompilationConfig, primitives: PrimitiveCollection) -> CompilationResult:
|
||||||
|
"""Compile using distributed AGENTS.md approach (Task 7).
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config (CompilationConfig): Compilation configuration.
|
||||||
|
primitives (PrimitiveCollection): Primitives to compile.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
CompilationResult: Result of distributed compilation.
|
||||||
|
"""
|
||||||
|
from .distributed_compiler import DistributedAgentsCompiler
|
||||||
|
|
||||||
|
# Create distributed compiler
|
||||||
|
distributed_compiler = DistributedAgentsCompiler(str(self.base_dir))
|
||||||
|
|
||||||
|
# Prepare configuration for distributed compilation
|
||||||
|
distributed_config = {
|
||||||
|
'min_instructions_per_file': config.min_instructions_per_file,
|
||||||
|
# max_depth removed - full project analysis
|
||||||
|
'source_attribution': config.source_attribution,
|
||||||
|
'debug': config.debug,
|
||||||
|
'clean_orphaned': config.clean_orphaned,
|
||||||
|
'dry_run': config.dry_run
|
||||||
|
}
|
||||||
|
|
||||||
|
# Compile distributed
|
||||||
|
distributed_result = distributed_compiler.compile_distributed(primitives, distributed_config)
|
||||||
|
|
||||||
|
# Display professional compilation output (always show, not just in debug)
|
||||||
|
compilation_results = distributed_compiler.get_compilation_results_for_display(config.dry_run)
|
||||||
|
if compilation_results:
|
||||||
|
if config.debug or config.trace:
|
||||||
|
# Verbose mode with mathematical analysis
|
||||||
|
output = distributed_compiler.output_formatter.format_verbose(compilation_results)
|
||||||
|
elif config.dry_run:
|
||||||
|
# Dry run mode with placement preview
|
||||||
|
output = distributed_compiler.output_formatter.format_dry_run(compilation_results)
|
||||||
|
else:
|
||||||
|
# Default mode with essential information
|
||||||
|
output = distributed_compiler.output_formatter.format_default(compilation_results)
|
||||||
|
|
||||||
|
# Display the professional output
|
||||||
|
print(output)
|
||||||
|
|
||||||
|
if not distributed_result.success:
|
||||||
|
self.warnings.extend(distributed_result.warnings)
|
||||||
|
self.errors.extend(distributed_result.errors)
|
||||||
|
return CompilationResult(
|
||||||
|
success=False,
|
||||||
|
output_path="",
|
||||||
|
content="",
|
||||||
|
warnings=self.warnings.copy(),
|
||||||
|
errors=self.errors.copy(),
|
||||||
|
stats=distributed_result.stats
|
||||||
|
)
|
||||||
|
|
||||||
|
# Handle dry-run mode (preview placement without writing files)
|
||||||
|
if config.dry_run:
|
||||||
|
# Count files that would be written (directories that exist)
|
||||||
|
successful_writes = 0
|
||||||
|
for agents_path in distributed_result.content_map.keys():
|
||||||
|
if agents_path.parent.exists():
|
||||||
|
successful_writes += 1
|
||||||
|
|
||||||
|
# Update stats with actual files that would be written
|
||||||
|
if distributed_result.stats:
|
||||||
|
distributed_result.stats["agents_files_generated"] = successful_writes
|
||||||
|
|
||||||
|
# Don't write files in preview mode - output already shown above
|
||||||
|
return CompilationResult(
|
||||||
|
success=True,
|
||||||
|
output_path="Preview mode - no files written",
|
||||||
|
content=self._generate_placement_summary(distributed_result),
|
||||||
|
warnings=distributed_result.warnings,
|
||||||
|
errors=distributed_result.errors,
|
||||||
|
stats=distributed_result.stats
|
||||||
|
)
|
||||||
|
|
||||||
|
# Write distributed AGENTS.md files
|
||||||
|
successful_writes = 0
|
||||||
|
total_content_entries = len(distributed_result.content_map)
|
||||||
|
|
||||||
|
for agents_path, content in distributed_result.content_map.items():
|
||||||
|
try:
|
||||||
|
self._write_distributed_file(agents_path, content, config)
|
||||||
|
successful_writes += 1
|
||||||
|
except OSError as e:
|
||||||
|
self.errors.append(f"Failed to write {agents_path}: {str(e)}")
|
||||||
|
|
||||||
|
# Update stats with actual files written
|
||||||
|
if distributed_result.stats:
|
||||||
|
distributed_result.stats["agents_files_generated"] = successful_writes
|
||||||
|
|
||||||
|
# Merge warnings and errors
|
||||||
|
self.warnings.extend(distributed_result.warnings)
|
||||||
|
self.errors.extend(distributed_result.errors)
|
||||||
|
|
||||||
|
# Create summary for backward compatibility
|
||||||
|
summary_content = self._generate_distributed_summary(distributed_result, config)
|
||||||
|
|
||||||
|
return CompilationResult(
|
||||||
|
success=len(self.errors) == 0,
|
||||||
|
output_path=f"Distributed: {len(distributed_result.placements)} AGENTS.md files",
|
||||||
|
content=summary_content,
|
||||||
|
warnings=self.warnings.copy(),
|
||||||
|
errors=self.errors.copy(),
|
||||||
|
stats=distributed_result.stats
|
||||||
|
)
|
||||||
|
|
||||||
|
def _compile_single_file(self, config: CompilationConfig, primitives: PrimitiveCollection) -> CompilationResult:
|
||||||
|
"""Compile using traditional single-file approach (backward compatibility).
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config (CompilationConfig): Compilation configuration.
|
||||||
|
primitives (PrimitiveCollection): Primitives to compile.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
CompilationResult: Result of single-file compilation.
|
||||||
|
"""
|
||||||
|
# Validate primitives
|
||||||
|
validation_errors = self.validate_primitives(primitives)
|
||||||
|
if validation_errors:
|
||||||
|
self.errors.extend(validation_errors)
|
||||||
|
|
||||||
|
# Generate template data
|
||||||
|
template_data = self._generate_template_data(primitives, config)
|
||||||
|
|
||||||
|
# Generate final output
|
||||||
|
content = self.generate_output(template_data, config)
|
||||||
|
|
||||||
|
# Write output file (constitution injection handled externally in CLI)
|
||||||
|
output_path = str(self.base_dir / config.output_path)
|
||||||
|
if not config.dry_run:
|
||||||
|
self._write_output_file(output_path, content)
|
||||||
|
|
||||||
|
# Compile statistics
|
||||||
|
stats = self._compile_stats(primitives, template_data)
|
||||||
|
|
||||||
|
return CompilationResult(
|
||||||
|
success=len(self.errors) == 0,
|
||||||
|
output_path=output_path,
|
||||||
|
content=content,
|
||||||
|
warnings=self.warnings.copy(),
|
||||||
|
errors=self.errors.copy(),
|
||||||
|
stats=stats
|
||||||
|
)
|
||||||
|
|
||||||
|
def validate_primitives(self, primitives: PrimitiveCollection) -> List[str]:
|
||||||
|
"""Validate primitives for compilation.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
primitives (PrimitiveCollection): Collection of primitives to validate.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[str]: List of validation errors.
|
||||||
|
"""
|
||||||
|
errors = []
|
||||||
|
|
||||||
|
# Validate each primitive
|
||||||
|
for primitive in primitives.all_primitives():
|
||||||
|
primitive_errors = primitive.validate()
|
||||||
|
if primitive_errors:
|
||||||
|
try:
|
||||||
|
# Try to get relative path, but fall back to absolute if it fails
|
||||||
|
file_path = str(primitive.file_path.relative_to(self.base_dir))
|
||||||
|
except ValueError:
|
||||||
|
# File is outside base_dir, use absolute path
|
||||||
|
file_path = str(primitive.file_path)
|
||||||
|
|
||||||
|
for error in primitive_errors:
|
||||||
|
# Treat validation errors as warnings instead of hard errors
|
||||||
|
# This allows compilation to continue with incomplete primitives
|
||||||
|
self.warnings.append(f"{file_path}: {error}")
|
||||||
|
|
||||||
|
# Validate markdown links in each primitive's content using its own directory as base
|
||||||
|
if hasattr(primitive, 'content') and primitive.content:
|
||||||
|
primitive_dir = primitive.file_path.parent
|
||||||
|
link_errors = validate_link_targets(primitive.content, primitive_dir)
|
||||||
|
if link_errors:
|
||||||
|
try:
|
||||||
|
file_path = str(primitive.file_path.relative_to(self.base_dir))
|
||||||
|
except ValueError:
|
||||||
|
file_path = str(primitive.file_path)
|
||||||
|
|
||||||
|
for link_error in link_errors:
|
||||||
|
self.warnings.append(f"{file_path}: {link_error}")
|
||||||
|
|
||||||
|
return errors
|
||||||
|
|
||||||
|
def generate_output(self, template_data: TemplateData, config: CompilationConfig) -> str:
|
||||||
|
"""Generate the final AGENTS.md output.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
template_data (TemplateData): Data for template generation.
|
||||||
|
config (CompilationConfig): Compilation configuration.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Generated AGENTS.md content.
|
||||||
|
"""
|
||||||
|
content = generate_agents_md_template(template_data)
|
||||||
|
|
||||||
|
# Resolve markdown links if enabled
|
||||||
|
if config.resolve_links:
|
||||||
|
content = resolve_markdown_links(content, self.base_dir)
|
||||||
|
|
||||||
|
return content
|
||||||
|
|
||||||
|
def _generate_template_data(self, primitives: PrimitiveCollection, config: CompilationConfig) -> TemplateData:
|
||||||
|
"""Generate template data from primitives and configuration.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
primitives (PrimitiveCollection): Discovered primitives.
|
||||||
|
config (CompilationConfig): Compilation configuration.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
TemplateData: Template data for generation.
|
||||||
|
"""
|
||||||
|
# Build instructions content
|
||||||
|
instructions_content = build_conditional_sections(primitives.instructions)
|
||||||
|
|
||||||
|
# Metadata (version only; timestamp intentionally omitted for determinism)
|
||||||
|
version = get_version()
|
||||||
|
|
||||||
|
# Handle chatmode content
|
||||||
|
chatmode_content = None
|
||||||
|
if config.chatmode:
|
||||||
|
chatmode = find_chatmode_by_name(primitives.chatmodes, config.chatmode)
|
||||||
|
if chatmode:
|
||||||
|
chatmode_content = chatmode.content
|
||||||
|
else:
|
||||||
|
self.warnings.append(f"Chatmode '{config.chatmode}' not found")
|
||||||
|
|
||||||
|
return TemplateData(
|
||||||
|
instructions_content=instructions_content,
|
||||||
|
version=version,
|
||||||
|
chatmode_content=chatmode_content
|
||||||
|
)
|
||||||
|
|
||||||
|
def _write_output_file(self, output_path: str, content: str) -> None:
|
||||||
|
"""Write the generated content to the output file.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
output_path (str): Path to write the output.
|
||||||
|
content (str): Content to write.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
with open(output_path, 'w', encoding='utf-8') as f:
|
||||||
|
f.write(content)
|
||||||
|
except OSError as e:
|
||||||
|
self.errors.append(f"Failed to write output file {output_path}: {str(e)}")
|
||||||
|
|
||||||
|
def _compile_stats(self, primitives: PrimitiveCollection, template_data: TemplateData) -> Dict[str, Any]:
|
||||||
|
"""Compile statistics about the compilation.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
primitives (PrimitiveCollection): Discovered primitives.
|
||||||
|
template_data (TemplateData): Generated template data.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict[str, Any]: Compilation statistics.
|
||||||
|
"""
|
||||||
|
return {
|
||||||
|
"primitives_found": primitives.count(),
|
||||||
|
"chatmodes": len(primitives.chatmodes),
|
||||||
|
"instructions": len(primitives.instructions),
|
||||||
|
"contexts": len(primitives.contexts),
|
||||||
|
"content_length": len(template_data.instructions_content),
|
||||||
|
# timestamp removed
|
||||||
|
"version": template_data.version
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _write_distributed_file(self, agents_path: Path, content: str, config: CompilationConfig) -> None:
|
||||||
|
"""Write a distributed AGENTS.md file with constitution injection support.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
agents_path (Path): Path to write the AGENTS.md file.
|
||||||
|
content (str): Content to write.
|
||||||
|
config (CompilationConfig): Compilation configuration.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
# Handle constitution injection for distributed files
|
||||||
|
final_content = content
|
||||||
|
|
||||||
|
if config.with_constitution:
|
||||||
|
# Try to inject constitution if available
|
||||||
|
try:
|
||||||
|
from .injector import ConstitutionInjector
|
||||||
|
injector = ConstitutionInjector(str(agents_path.parent))
|
||||||
|
final_content, c_status, c_hash = injector.inject(
|
||||||
|
content,
|
||||||
|
with_constitution=True,
|
||||||
|
output_path=agents_path
|
||||||
|
)
|
||||||
|
except Exception:
|
||||||
|
# If constitution injection fails, use original content
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Create directory if it doesn't exist
|
||||||
|
agents_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
# Write the file
|
||||||
|
with open(agents_path, 'w', encoding='utf-8') as f:
|
||||||
|
f.write(final_content)
|
||||||
|
|
||||||
|
except OSError as e:
|
||||||
|
raise OSError(f"Failed to write distributed AGENTS.md file {agents_path}: {str(e)}")
|
||||||
|
|
||||||
|
def _display_placement_preview(self, distributed_result) -> None:
|
||||||
|
"""Display placement preview for --show-placement mode.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
distributed_result: Result from distributed compilation.
|
||||||
|
"""
|
||||||
|
print("🔍 Distributed AGENTS.md Placement Preview:")
|
||||||
|
print()
|
||||||
|
|
||||||
|
for placement in distributed_result.placements:
|
||||||
|
try:
|
||||||
|
rel_path = placement.agents_path.relative_to(self.base_dir.resolve())
|
||||||
|
except ValueError:
|
||||||
|
# Fallback for path resolution issues
|
||||||
|
rel_path = placement.agents_path
|
||||||
|
print(f"📄 {rel_path}")
|
||||||
|
print(f" Instructions: {len(placement.instructions)}")
|
||||||
|
print(f" Patterns: {', '.join(sorted(placement.coverage_patterns))}")
|
||||||
|
if placement.source_attribution:
|
||||||
|
sources = set(placement.source_attribution.values())
|
||||||
|
print(f" Sources: {', '.join(sorted(sources))}")
|
||||||
|
print()
|
||||||
|
|
||||||
|
def _display_trace_info(self, distributed_result, primitives: PrimitiveCollection) -> None:
|
||||||
|
"""Display detailed trace information for --trace mode.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
distributed_result: Result from distributed compilation.
|
||||||
|
primitives (PrimitiveCollection): Full primitive collection.
|
||||||
|
"""
|
||||||
|
print("🔍 Distributed Compilation Trace:")
|
||||||
|
print()
|
||||||
|
|
||||||
|
for placement in distributed_result.placements:
|
||||||
|
try:
|
||||||
|
rel_path = placement.agents_path.relative_to(self.base_dir.resolve())
|
||||||
|
except ValueError:
|
||||||
|
rel_path = placement.agents_path
|
||||||
|
print(f"📄 {rel_path}")
|
||||||
|
|
||||||
|
for instruction in placement.instructions:
|
||||||
|
source = getattr(instruction, 'source', 'local')
|
||||||
|
try:
|
||||||
|
inst_path = instruction.file_path.relative_to(self.base_dir.resolve())
|
||||||
|
except ValueError:
|
||||||
|
inst_path = instruction.file_path
|
||||||
|
|
||||||
|
print(f" • {instruction.apply_to or 'no pattern'} <- {source} {inst_path}")
|
||||||
|
print()
|
||||||
|
|
||||||
|
def _generate_placement_summary(self, distributed_result) -> str:
|
||||||
|
"""Generate a text summary of placement results.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
distributed_result: Result from distributed compilation.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Text summary of placements.
|
||||||
|
"""
|
||||||
|
lines = ["Distributed AGENTS.md Placement Summary:", ""]
|
||||||
|
|
||||||
|
for placement in distributed_result.placements:
|
||||||
|
try:
|
||||||
|
rel_path = placement.agents_path.relative_to(self.base_dir.resolve())
|
||||||
|
except ValueError:
|
||||||
|
rel_path = placement.agents_path
|
||||||
|
lines.append(f"📄 {rel_path}")
|
||||||
|
lines.append(f" Instructions: {len(placement.instructions)}")
|
||||||
|
lines.append(f" Patterns: {', '.join(sorted(placement.coverage_patterns))}")
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
lines.append(f"Total AGENTS.md files: {len(distributed_result.placements)}")
|
||||||
|
return "\n".join(lines)
|
||||||
|
|
||||||
|
def _generate_distributed_summary(self, distributed_result, config: CompilationConfig) -> str:
|
||||||
|
"""Generate a summary of distributed compilation results.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
distributed_result: Result from distributed compilation.
|
||||||
|
config (CompilationConfig): Compilation configuration.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Summary content.
|
||||||
|
"""
|
||||||
|
lines = [
|
||||||
|
"# Distributed AGENTS.md Compilation Summary",
|
||||||
|
"",
|
||||||
|
f"Generated {len(distributed_result.placements)} AGENTS.md files:",
|
||||||
|
""
|
||||||
|
]
|
||||||
|
|
||||||
|
for placement in distributed_result.placements:
|
||||||
|
try:
|
||||||
|
rel_path = placement.agents_path.relative_to(self.base_dir.resolve())
|
||||||
|
except ValueError:
|
||||||
|
rel_path = placement.agents_path
|
||||||
|
lines.append(f"- {rel_path} ({len(placement.instructions)} instructions)")
|
||||||
|
|
||||||
|
lines.extend([
|
||||||
|
"",
|
||||||
|
f"Total instructions: {distributed_result.stats.get('total_instructions_placed', 0)}",
|
||||||
|
f"Total patterns: {distributed_result.stats.get('total_patterns_covered', 0)}",
|
||||||
|
"",
|
||||||
|
"Use 'apm compile --single-agents' for traditional single-file compilation."
|
||||||
|
])
|
||||||
|
|
||||||
|
return "\n".join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
def compile_agents_md(
|
||||||
|
primitives: Optional[PrimitiveCollection] = None,
|
||||||
|
output_path: str = "AGENTS.md",
|
||||||
|
chatmode: Optional[str] = None,
|
||||||
|
dry_run: bool = False,
|
||||||
|
base_dir: str = "."
|
||||||
|
) -> str:
|
||||||
|
"""Generate AGENTS.md with conditional sections.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
primitives (Optional[PrimitiveCollection]): Primitives to use, or None to discover.
|
||||||
|
output_path (str): Output file path. Defaults to "AGENTS.md".
|
||||||
|
chatmode (str): Specific chatmode to use, or None for default.
|
||||||
|
dry_run (bool): If True, don't write output file. Defaults to False.
|
||||||
|
base_dir (str): Base directory for compilation. Defaults to current directory.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Generated AGENTS.md content.
|
||||||
|
"""
|
||||||
|
# Create configuration - use single-file mode for backward compatibility
|
||||||
|
config = CompilationConfig(
|
||||||
|
output_path=output_path,
|
||||||
|
chatmode=chatmode,
|
||||||
|
dry_run=dry_run,
|
||||||
|
strategy="single-file" # Force single-file mode for backward compatibility
|
||||||
|
)
|
||||||
|
|
||||||
|
# Create compiler and compile
|
||||||
|
compiler = AgentsCompiler(base_dir)
|
||||||
|
result = compiler.compile(config, primitives)
|
||||||
|
|
||||||
|
if not result.success:
|
||||||
|
raise RuntimeError(f"Compilation failed: {'; '.join(result.errors)}")
|
||||||
|
|
||||||
|
return result.content
|
||||||
18
src/apm_cli/compilation/constants.py
Normal file
18
src/apm_cli/compilation/constants.py
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
"""Shared constants for compilation extensions (constitution injection, etc.).
|
||||||
|
|
||||||
|
Also contains shared markers for build metadata stabilization. We intentionally
|
||||||
|
avoid timestamps in generated artifacts to guarantee byte-level idempotency; a
|
||||||
|
deterministic Build ID (content hash) is substituted post-generation.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Constitution injection markers
|
||||||
|
CONSTITUTION_MARKER_BEGIN = "<!-- SPEC-KIT CONSTITUTION: BEGIN -->"
|
||||||
|
CONSTITUTION_MARKER_END = "<!-- SPEC-KIT CONSTITUTION: END -->"
|
||||||
|
CONSTITUTION_RELATIVE_PATH = ".specify/memory/constitution.md" # repo-root relative
|
||||||
|
|
||||||
|
# Build ID placeholder & regex pattern (line-level). The placeholder line is
|
||||||
|
# inserted during initial template generation; after all transformations
|
||||||
|
# (constitution injection, link resolution, etc.) we compute a SHA256 of the
|
||||||
|
# final content with this line removed and then replace it with the truncated
|
||||||
|
# hash. This ensures the hash is not self-referential and remains stable.
|
||||||
|
BUILD_ID_PLACEHOLDER = "<!-- Build ID: __BUILD_ID__ -->"
|
||||||
33
src/apm_cli/compilation/constitution.py
Normal file
33
src/apm_cli/compilation/constitution.py
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
"""Utilities for reading Spec Kit style constitution file."""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from .constants import CONSTITUTION_RELATIVE_PATH
|
||||||
|
|
||||||
|
|
||||||
|
def find_constitution(base_dir: Path) -> Path:
|
||||||
|
"""Return path to constitution.md if present, else Path that does not exist.
|
||||||
|
|
||||||
|
We keep logic trivial for Phase 0: fixed location under memory/.
|
||||||
|
Later phases may support multiple shards / namespacing.
|
||||||
|
"""
|
||||||
|
return base_dir / CONSTITUTION_RELATIVE_PATH
|
||||||
|
|
||||||
|
|
||||||
|
def read_constitution(base_dir: Path) -> Optional[str]:
|
||||||
|
"""Read full constitution content if file exists.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
base_dir: Repository root path.
|
||||||
|
Returns:
|
||||||
|
Full file text or None if absent.
|
||||||
|
"""
|
||||||
|
path = find_constitution(base_dir)
|
||||||
|
if not path.exists() or not path.is_file():
|
||||||
|
return None
|
||||||
|
try:
|
||||||
|
return path.read_text(encoding="utf-8")
|
||||||
|
except OSError:
|
||||||
|
return None
|
||||||
96
src/apm_cli/compilation/constitution_block.py
Normal file
96
src/apm_cli/compilation/constitution_block.py
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
"""Rendering & parsing of injected constitution block in AGENTS.md."""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import hashlib
|
||||||
|
import re
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from .constants import (
|
||||||
|
CONSTITUTION_MARKER_BEGIN,
|
||||||
|
CONSTITUTION_MARKER_END,
|
||||||
|
CONSTITUTION_RELATIVE_PATH,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
HASH_PREFIX = "hash:"
|
||||||
|
|
||||||
|
|
||||||
|
def compute_constitution_hash(content: str) -> str:
|
||||||
|
"""Compute stable truncated SHA256 hash of full constitution content."""
|
||||||
|
sha = hashlib.sha256(content.encode("utf-8"))
|
||||||
|
return sha.hexdigest()[:12]
|
||||||
|
|
||||||
|
|
||||||
|
def render_block(constitution_content: str) -> str:
|
||||||
|
"""Render full constitution block with markers and hash line.
|
||||||
|
|
||||||
|
The block mirrors spec requirement: entire file as-is within markers.
|
||||||
|
"""
|
||||||
|
h = compute_constitution_hash(constitution_content)
|
||||||
|
header_meta = f"{HASH_PREFIX} {h} path: {CONSTITUTION_RELATIVE_PATH}"
|
||||||
|
# Ensure trailing newline for clean separation from compiled content
|
||||||
|
body = constitution_content.rstrip() + "\n"
|
||||||
|
return (
|
||||||
|
f"{CONSTITUTION_MARKER_BEGIN}\n"
|
||||||
|
f"{header_meta}\n"
|
||||||
|
f"{body}"
|
||||||
|
f"{CONSTITUTION_MARKER_END}\n"
|
||||||
|
"\n" # blank line after block
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ExistingBlock:
|
||||||
|
raw: str
|
||||||
|
hash: Optional[str]
|
||||||
|
start_index: int
|
||||||
|
end_index: int
|
||||||
|
|
||||||
|
|
||||||
|
BLOCK_REGEX = re.compile(
|
||||||
|
rf"({re.escape(CONSTITUTION_MARKER_BEGIN)})(.*?)({re.escape(CONSTITUTION_MARKER_END)})",
|
||||||
|
re.DOTALL,
|
||||||
|
)
|
||||||
|
|
||||||
|
HASH_LINE_REGEX = re.compile(r"hash:\s*([0-9a-fA-F]{6,64})")
|
||||||
|
|
||||||
|
|
||||||
|
def find_existing_block(content: str) -> Optional[ExistingBlock]:
|
||||||
|
"""Locate existing constitution block and extract its hash if present."""
|
||||||
|
match = BLOCK_REGEX.search(content)
|
||||||
|
if not match:
|
||||||
|
return None
|
||||||
|
block_text = match.group(0)
|
||||||
|
hash_match = HASH_LINE_REGEX.search(block_text)
|
||||||
|
h = hash_match.group(1) if hash_match else None
|
||||||
|
return ExistingBlock(raw=block_text, hash=h, start_index=match.start(), end_index=match.end())
|
||||||
|
|
||||||
|
|
||||||
|
def inject_or_update(existing_agents: str, new_block: str, place_top: bool = True) -> tuple[str, str]:
|
||||||
|
"""Insert or update constitution block in existing AGENTS.md content.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
existing_agents: Current AGENTS.md text (may be empty).
|
||||||
|
new_block: Rendered constitution block (already ends with newline).
|
||||||
|
place_top: Always True for Phase 0 (prepend at top).
|
||||||
|
Returns:
|
||||||
|
(updated_text, status) where status in CREATED|UPDATED|UNCHANGED.
|
||||||
|
"""
|
||||||
|
existing_block = find_existing_block(existing_agents)
|
||||||
|
if existing_block:
|
||||||
|
if existing_block.raw == new_block.rstrip(): # exclude trailing blank block newline
|
||||||
|
return existing_agents, "UNCHANGED"
|
||||||
|
# Replace existing block span with new block
|
||||||
|
updated = existing_agents[: existing_block.start_index] + new_block.rstrip() + existing_agents[existing_block.end_index :]
|
||||||
|
# Ensure trailing newline after block + rest
|
||||||
|
if not updated.startswith(new_block):
|
||||||
|
# If markers were not at top previously and we want top placement, move them
|
||||||
|
if place_top:
|
||||||
|
body_without_block = updated.replace(new_block.rstrip(), "").lstrip("\n")
|
||||||
|
updated = new_block + body_without_block
|
||||||
|
return updated, "UPDATED"
|
||||||
|
# No existing block
|
||||||
|
if place_top:
|
||||||
|
return new_block + existing_agents.lstrip("\n"), "CREATED"
|
||||||
|
return existing_agents + ("\n" if not existing_agents.endswith("\n") else "") + new_block, "CREATED"
|
||||||
1163
src/apm_cli/compilation/context_optimizer.py
Normal file
1163
src/apm_cli/compilation/context_optimizer.py
Normal file
File diff suppressed because it is too large
Load Diff
685
src/apm_cli/compilation/distributed_compiler.py
Normal file
685
src/apm_cli/compilation/distributed_compiler.py
Normal file
@@ -0,0 +1,685 @@
|
|||||||
|
"""Distributed AGENTS.md compilation system following the Minimal Context Principle.
|
||||||
|
|
||||||
|
This module implements hierarchical directory-based distribution to generate multiple
|
||||||
|
AGENTS.md files across a project's directory structure, following the AGENTS.md standard
|
||||||
|
for nested agent context files.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Optional, Set, Tuple
|
||||||
|
from collections import defaultdict
|
||||||
|
|
||||||
|
from ..primitives.models import Instruction, PrimitiveCollection
|
||||||
|
from ..version import get_version
|
||||||
|
from .template_builder import TemplateData, find_chatmode_by_name
|
||||||
|
from .constants import BUILD_ID_PLACEHOLDER
|
||||||
|
from .context_optimizer import ContextOptimizer
|
||||||
|
from ..output.formatters import CompilationFormatter
|
||||||
|
from ..output.models import CompilationResults
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class DirectoryMap:
|
||||||
|
"""Mapping of directory structure analysis."""
|
||||||
|
directories: Dict[Path, Set[str]] # directory -> set of applicable file patterns
|
||||||
|
depth_map: Dict[Path, int] # directory -> depth level
|
||||||
|
parent_map: Dict[Path, Optional[Path]] # directory -> parent directory
|
||||||
|
|
||||||
|
def get_max_depth(self) -> int:
|
||||||
|
"""Get maximum depth in the directory structure."""
|
||||||
|
return max(self.depth_map.values()) if self.depth_map else 0
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class PlacementResult:
|
||||||
|
"""Result of AGENTS.md placement analysis."""
|
||||||
|
agents_path: Path
|
||||||
|
instructions: List[Instruction]
|
||||||
|
inherited_instructions: List[Instruction] = field(default_factory=list)
|
||||||
|
coverage_patterns: Set[str] = field(default_factory=set)
|
||||||
|
source_attribution: Dict[str, str] = field(default_factory=dict) # instruction_id -> source
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class CompilationResult:
|
||||||
|
"""Result of distributed AGENTS.md compilation."""
|
||||||
|
success: bool
|
||||||
|
placements: List[PlacementResult]
|
||||||
|
content_map: Dict[Path, str] # agents_path -> content
|
||||||
|
warnings: List[str] = field(default_factory=list)
|
||||||
|
errors: List[str] = field(default_factory=list)
|
||||||
|
stats: Dict[str, float] = field(default_factory=dict) # Support optimization metrics
|
||||||
|
|
||||||
|
|
||||||
|
class DistributedAgentsCompiler:
|
||||||
|
"""Main compiler for generating distributed AGENTS.md files."""
|
||||||
|
|
||||||
|
def __init__(self, base_dir: str = "."):
|
||||||
|
"""Initialize the distributed AGENTS.md compiler.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
base_dir (str): Base directory for compilation.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
self.base_dir = Path(base_dir).resolve()
|
||||||
|
except (OSError, FileNotFoundError):
|
||||||
|
self.base_dir = Path(base_dir).absolute()
|
||||||
|
|
||||||
|
self.warnings: List[str] = []
|
||||||
|
self.errors: List[str] = []
|
||||||
|
self.total_files_written = 0
|
||||||
|
self.context_optimizer = ContextOptimizer(str(self.base_dir))
|
||||||
|
self.output_formatter = CompilationFormatter()
|
||||||
|
self._placement_map = None
|
||||||
|
|
||||||
|
def compile_distributed(
|
||||||
|
self,
|
||||||
|
primitives: PrimitiveCollection,
|
||||||
|
config: Optional[dict] = None
|
||||||
|
) -> CompilationResult:
|
||||||
|
"""Compile primitives into distributed AGENTS.md files.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
primitives (PrimitiveCollection): Collection of primitives to compile.
|
||||||
|
config (Optional[dict]): Configuration for distributed compilation.
|
||||||
|
- clean_orphaned (bool): Remove orphaned AGENTS.md files. Default: False
|
||||||
|
- dry_run (bool): Preview mode, don't write files. Default: False
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
CompilationResult: Result of the distributed compilation.
|
||||||
|
"""
|
||||||
|
self.warnings.clear()
|
||||||
|
self.errors.clear()
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Configuration with defaults aligned to Minimal Context Principle
|
||||||
|
config = config or {}
|
||||||
|
min_instructions = config.get('min_instructions_per_file', 1) # Default to 1 for minimal context
|
||||||
|
source_attribution = config.get('source_attribution', True)
|
||||||
|
debug = config.get('debug', False)
|
||||||
|
clean_orphaned = config.get('clean_orphaned', False)
|
||||||
|
dry_run = config.get('dry_run', False)
|
||||||
|
|
||||||
|
# Phase 1: Directory structure analysis
|
||||||
|
directory_map = self.analyze_directory_structure(primitives.instructions)
|
||||||
|
|
||||||
|
# Phase 2: Determine optimal AGENTS.md placement
|
||||||
|
placement_map = self.determine_agents_placement(
|
||||||
|
primitives.instructions,
|
||||||
|
directory_map,
|
||||||
|
min_instructions=min_instructions,
|
||||||
|
debug=debug
|
||||||
|
)
|
||||||
|
|
||||||
|
# Phase 3: Generate distributed AGENTS.md files
|
||||||
|
placements = self.generate_distributed_agents_files(
|
||||||
|
placement_map,
|
||||||
|
primitives,
|
||||||
|
source_attribution=source_attribution
|
||||||
|
)
|
||||||
|
|
||||||
|
# Phase 4: Handle orphaned file cleanup
|
||||||
|
generated_paths = [p.agents_path for p in placements]
|
||||||
|
orphaned_files = self._find_orphaned_agents_files(generated_paths)
|
||||||
|
|
||||||
|
if orphaned_files:
|
||||||
|
# Always show warnings about orphaned files
|
||||||
|
warning_messages = self._generate_orphan_warnings(orphaned_files)
|
||||||
|
if warning_messages:
|
||||||
|
self.warnings.extend(warning_messages)
|
||||||
|
|
||||||
|
# Only perform actual cleanup if not dry_run and clean_orphaned is True
|
||||||
|
if not dry_run and clean_orphaned:
|
||||||
|
cleanup_messages = self._cleanup_orphaned_files(orphaned_files, dry_run=False)
|
||||||
|
if cleanup_messages:
|
||||||
|
self.warnings.extend(cleanup_messages)
|
||||||
|
|
||||||
|
# Phase 5: Validate coverage
|
||||||
|
coverage_validation = self._validate_coverage(placements, primitives.instructions)
|
||||||
|
if coverage_validation:
|
||||||
|
self.warnings.extend(coverage_validation)
|
||||||
|
|
||||||
|
# Compile statistics
|
||||||
|
stats = self._compile_distributed_stats(placements, primitives)
|
||||||
|
|
||||||
|
return CompilationResult(
|
||||||
|
success=len(self.errors) == 0,
|
||||||
|
placements=placements,
|
||||||
|
content_map={p.agents_path: self._generate_agents_content(p, primitives) for p in placements},
|
||||||
|
warnings=self.warnings.copy(),
|
||||||
|
errors=self.errors.copy(),
|
||||||
|
stats=stats
|
||||||
|
)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
self.errors.append(f"Distributed compilation failed: {str(e)}")
|
||||||
|
return CompilationResult(
|
||||||
|
success=False,
|
||||||
|
placements=[],
|
||||||
|
content_map={},
|
||||||
|
warnings=self.warnings.copy(),
|
||||||
|
errors=self.errors.copy(),
|
||||||
|
stats={}
|
||||||
|
)
|
||||||
|
|
||||||
|
def analyze_directory_structure(self, instructions: List[Instruction]) -> DirectoryMap:
|
||||||
|
"""Analyze project directory structure based on instruction patterns.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
instructions (List[Instruction]): List of instructions to analyze.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
DirectoryMap: Analysis of the directory structure.
|
||||||
|
"""
|
||||||
|
directories: Dict[Path, Set[str]] = defaultdict(set)
|
||||||
|
depth_map: Dict[Path, int] = {}
|
||||||
|
parent_map: Dict[Path, Optional[Path]] = {}
|
||||||
|
|
||||||
|
# Analyze each instruction's applyTo pattern
|
||||||
|
for instruction in instructions:
|
||||||
|
if not instruction.apply_to:
|
||||||
|
continue
|
||||||
|
|
||||||
|
pattern = instruction.apply_to
|
||||||
|
|
||||||
|
# Extract directory paths from pattern
|
||||||
|
dirs = self._extract_directories_from_pattern(pattern)
|
||||||
|
|
||||||
|
for dir_path in dirs:
|
||||||
|
abs_dir = self.base_dir / dir_path
|
||||||
|
directories[abs_dir].add(pattern)
|
||||||
|
|
||||||
|
# Calculate depth and parent relationships
|
||||||
|
depth = len(abs_dir.relative_to(self.base_dir).parts)
|
||||||
|
depth_map[abs_dir] = depth
|
||||||
|
|
||||||
|
if depth > 0:
|
||||||
|
parent_dir = abs_dir.parent
|
||||||
|
parent_map[abs_dir] = parent_dir
|
||||||
|
# Ensure parent is also tracked
|
||||||
|
if parent_dir not in directories:
|
||||||
|
directories[parent_dir] = set()
|
||||||
|
else:
|
||||||
|
parent_map[abs_dir] = None
|
||||||
|
|
||||||
|
# Add base directory
|
||||||
|
directories[self.base_dir].update(instruction.apply_to for instruction in instructions if instruction.apply_to)
|
||||||
|
depth_map[self.base_dir] = 0
|
||||||
|
parent_map[self.base_dir] = None
|
||||||
|
|
||||||
|
return DirectoryMap(
|
||||||
|
directories=dict(directories),
|
||||||
|
depth_map=depth_map,
|
||||||
|
parent_map=parent_map
|
||||||
|
)
|
||||||
|
|
||||||
|
def determine_agents_placement(
|
||||||
|
self,
|
||||||
|
instructions: List[Instruction],
|
||||||
|
directory_map: DirectoryMap,
|
||||||
|
min_instructions: int = 1,
|
||||||
|
debug: bool = False
|
||||||
|
) -> Dict[Path, List[Instruction]]:
|
||||||
|
"""Determine optimal AGENTS.md file placement using Context Optimization Engine.
|
||||||
|
|
||||||
|
Following the Minimal Context Principle and Context Optimization, creates
|
||||||
|
focused AGENTS.md files that minimize context pollution while maximizing
|
||||||
|
relevance for agents working in specific directories.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
instructions (List[Instruction]): List of instructions to place.
|
||||||
|
directory_map (DirectoryMap): Directory structure analysis.
|
||||||
|
min_instructions (int): Minimum instructions (default 1 for minimal context).
|
||||||
|
max_depth (int): Maximum depth for placement.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict[Path, List[Instruction]]: Optimized mapping of directory paths to instructions.
|
||||||
|
"""
|
||||||
|
# Use the Context Optimization Engine for intelligent placement
|
||||||
|
optimized_placement = self.context_optimizer.optimize_instruction_placement(
|
||||||
|
instructions,
|
||||||
|
verbose=debug,
|
||||||
|
enable_timing=debug # Enable timing when debug mode is on
|
||||||
|
)
|
||||||
|
|
||||||
|
# Special case: if no instructions but constitution exists, create root placement
|
||||||
|
if not optimized_placement:
|
||||||
|
from .constitution import find_constitution
|
||||||
|
constitution_path = find_constitution(Path(self.base_dir))
|
||||||
|
if constitution_path.exists():
|
||||||
|
# Create an empty placement for the root directory to enable verbose output
|
||||||
|
optimized_placement = {Path(self.base_dir): []}
|
||||||
|
|
||||||
|
# Store optimization results for output formatting later
|
||||||
|
# Update with proper dry run status in the final result
|
||||||
|
self._placement_map = optimized_placement
|
||||||
|
|
||||||
|
# Remove the verbose warning log - we'll show this in professional output instead
|
||||||
|
|
||||||
|
# Filter out directories with too few instructions if specified
|
||||||
|
if min_instructions > 1:
|
||||||
|
filtered_placement = {}
|
||||||
|
for dir_path, dir_instructions in optimized_placement.items():
|
||||||
|
if len(dir_instructions) >= min_instructions or dir_path == self.base_dir:
|
||||||
|
filtered_placement[dir_path] = dir_instructions
|
||||||
|
else:
|
||||||
|
# Move instructions to parent directory
|
||||||
|
parent_dir = dir_path.parent if dir_path != self.base_dir else self.base_dir
|
||||||
|
if parent_dir not in filtered_placement:
|
||||||
|
filtered_placement[parent_dir] = []
|
||||||
|
filtered_placement[parent_dir].extend(dir_instructions)
|
||||||
|
|
||||||
|
return filtered_placement
|
||||||
|
|
||||||
|
return optimized_placement
|
||||||
|
|
||||||
|
def generate_distributed_agents_files(
|
||||||
|
self,
|
||||||
|
placement_map: Dict[Path, List[Instruction]],
|
||||||
|
primitives: PrimitiveCollection,
|
||||||
|
source_attribution: bool = True
|
||||||
|
) -> List[PlacementResult]:
|
||||||
|
"""Generate distributed AGENTS.md file contents.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
placement_map (Dict[Path, List[Instruction]]): Directory to instructions mapping.
|
||||||
|
primitives (PrimitiveCollection): Full primitive collection.
|
||||||
|
source_attribution (bool): Whether to include source attribution.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[PlacementResult]: List of placement results with content.
|
||||||
|
"""
|
||||||
|
placements = []
|
||||||
|
|
||||||
|
# Special case: if no instructions but constitution exists, create root placement
|
||||||
|
if not placement_map:
|
||||||
|
from .constitution import find_constitution
|
||||||
|
constitution_path = find_constitution(Path(self.base_dir))
|
||||||
|
if constitution_path.exists():
|
||||||
|
# Create a root placement for constitution-only projects
|
||||||
|
root_path = Path(self.base_dir)
|
||||||
|
agents_path = root_path / "AGENTS.md"
|
||||||
|
|
||||||
|
placement = PlacementResult(
|
||||||
|
agents_path=agents_path,
|
||||||
|
instructions=[], # No instructions, just constitution
|
||||||
|
coverage_patterns=set(), # No patterns since no instructions
|
||||||
|
source_attribution={"constitution": "constitution.md"} if source_attribution else {}
|
||||||
|
)
|
||||||
|
|
||||||
|
placements.append(placement)
|
||||||
|
else:
|
||||||
|
# Normal case: create placements for each entry in placement_map
|
||||||
|
for dir_path, instructions in placement_map.items():
|
||||||
|
agents_path = dir_path / "AGENTS.md"
|
||||||
|
|
||||||
|
# Build source attribution map if enabled
|
||||||
|
source_map = {}
|
||||||
|
if source_attribution:
|
||||||
|
for instruction in instructions:
|
||||||
|
source_info = getattr(instruction, 'source', 'local')
|
||||||
|
source_map[str(instruction.file_path)] = source_info
|
||||||
|
|
||||||
|
# Extract coverage patterns
|
||||||
|
patterns = set()
|
||||||
|
for instruction in instructions:
|
||||||
|
if instruction.apply_to:
|
||||||
|
patterns.add(instruction.apply_to)
|
||||||
|
|
||||||
|
placement = PlacementResult(
|
||||||
|
agents_path=agents_path,
|
||||||
|
instructions=instructions,
|
||||||
|
coverage_patterns=patterns,
|
||||||
|
source_attribution=source_map
|
||||||
|
)
|
||||||
|
|
||||||
|
placements.append(placement)
|
||||||
|
|
||||||
|
return placements
|
||||||
|
|
||||||
|
def get_compilation_results_for_display(self, is_dry_run: bool = False) -> Optional[CompilationResults]:
|
||||||
|
"""Get compilation results for CLI display integration.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
is_dry_run: Whether this is a dry run.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
CompilationResults if available, None otherwise.
|
||||||
|
"""
|
||||||
|
if self._placement_map:
|
||||||
|
# Generate fresh compilation results with correct dry run status
|
||||||
|
compilation_results = self.context_optimizer.get_compilation_results(
|
||||||
|
self._placement_map,
|
||||||
|
is_dry_run=is_dry_run
|
||||||
|
)
|
||||||
|
|
||||||
|
# Merge distributed compiler's warnings (like orphan warnings) with optimizer warnings
|
||||||
|
all_warnings = compilation_results.warnings + self.warnings
|
||||||
|
|
||||||
|
# Create new compilation results with merged warnings
|
||||||
|
from ..output.models import CompilationResults
|
||||||
|
return CompilationResults(
|
||||||
|
project_analysis=compilation_results.project_analysis,
|
||||||
|
optimization_decisions=compilation_results.optimization_decisions,
|
||||||
|
placement_summaries=compilation_results.placement_summaries,
|
||||||
|
optimization_stats=compilation_results.optimization_stats,
|
||||||
|
warnings=all_warnings,
|
||||||
|
errors=compilation_results.errors + self.errors,
|
||||||
|
is_dry_run=is_dry_run
|
||||||
|
)
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _extract_directories_from_pattern(self, pattern: str) -> List[Path]:
|
||||||
|
"""Extract potential directory paths from a file pattern.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
pattern (str): File pattern like "src/**/*.py" or "docs/*.md"
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[Path]: List of directory paths that could contain matching files.
|
||||||
|
"""
|
||||||
|
directories = []
|
||||||
|
|
||||||
|
# Remove filename part and wildcards to get directory structure
|
||||||
|
# Examples:
|
||||||
|
# "src/**/*.py" -> ["src"]
|
||||||
|
# "docs/*.md" -> ["docs"]
|
||||||
|
# "**/*.py" -> ["."] (current directory)
|
||||||
|
# "*.py" -> ["."] (current directory)
|
||||||
|
|
||||||
|
if pattern.startswith("**/"):
|
||||||
|
# Global pattern - applies to all directories
|
||||||
|
directories.append(Path("."))
|
||||||
|
elif "/" in pattern:
|
||||||
|
# Extract directory part
|
||||||
|
dir_part = pattern.split("/")[0]
|
||||||
|
if not dir_part.startswith("*"):
|
||||||
|
directories.append(Path(dir_part))
|
||||||
|
else:
|
||||||
|
directories.append(Path("."))
|
||||||
|
else:
|
||||||
|
# No directory part - applies to current directory
|
||||||
|
directories.append(Path("."))
|
||||||
|
|
||||||
|
return directories
|
||||||
|
|
||||||
|
def _find_best_directory(
|
||||||
|
self,
|
||||||
|
instruction: Instruction,
|
||||||
|
directory_map: DirectoryMap,
|
||||||
|
max_depth: int
|
||||||
|
) -> Path:
|
||||||
|
"""Find the best directory for placing an instruction.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
instruction (Instruction): Instruction to place.
|
||||||
|
directory_map (DirectoryMap): Directory structure analysis.
|
||||||
|
max_depth (int): Maximum allowed depth.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Path: Best directory path for the instruction.
|
||||||
|
"""
|
||||||
|
if not instruction.apply_to:
|
||||||
|
return self.base_dir
|
||||||
|
|
||||||
|
pattern = instruction.apply_to
|
||||||
|
best_dir = self.base_dir
|
||||||
|
best_specificity = 0
|
||||||
|
|
||||||
|
for dir_path in directory_map.directories:
|
||||||
|
# Skip directories that are too deep
|
||||||
|
if directory_map.depth_map.get(dir_path, 0) > max_depth:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Check if this directory could contain files matching the pattern
|
||||||
|
if pattern in directory_map.directories[dir_path]:
|
||||||
|
# Prefer more specific (deeper) directories
|
||||||
|
specificity = directory_map.depth_map.get(dir_path, 0)
|
||||||
|
if specificity > best_specificity:
|
||||||
|
best_specificity = specificity
|
||||||
|
best_dir = dir_path
|
||||||
|
|
||||||
|
return best_dir
|
||||||
|
|
||||||
|
def _generate_agents_content(
|
||||||
|
self,
|
||||||
|
placement: PlacementResult,
|
||||||
|
primitives: PrimitiveCollection
|
||||||
|
) -> str:
|
||||||
|
"""Generate AGENTS.md content for a specific placement.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
placement (PlacementResult): Placement result with instructions.
|
||||||
|
primitives (PrimitiveCollection): Full primitive collection.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Generated AGENTS.md content.
|
||||||
|
"""
|
||||||
|
sections = []
|
||||||
|
|
||||||
|
# Header with source attribution
|
||||||
|
sections.append("# AGENTS.md")
|
||||||
|
sections.append("<!-- Generated by APM CLI from distributed .apm/ primitives -->")
|
||||||
|
sections.append(BUILD_ID_PLACEHOLDER)
|
||||||
|
sections.append(f"<!-- APM Version: {get_version()} -->")
|
||||||
|
|
||||||
|
# Add source attribution summary if enabled
|
||||||
|
if placement.source_attribution:
|
||||||
|
sources = set(placement.source_attribution.values())
|
||||||
|
if len(sources) > 1:
|
||||||
|
sections.append(f"<!-- Sources: {', '.join(sorted(sources))} -->")
|
||||||
|
else:
|
||||||
|
sections.append(f"<!-- Source: {list(sources)[0] if sources else 'local'} -->")
|
||||||
|
|
||||||
|
sections.append("")
|
||||||
|
|
||||||
|
# Group instructions by pattern
|
||||||
|
pattern_groups: Dict[str, List[Instruction]] = defaultdict(list)
|
||||||
|
for instruction in placement.instructions:
|
||||||
|
if instruction.apply_to:
|
||||||
|
pattern_groups[instruction.apply_to].append(instruction)
|
||||||
|
|
||||||
|
# Generate sections for each pattern
|
||||||
|
for pattern, pattern_instructions in sorted(pattern_groups.items()):
|
||||||
|
sections.append(f"## Files matching `{pattern}`")
|
||||||
|
sections.append("")
|
||||||
|
|
||||||
|
for instruction in pattern_instructions:
|
||||||
|
content = instruction.content.strip()
|
||||||
|
if content:
|
||||||
|
# Add source attribution for individual instructions
|
||||||
|
if placement.source_attribution:
|
||||||
|
source = placement.source_attribution.get(str(instruction.file_path), 'local')
|
||||||
|
try:
|
||||||
|
rel_path = instruction.file_path.relative_to(self.base_dir)
|
||||||
|
except ValueError:
|
||||||
|
rel_path = instruction.file_path
|
||||||
|
|
||||||
|
sections.append(f"<!-- Source: {source} {rel_path} -->")
|
||||||
|
|
||||||
|
sections.append(content)
|
||||||
|
sections.append("")
|
||||||
|
|
||||||
|
# Footer
|
||||||
|
sections.append("---")
|
||||||
|
sections.append("*This file was generated by APM CLI. Do not edit manually.*")
|
||||||
|
sections.append("*To regenerate: `specify apm compile`*")
|
||||||
|
sections.append("")
|
||||||
|
|
||||||
|
return "\n".join(sections)
|
||||||
|
|
||||||
|
def _validate_coverage(
|
||||||
|
self,
|
||||||
|
placements: List[PlacementResult],
|
||||||
|
all_instructions: List[Instruction]
|
||||||
|
) -> List[str]:
|
||||||
|
"""Validate that all instructions are covered by placements.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
placements (List[PlacementResult]): Generated placements.
|
||||||
|
all_instructions (List[Instruction]): All available instructions.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[str]: List of coverage warnings.
|
||||||
|
"""
|
||||||
|
warnings = []
|
||||||
|
placed_instructions = set()
|
||||||
|
|
||||||
|
for placement in placements:
|
||||||
|
placed_instructions.update(str(inst.file_path) for inst in placement.instructions)
|
||||||
|
|
||||||
|
all_instruction_paths = set(str(inst.file_path) for inst in all_instructions)
|
||||||
|
|
||||||
|
missing_instructions = all_instruction_paths - placed_instructions
|
||||||
|
if missing_instructions:
|
||||||
|
warnings.append(f"Instructions not placed in any AGENTS.md: {', '.join(missing_instructions)}")
|
||||||
|
|
||||||
|
return warnings
|
||||||
|
|
||||||
|
def _find_orphaned_agents_files(self, generated_paths: List[Path]) -> List[Path]:
|
||||||
|
"""Find existing AGENTS.md files that weren't generated in the current compilation.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
generated_paths (List[Path]): List of AGENTS.md files generated in current run.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[Path]: List of orphaned AGENTS.md files that should be cleaned up.
|
||||||
|
"""
|
||||||
|
orphaned_files = []
|
||||||
|
generated_set = set(generated_paths)
|
||||||
|
|
||||||
|
# Find all existing AGENTS.md files in the project
|
||||||
|
for agents_file in self.base_dir.rglob("AGENTS.md"):
|
||||||
|
# Skip files that are outside our project or in special directories
|
||||||
|
try:
|
||||||
|
relative_path = agents_file.relative_to(self.base_dir)
|
||||||
|
|
||||||
|
# Skip files in certain directories that shouldn't be cleaned
|
||||||
|
skip_dirs = {".git", ".apm", "node_modules", "__pycache__", ".pytest_cache", "apm_modules"}
|
||||||
|
if any(part in skip_dirs for part in relative_path.parts):
|
||||||
|
continue
|
||||||
|
|
||||||
|
# If this existing file wasn't generated in current run, it's orphaned
|
||||||
|
if agents_file not in generated_set:
|
||||||
|
orphaned_files.append(agents_file)
|
||||||
|
|
||||||
|
except ValueError:
|
||||||
|
# File is outside base_dir, skip it
|
||||||
|
continue
|
||||||
|
|
||||||
|
return orphaned_files
|
||||||
|
|
||||||
|
def _generate_orphan_warnings(self, orphaned_files: List[Path]) -> List[str]:
|
||||||
|
"""Generate warning messages for orphaned AGENTS.md files.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orphaned_files (List[Path]): List of orphaned files to warn about.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[str]: List of warning messages.
|
||||||
|
"""
|
||||||
|
warning_messages = []
|
||||||
|
|
||||||
|
if not orphaned_files:
|
||||||
|
return warning_messages
|
||||||
|
|
||||||
|
# Professional warning format with readable list for multiple files
|
||||||
|
if len(orphaned_files) == 1:
|
||||||
|
rel_path = orphaned_files[0].relative_to(self.base_dir)
|
||||||
|
warning_messages.append(f"Orphaned AGENTS.md found: {rel_path} - run 'apm compile --clean' to remove")
|
||||||
|
else:
|
||||||
|
# For multiple files, create a single multi-line warning message
|
||||||
|
file_list = []
|
||||||
|
for file_path in orphaned_files[:5]: # Show first 5
|
||||||
|
rel_path = file_path.relative_to(self.base_dir)
|
||||||
|
file_list.append(f" • {rel_path}")
|
||||||
|
if len(orphaned_files) > 5:
|
||||||
|
file_list.append(f" • ...and {len(orphaned_files) - 5} more")
|
||||||
|
|
||||||
|
# Create one cohesive warning message
|
||||||
|
files_text = "\n".join(file_list)
|
||||||
|
warning_messages.append(f"Found {len(orphaned_files)} orphaned AGENTS.md files:\n{files_text}\n Run 'apm compile --clean' to remove orphaned files")
|
||||||
|
|
||||||
|
return warning_messages
|
||||||
|
|
||||||
|
def _cleanup_orphaned_files(self, orphaned_files: List[Path], dry_run: bool = False) -> List[str]:
|
||||||
|
"""Actually remove orphaned AGENTS.md files.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orphaned_files (List[Path]): List of orphaned files to remove.
|
||||||
|
dry_run (bool): If True, don't actually remove files, just report what would be removed.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[str]: List of cleanup status messages.
|
||||||
|
"""
|
||||||
|
cleanup_messages = []
|
||||||
|
|
||||||
|
if not orphaned_files:
|
||||||
|
return cleanup_messages
|
||||||
|
|
||||||
|
if dry_run:
|
||||||
|
# In dry-run mode, just report what would be cleaned
|
||||||
|
cleanup_messages.append(f"🧹 Would clean up {len(orphaned_files)} orphaned AGENTS.md files")
|
||||||
|
for file_path in orphaned_files:
|
||||||
|
rel_path = file_path.relative_to(self.base_dir)
|
||||||
|
cleanup_messages.append(f" • {rel_path}")
|
||||||
|
else:
|
||||||
|
# Actually perform the cleanup
|
||||||
|
cleanup_messages.append(f"🧹 Cleaning up {len(orphaned_files)} orphaned AGENTS.md files")
|
||||||
|
for file_path in orphaned_files:
|
||||||
|
try:
|
||||||
|
rel_path = file_path.relative_to(self.base_dir)
|
||||||
|
file_path.unlink()
|
||||||
|
cleanup_messages.append(f" ✓ Removed {rel_path}")
|
||||||
|
except Exception as e:
|
||||||
|
cleanup_messages.append(f" ✗ Failed to remove {rel_path}: {str(e)}")
|
||||||
|
|
||||||
|
return cleanup_messages
|
||||||
|
|
||||||
|
def _compile_distributed_stats(
|
||||||
|
self,
|
||||||
|
placements: List[PlacementResult],
|
||||||
|
primitives: PrimitiveCollection
|
||||||
|
) -> Dict[str, float]:
|
||||||
|
"""Compile statistics about the distributed compilation with optimization metrics.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
placements (List[PlacementResult]): Generated placements.
|
||||||
|
primitives (PrimitiveCollection): Full primitive collection.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict[str, float]: Compilation statistics including optimization metrics.
|
||||||
|
"""
|
||||||
|
total_instructions = sum(len(p.instructions) for p in placements)
|
||||||
|
total_patterns = sum(len(p.coverage_patterns) for p in placements)
|
||||||
|
|
||||||
|
# Get optimization metrics
|
||||||
|
placement_map = {Path(p.agents_path.parent): p.instructions for p in placements}
|
||||||
|
optimization_stats = self.context_optimizer.get_optimization_stats(placement_map)
|
||||||
|
|
||||||
|
# Combine traditional stats with optimization metrics
|
||||||
|
stats = {
|
||||||
|
"agents_files_generated": len(placements),
|
||||||
|
"total_instructions_placed": total_instructions,
|
||||||
|
"total_patterns_covered": total_patterns,
|
||||||
|
"primitives_found": primitives.count(),
|
||||||
|
"chatmodes": len(primitives.chatmodes),
|
||||||
|
"instructions": len(primitives.instructions),
|
||||||
|
"contexts": len(primitives.contexts)
|
||||||
|
}
|
||||||
|
|
||||||
|
# Add optimization metrics from OptimizationStats object
|
||||||
|
if optimization_stats:
|
||||||
|
stats.update({
|
||||||
|
"average_context_efficiency": optimization_stats.average_context_efficiency,
|
||||||
|
"pollution_improvement": optimization_stats.pollution_improvement,
|
||||||
|
"baseline_efficiency": optimization_stats.baseline_efficiency,
|
||||||
|
"placement_accuracy": optimization_stats.placement_accuracy,
|
||||||
|
"generation_time_ms": optimization_stats.generation_time_ms,
|
||||||
|
"total_agents_files": optimization_stats.total_agents_files,
|
||||||
|
"directories_analyzed": optimization_stats.directories_analyzed
|
||||||
|
})
|
||||||
|
|
||||||
|
return stats
|
||||||
91
src/apm_cli/compilation/injector.py
Normal file
91
src/apm_cli/compilation/injector.py
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
"""High-level constitution injection workflow used by compile command."""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional, Literal
|
||||||
|
|
||||||
|
from .constitution import read_constitution
|
||||||
|
from .constitution_block import render_block, find_existing_block
|
||||||
|
from .constants import CONSTITUTION_MARKER_BEGIN, CONSTITUTION_MARKER_END
|
||||||
|
|
||||||
|
InjectionStatus = Literal["CREATED", "UPDATED", "UNCHANGED", "SKIPPED", "MISSING"]
|
||||||
|
|
||||||
|
|
||||||
|
class ConstitutionInjector:
|
||||||
|
"""Encapsulates constitution detection + injection logic."""
|
||||||
|
|
||||||
|
def __init__(self, base_dir: str):
|
||||||
|
self.base_dir = Path(base_dir)
|
||||||
|
|
||||||
|
def inject(self, compiled_content: str, with_constitution: bool, output_path: Path) -> tuple[str, InjectionStatus, Optional[str]]:
|
||||||
|
"""Return final AGENTS.md content after optional injection.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
compiled_content: Newly compiled content (without constitution block).
|
||||||
|
with_constitution: Whether to perform injection (True) or preserve existing block (False).
|
||||||
|
output_path: Existing AGENTS.md path (may not exist) for preservation logic.
|
||||||
|
Returns:
|
||||||
|
(final_content, status, hash_or_none)
|
||||||
|
"""
|
||||||
|
existing_content = ""
|
||||||
|
if output_path.exists():
|
||||||
|
try:
|
||||||
|
existing_content = output_path.read_text(encoding="utf-8")
|
||||||
|
except OSError:
|
||||||
|
existing_content = ""
|
||||||
|
|
||||||
|
# Helper to split header/body from freshly compiled content.
|
||||||
|
def _split_header(content: str) -> tuple[str, str]:
|
||||||
|
# Header ends at the first double newline (blank line separating header from body)
|
||||||
|
marker = "\n\n"
|
||||||
|
if marker in content:
|
||||||
|
idx = content.index(marker)
|
||||||
|
return content[: idx + len(marker)], content[idx + len(marker) :]
|
||||||
|
# Fallback: treat whole content as header
|
||||||
|
return content, ""
|
||||||
|
|
||||||
|
header_part, body_part = _split_header(compiled_content)
|
||||||
|
|
||||||
|
if not with_constitution:
|
||||||
|
# If skipping, we preserve existing block if present but enforce ordering: header first, block (if any), then body.
|
||||||
|
existing_block = find_existing_block(existing_content)
|
||||||
|
if existing_block:
|
||||||
|
final = header_part + existing_block.raw.rstrip() + "\n\n" + body_part.lstrip("\n")
|
||||||
|
return final, "SKIPPED", None
|
||||||
|
return compiled_content, "SKIPPED", None
|
||||||
|
|
||||||
|
constitution_text = read_constitution(self.base_dir)
|
||||||
|
if constitution_text is None:
|
||||||
|
existing_block = find_existing_block(existing_content)
|
||||||
|
if existing_block:
|
||||||
|
final = header_part + existing_block.raw.rstrip() + "\n\n" + body_part.lstrip("\n")
|
||||||
|
return final, "MISSING", None
|
||||||
|
return compiled_content, "MISSING", None
|
||||||
|
|
||||||
|
new_block = render_block(constitution_text)
|
||||||
|
existing_block = find_existing_block(existing_content)
|
||||||
|
|
||||||
|
if existing_block:
|
||||||
|
# Compare raw block bodies (strip trailing newlines for stable compare)
|
||||||
|
if existing_block.raw.rstrip() == new_block.rstrip():
|
||||||
|
status = "UNCHANGED"
|
||||||
|
block_to_use = existing_block.raw.rstrip()
|
||||||
|
else:
|
||||||
|
status = "UPDATED"
|
||||||
|
block_to_use = new_block.rstrip()
|
||||||
|
else:
|
||||||
|
status = "CREATED"
|
||||||
|
block_to_use = new_block.rstrip()
|
||||||
|
|
||||||
|
hash_line = new_block.splitlines()[1] if len(new_block.splitlines()) > 1 else ""
|
||||||
|
hash_value = None
|
||||||
|
if hash_line.startswith("hash:"):
|
||||||
|
parts = hash_line.split()
|
||||||
|
if len(parts) >= 2:
|
||||||
|
hash_value = parts[1]
|
||||||
|
|
||||||
|
final_content = header_part + block_to_use + "\n\n" + body_part.lstrip("\n")
|
||||||
|
# Ensure single trailing newline
|
||||||
|
if not final_content.endswith("\n"):
|
||||||
|
final_content += "\n"
|
||||||
|
return final_content, status, hash_value
|
||||||
181  src/apm_cli/compilation/link_resolver.py  Normal file
@@ -0,0 +1,181 @@
"""Markdown link resolution for AGENTS.md compilation."""

import re
from pathlib import Path
from typing import List, Dict, Tuple, Optional


def resolve_markdown_links(content: str, base_path: Path) -> str:
    """Resolve markdown links and inline referenced content.

    Args:
        content (str): Content with markdown links to resolve.
        base_path (Path): Base directory for resolving relative paths.

    Returns:
        str: Content with resolved links and inlined content where appropriate.
    """
    # Pattern to match markdown links: [text](path)
    link_pattern = r'\[([^\]]+)\]\(([^)]+)\)'

    def replace_link(match):
        text = match.group(1)
        path = match.group(2)

        # Skip external URLs
        if path.startswith(('http://', 'https://', 'ftp://', 'mailto:')):
            return match.group(0)  # Return original link

        # Skip anchors
        if path.startswith('#'):
            return match.group(0)  # Return original link

        # Resolve relative path
        full_path = _resolve_path(path, base_path)

        if full_path and full_path.exists() and full_path.is_file():
            # For certain file types, inline the content
            if full_path.suffix.lower() in ['.md', '.txt']:
                try:
                    file_content = full_path.read_text(encoding='utf-8')
                    # Remove frontmatter if present
                    file_content = _remove_frontmatter(file_content)
                    return f"**{text}**:\n\n{file_content}"
                except (OSError, UnicodeDecodeError):
                    # Fall back to original link if file can't be read
                    return match.group(0)
            else:
                # For other file types, keep the link but update path if needed
                return match.group(0)
        else:
            # File doesn't exist, keep original link (will be caught by validation)
            return match.group(0)

    return re.sub(link_pattern, replace_link, content)


def validate_link_targets(content: str, base_path: Path) -> List[str]:
    """Validate that all referenced files exist.

    Args:
        content (str): Content to validate links in.
        base_path (Path): Base directory for resolving relative paths.

    Returns:
        List[str]: List of error messages for missing or invalid links.
    """
    errors = []

    # Check markdown links
    link_pattern = r'\[([^\]]+)\]\(([^)]+)\)'
    for match in re.finditer(link_pattern, content):
        text = match.group(1)
        path = match.group(2)

        # Skip external URLs and anchors
        if (path.startswith(('http://', 'https://', 'ftp://', 'mailto:')) or
                path.startswith('#')):
            continue

        # Resolve and check path
        full_path = _resolve_path(path, base_path)
        if not full_path or not full_path.exists():
            errors.append(f"Referenced file not found: {path} (in link '{text}')")
        elif not full_path.is_file() and not full_path.is_dir():
            errors.append(f"Referenced path is neither a file nor directory: {path} (in link '{text}')")

    return errors


def _resolve_path(path: str, base_path: Path) -> Optional[Path]:
    """Resolve a relative path against a base path.

    Args:
        path (str): Relative path to resolve.
        base_path (Path): Base directory for resolution.

    Returns:
        Optional[Path]: Resolved path or None if invalid.
    """
    try:
        if Path(path).is_absolute():
            return Path(path)
        else:
            return base_path / path
    except (OSError, ValueError):
        return None


def _remove_frontmatter(content: str) -> str:
    """Remove YAML frontmatter from content.

    Args:
        content (str): Content that may contain frontmatter.

    Returns:
        str: Content without frontmatter.
    """
    # Remove YAML frontmatter (--- at start, --- at end)
    if content.startswith('---\n'):
        lines = content.split('\n')
        in_frontmatter = True
        content_lines = []

        for i, line in enumerate(lines[1:], 1):  # Skip first ---
            if line.strip() == '---' and in_frontmatter:
                in_frontmatter = False
                continue
            if not in_frontmatter:
                content_lines.append(line)

        content = '\n'.join(content_lines)

    return content.strip()


def _detect_circular_references(content: str, base_path: Path, visited: Optional[set] = None) -> List[str]:
    """Detect circular references in markdown links.

    Args:
        content (str): Content to check for circular references.
        base_path (Path): Base directory for resolving paths.
        visited (Optional[set]): Set of already visited files.

    Returns:
        List[str]: List of circular reference errors.
    """
    if visited is None:
        visited = set()

    errors = []
    current_file = base_path

    if current_file in visited:
        errors.append(f"Circular reference detected: {current_file}")
        return errors

    visited.add(current_file)

    # Check markdown links for potential circular references
    link_pattern = r'\[([^\]]+)\]\(([^)]+)\)'
    for match in re.finditer(link_pattern, content):
        path = match.group(2)

        # Skip external URLs and anchors
        if (path.startswith(('http://', 'https://', 'ftp://', 'mailto:')) or
                path.startswith('#')):
            continue

        full_path = _resolve_path(path, base_path.parent if base_path.is_file() else base_path)
        if full_path and full_path.exists() and full_path.is_file():
            if full_path.suffix.lower() in ['.md', '.txt']:
                try:
                    linked_content = full_path.read_text(encoding='utf-8')
                    errors.extend(_detect_circular_references(linked_content, full_path, visited.copy()))
                except (OSError, UnicodeDecodeError):
                    continue

    return errors
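A minimal usage sketch (not part of the diff) — it assumes the package is importable as `apm_cli` and that the referenced local file exists; the paths and link text are illustrative only:

# Illustrative only: resolve links in a small snippet rooted at the current directory.
from pathlib import Path
from apm_cli.compilation.link_resolver import resolve_markdown_links, validate_link_targets

content = "See [Setup Guide](docs/setup.md) and [Website](https://example.com)."
base = Path(".")

# Missing local targets are reported; external URLs and anchors are ignored.
for error in validate_link_targets(content, base):
    print(error)

# Existing .md/.txt targets are inlined under a bold label; other links are left as-is.
print(resolve_markdown_links(content, base))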
138  src/apm_cli/compilation/template_builder.py  Normal file
@@ -0,0 +1,138 @@
"""Template building system for AGENTS.md compilation."""

import re
from dataclasses import dataclass
from pathlib import Path
from typing import List, Dict, Optional, Tuple
from ..primitives.models import Instruction, Chatmode


@dataclass
class TemplateData:
    """Data structure for template generation."""
    instructions_content: str
    # Removed volatile timestamp for deterministic builds
    version: str
    chatmode_content: Optional[str] = None


def build_conditional_sections(instructions: List[Instruction]) -> str:
    """Build sections grouped by applyTo patterns.

    Args:
        instructions (List[Instruction]): List of instruction primitives.

    Returns:
        str: Formatted conditional sections content.
    """
    if not instructions:
        return ""

    # Group instructions by pattern - use raw patterns
    pattern_groups = _group_instructions_by_pattern(instructions)

    sections = []

    for pattern, pattern_instructions in pattern_groups.items():
        sections.append(f"## Files matching `{pattern}`")
        sections.append("")

        # Combine content from all instructions for this pattern
        for instruction in pattern_instructions:
            content = instruction.content.strip()
            if content:
                # Add source file comment before the content
                try:
                    # Try to get relative path for cleaner display
                    if instruction.file_path.is_absolute():
                        relative_path = instruction.file_path.relative_to(Path.cwd())
                    else:
                        relative_path = instruction.file_path
                except (ValueError, OSError):
                    # Fall back to absolute or given path if relative fails
                    relative_path = instruction.file_path

                sections.append(f"<!-- Source: {relative_path} -->")
                sections.append(content)
                sections.append(f"<!-- End source: {relative_path} -->")
                sections.append("")

    return "\n".join(sections)


def find_chatmode_by_name(chatmodes: List[Chatmode], chatmode_name: str) -> Optional[Chatmode]:
    """Find a chatmode by name.

    Args:
        chatmodes (List[Chatmode]): List of available chatmodes.
        chatmode_name (str): Name of the chatmode to find.

    Returns:
        Optional[Chatmode]: The found chatmode, or None if not found.
    """
    for chatmode in chatmodes:
        if chatmode.name == chatmode_name:
            return chatmode
    return None


def _group_instructions_by_pattern(instructions: List[Instruction]) -> Dict[str, List[Instruction]]:
    """Group instructions by applyTo patterns.

    Args:
        instructions (List[Instruction]): List of instructions to group.

    Returns:
        Dict[str, List[Instruction]]: Grouped instructions with raw patterns as keys.
    """
    pattern_groups: Dict[str, List[Instruction]] = {}

    for instruction in instructions:
        if not instruction.apply_to:
            continue

        pattern = instruction.apply_to

        if pattern not in pattern_groups:
            pattern_groups[pattern] = []

        pattern_groups[pattern].append(instruction)

    return pattern_groups


def generate_agents_md_template(template_data: TemplateData) -> str:
    """Generate the complete AGENTS.md file content.

    Args:
        template_data (TemplateData): Data for template generation.

    Returns:
        str: Complete AGENTS.md file content.
    """
    sections = []

    # Header
    sections.append("# AGENTS.md")
    sections.append("<!-- Generated by APM CLI from .apm/ primitives -->")
    from .constants import BUILD_ID_PLACEHOLDER
    sections.append(BUILD_ID_PLACEHOLDER)
    sections.append(f"<!-- APM Version: {template_data.version} -->")
    sections.append("")

    # Chatmode content (if provided)
    if template_data.chatmode_content:
        sections.append(template_data.chatmode_content.strip())
        sections.append("")

    # Instructions content (grouped by patterns)
    if template_data.instructions_content:
        sections.append(template_data.instructions_content)

    # Footer
    sections.append("---")
    sections.append("*This file was generated by APM CLI. Do not edit manually.*")
    sections.append("*To regenerate: `specify apm compile`*")
    sections.append("")

    return "\n".join(sections)
60  src/apm_cli/config.py  Normal file
@@ -0,0 +1,60 @@
"""Configuration management for APM-CLI."""

import os
import json


CONFIG_DIR = os.path.expanduser("~/.apm-cli")
CONFIG_FILE = os.path.join(CONFIG_DIR, "config.json")


def ensure_config_exists():
    """Ensure the configuration directory and file exist."""
    if not os.path.exists(CONFIG_DIR):
        os.makedirs(CONFIG_DIR)

    if not os.path.exists(CONFIG_FILE):
        with open(CONFIG_FILE, "w") as f:
            json.dump({"default_client": "vscode"}, f)


def get_config():
    """Get the current configuration.

    Returns:
        dict: Current configuration.
    """
    ensure_config_exists()
    with open(CONFIG_FILE, "r") as f:
        return json.load(f)


def update_config(updates):
    """Update the configuration with new values.

    Args:
        updates (dict): Dictionary of configuration values to update.
    """
    config = get_config()
    config.update(updates)

    with open(CONFIG_FILE, "w") as f:
        json.dump(config, f, indent=2)


def get_default_client():
    """Get the default MCP client.

    Returns:
        str: Default MCP client type.
    """
    return get_config().get("default_client", "vscode")


def set_default_client(client_type):
    """Set the default MCP client.

    Args:
        client_type (str): Type of client to set as default.
    """
    update_config({"default_client": client_type})
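A minimal usage sketch (not part of the diff), assuming `apm_cli` is importable; it reads and writes the `~/.apm-cli/config.json` file defined above:

from apm_cli.config import get_default_client, set_default_client, get_config

print(get_default_client())   # "vscode" on first run (the file is created on demand)
set_default_client("copilot")
print(get_config())           # {"default_client": "copilot"}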
1  src/apm_cli/core/__init__.py  Normal file
@@ -0,0 +1 @@
"""Core package."""
165  src/apm_cli/core/conflict_detector.py  Normal file
@@ -0,0 +1,165 @@
"""MCP server conflict detection and resolution."""

from typing import Dict, Any
from ..adapters.client.base import MCPClientAdapter


class MCPConflictDetector:
    """Handles detection and resolution of MCP server configuration conflicts."""

    def __init__(self, runtime_adapter: MCPClientAdapter):
        """Initialize the conflict detector.

        Args:
            runtime_adapter: The MCP client adapter for the target runtime.
        """
        self.adapter = runtime_adapter

    def check_server_exists(self, server_reference: str) -> bool:
        """Check if a server already exists in the configuration.

        Args:
            server_reference: Server reference to check (e.g., 'github', 'io.github.github/github-mcp-server').

        Returns:
            True if server already exists, False otherwise.
        """
        existing_servers = self.get_existing_server_configs()

        # Try to get server info from registry for UUID comparison
        try:
            server_info = self.adapter.registry_client.find_server_by_reference(server_reference)
            if server_info and "id" in server_info:
                server_uuid = server_info["id"]

                # Check if any existing server has the same UUID
                for existing_name, existing_config in existing_servers.items():
                    if isinstance(existing_config, dict) and existing_config.get("id") == server_uuid:
                        return True
        except Exception:
            # If registry lookup fails, fall back to canonical name comparison
            canonical_name = self.get_canonical_server_name(server_reference)

            # Check for exact canonical name match
            if canonical_name in existing_servers:
                return True

            # Check if any existing server resolves to the same canonical name
            for existing_name in existing_servers.keys():
                if existing_name != canonical_name:  # Avoid duplicate checking
                    try:
                        existing_canonical = self.get_canonical_server_name(existing_name)
                        if existing_canonical == canonical_name:
                            return True
                    except Exception:
                        # If we can't resolve an existing server name, skip it
                        continue

        return False

    def get_canonical_server_name(self, server_ref: str) -> str:
        """Get canonical server name from MCP Registry.

        Args:
            server_ref: Server reference to resolve.

        Returns:
            Canonical server name if found in registry, otherwise the original reference.
        """
        try:
            # Use existing registry client that's already initialized in adapters
            server_info = self.adapter.registry_client.find_server_by_reference(server_ref)

            if server_info:
                # Use the server name from x-github.name field, or fallback to server.name
                if "x-github" in server_info and "name" in server_info["x-github"]:
                    return server_info["x-github"]["name"]
                elif "name" in server_info:
                    return server_info["name"]
        except Exception:
            # Graceful fallback on registry failure
            pass

        # Fallback: return the reference as-is if not found in registry
        return server_ref

    def get_existing_server_configs(self) -> Dict[str, Any]:
        """Extract all existing server configurations.

        Returns:
            Dictionary of existing server configurations keyed by server name.
        """
        # Get fresh config each time
        existing_config = self.adapter.get_current_config()

        # Determine runtime type from adapter class name or type
        adapter_class_name = getattr(self.adapter, '__class__', type(self.adapter)).__name__.lower()

        if "copilot" in adapter_class_name:
            return existing_config.get("mcpServers", {})
        elif "codex" in adapter_class_name:
            # Extract mcp_servers section from TOML config, handling both nested and flat formats
            servers = {}

            # Direct mcp_servers section
            if "mcp_servers" in existing_config:
                servers.update(existing_config["mcp_servers"])

            # Handle TOML-style nested keys like 'mcp_servers.github' and 'mcp_servers."quoted-name"'
            for key, value in existing_config.items():
                if key.startswith("mcp_servers."):
                    # Extract server name from key
                    server_name = key[len("mcp_servers."):]
                    # Remove quotes if present
                    if server_name.startswith('"') and server_name.endswith('"'):
                        server_name = server_name[1:-1]

                    # Only add if it looks like server config (has command or args)
                    if isinstance(value, dict) and ('command' in value or 'args' in value):
                        servers[server_name] = value

            return servers
        elif "vscode" in adapter_class_name:
            return existing_config.get("servers", {})

        return {}

    def get_conflict_summary(self, server_reference: str) -> Dict[str, Any]:
        """Get detailed information about a conflict.

        Args:
            server_reference: Server reference to analyze.

        Returns:
            Dictionary with conflict details.
        """
        canonical_name = self.get_canonical_server_name(server_reference)
        existing_servers = self.get_existing_server_configs()

        conflict_info = {
            "exists": False,
            "canonical_name": canonical_name,
            "conflicting_servers": []
        }

        # Check for exact canonical name match
        if canonical_name in existing_servers:
            conflict_info["exists"] = True
            conflict_info["conflicting_servers"].append({
                "name": canonical_name,
                "type": "exact_match"
            })

        # Check if any existing server resolves to the same canonical name
        for existing_name in existing_servers.keys():
            if existing_name != canonical_name:  # Avoid duplicate reporting
                existing_canonical = self.get_canonical_server_name(existing_name)
                if existing_canonical == canonical_name:
                    conflict_info["exists"] = True
                    conflict_info["conflicting_servers"].append({
                        "name": existing_name,
                        "type": "canonical_match",
                        "resolves_to": existing_canonical
                    })

        return conflict_info
96  src/apm_cli/core/docker_args.py  Normal file
@@ -0,0 +1,96 @@
"""Docker arguments processing utilities for MCP configuration."""

from typing import List, Dict, Tuple


class DockerArgsProcessor:
    """Handles Docker argument processing with deduplication."""

    @staticmethod
    def process_docker_args(base_args: List[str], env_vars: Dict[str, str]) -> List[str]:
        """Process Docker arguments with environment variable deduplication and required flags.

        Args:
            base_args: Base Docker arguments list.
            env_vars: Environment variables to inject.

        Returns:
            Updated arguments with environment variables injected without duplicates and required flags.
        """
        result = []
        env_vars_added = set()
        has_interactive = False
        has_rm = False

        # Check for existing -i and --rm flags
        for arg in base_args:
            if arg == "-i" or arg == "--interactive":
                has_interactive = True
            elif arg == "--rm":
                has_rm = True

        for arg in base_args:
            result.append(arg)

            # When we encounter "run", inject required flags and environment variables
            if arg == "run":
                # Add -i flag if not present
                if not has_interactive:
                    result.append("-i")

                # Add --rm flag if not present
                if not has_rm:
                    result.append("--rm")

                # Add environment variables
                for env_name, env_value in env_vars.items():
                    if env_name not in env_vars_added:
                        result.extend(["-e", f"{env_name}={env_value}"])
                        env_vars_added.add(env_name)

        return result

    @staticmethod
    def extract_env_vars_from_args(args: List[str]) -> Tuple[List[str], Dict[str, str]]:
        """Extract environment variables from Docker args.

        Args:
            args: Docker arguments that may contain -e flags.

        Returns:
            Tuple of (clean_args, env_vars) where clean_args has -e flags removed
            and env_vars contains the extracted environment variables.
        """
        clean_args = []
        env_vars = {}
        i = 0

        while i < len(args):
            if args[i] == "-e" and i + 1 < len(args):
                env_spec = args[i + 1]
                if "=" in env_spec:
                    key, value = env_spec.split("=", 1)
                    env_vars[key] = value
                else:
                    env_vars[env_spec] = "${" + env_spec + "}"
                i += 2  # Skip both -e and the env spec
            else:
                clean_args.append(args[i])
                i += 1

        return clean_args, env_vars

    @staticmethod
    def merge_env_vars(existing_env: Dict[str, str], new_env: Dict[str, str]) -> Dict[str, str]:
        """Merge environment variables, prioritizing resolved values over templates.

        Args:
            existing_env: Existing environment variables (often templates from registry).
            new_env: New environment variables to merge (resolved actual values).

        Returns:
            Merged environment variables with resolved values taking precedence.
        """
        merged = existing_env.copy()
        merged.update(new_env)  # Resolved values take precedence over templates
        return merged
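A minimal sketch (not part of the diff) of the flag and environment-variable injection performed after "run"; it assumes `apm_cli` is importable, and the image name is illustrative only:

from apm_cli.core.docker_args import DockerArgsProcessor

base = ["run", "ghcr.io/example/mcp-server"]
env = {"GITHUB_TOKEN": "${GITHUB_TOKEN}"}
print(DockerArgsProcessor.process_docker_args(base, env))
# ['run', '-i', '--rm', '-e', 'GITHUB_TOKEN=${GITHUB_TOKEN}', 'ghcr.io/example/mcp-server']

clean, extracted = DockerArgsProcessor.extract_env_vars_from_args(["run", "-e", "FOO=bar", "image"])
print(clean, extracted)  # ['run', 'image'] {'FOO': 'bar'}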
99  src/apm_cli/core/operations.py  Normal file
@@ -0,0 +1,99 @@
"""Core operations for APM-CLI."""

from ..factory import ClientFactory, PackageManagerFactory
from .safe_installer import SafeMCPInstaller


def configure_client(client_type, config_updates):
    """Configure an MCP client.

    Args:
        client_type (str): Type of client to configure.
        config_updates (dict): Configuration updates to apply.

    Returns:
        bool: True if successful, False otherwise.
    """
    try:
        client = ClientFactory.create_client(client_type)
        client.update_config(config_updates)
        return True
    except Exception as e:
        print(f"Error configuring client: {e}")
        return False


def install_package(client_type, package_name, version=None, shared_env_vars=None, server_info_cache=None, shared_runtime_vars=None):
    """Install an MCP package for a specific client type.

    Args:
        client_type (str): Type of client to configure.
        package_name (str): Name of the package to install.
        version (str, optional): Version of the package to install.
        shared_env_vars (dict, optional): Pre-collected environment variables to use.
        server_info_cache (dict, optional): Pre-fetched server info to avoid duplicate registry calls.
        shared_runtime_vars (dict, optional): Pre-collected runtime variables to use.

    Returns:
        dict: Result with 'success' (bool), 'installed' (bool), 'skipped' (bool) keys.
    """
    try:
        # Use safe installer with conflict detection
        safe_installer = SafeMCPInstaller(client_type)

        # Pass shared environment and runtime variables and server info cache if available
        if shared_env_vars is not None or server_info_cache is not None or shared_runtime_vars is not None:
            summary = safe_installer.install_servers(
                [package_name],
                env_overrides=shared_env_vars,
                server_info_cache=server_info_cache,
                runtime_vars=shared_runtime_vars
            )
        else:
            summary = safe_installer.install_servers([package_name])

        return {
            'success': True,
            'installed': len(summary.installed) > 0,
            'skipped': len(summary.skipped) > 0,
            'failed': len(summary.failed) > 0
        }

    except Exception as e:
        print(f"Error installing package {package_name} for {client_type}: {e}")
        return {
            'success': False,
            'installed': False,
            'skipped': False,
            'failed': True
        }


def uninstall_package(client_type, package_name):
    """Uninstall an MCP package.

    Args:
        client_type (str): Type of client to configure.
        package_name (str): Name of the package to uninstall.

    Returns:
        bool: True if successful, False otherwise.
    """
    try:
        client = ClientFactory.create_client(client_type)
        package_manager = PackageManagerFactory.create_package_manager()

        # Uninstall the package
        result = package_manager.uninstall(package_name)

        # Remove any legacy config entries if they exist
        current_config = client.get_current_config()
        config_updates = {}
        if f"mcp.package.{package_name}.enabled" in current_config:
            config_updates = {f"mcp.package.{package_name}.enabled": None}  # Set to None to remove the entry
        client.update_config(config_updates)

        return result
    except Exception as e:
        print(f"Error uninstalling package: {e}")
        return False
136  src/apm_cli/core/safe_installer.py  Normal file
@@ -0,0 +1,136 @@
"""Safe MCP server installation with conflict detection."""

from typing import List, Dict, Any
from dataclasses import dataclass
from ..factory import ClientFactory
from .conflict_detector import MCPConflictDetector
from ..utils.console import _rich_warning, _rich_success, _rich_error, _rich_info


@dataclass
class InstallationSummary:
    """Summary of MCP server installation results."""

    def __init__(self):
        self.installed = []
        self.skipped = []
        self.failed = []

    def add_installed(self, server_ref: str):
        """Add a server to the installed list."""
        self.installed.append(server_ref)

    def add_skipped(self, server_ref: str, reason: str):
        """Add a server to the skipped list."""
        self.skipped.append({"server": server_ref, "reason": reason})

    def add_failed(self, server_ref: str, reason: str):
        """Add a server to the failed list."""
        self.failed.append({"server": server_ref, "reason": reason})

    def has_any_changes(self) -> bool:
        """Check if any installations or failures occurred."""
        return len(self.installed) > 0 or len(self.failed) > 0

    def log_summary(self):
        """Log a summary of installation results."""
        if self.installed:
            _rich_success(f"✅ Installed: {', '.join(self.installed)}")

        if self.skipped:
            for item in self.skipped:
                _rich_warning(f"⚠️ Skipped {item['server']}: {item['reason']}")

        if self.failed:
            for item in self.failed:
                _rich_error(f"❌ Failed {item['server']}: {item['reason']}")


class SafeMCPInstaller:
    """Safe MCP server installation with conflict detection."""

    def __init__(self, runtime: str):
        """Initialize the safe installer.

        Args:
            runtime: Target runtime (copilot, codex, vscode).
        """
        self.runtime = runtime
        self.adapter = ClientFactory.create_client(runtime)
        self.conflict_detector = MCPConflictDetector(self.adapter)

    def install_servers(self, server_references: List[str], env_overrides: Dict[str, str] = None, server_info_cache: Dict[str, Any] = None, runtime_vars: Dict[str, str] = None) -> InstallationSummary:
        """Install MCP servers with conflict detection.

        Args:
            server_references: List of server references to install.
            env_overrides: Optional dictionary of environment variable overrides.
            server_info_cache: Optional pre-fetched server info to avoid duplicate registry calls.
            runtime_vars: Optional dictionary of runtime variable values.

        Returns:
            InstallationSummary with detailed results.
        """
        summary = InstallationSummary()

        for server_ref in server_references:
            if self.conflict_detector.check_server_exists(server_ref):
                summary.add_skipped(server_ref, "already configured")
                self._log_skip(server_ref)
                continue

            try:
                # Pass environment overrides, server info cache, and runtime variables if provided
                kwargs = {}
                if env_overrides is not None:
                    kwargs['env_overrides'] = env_overrides
                if server_info_cache is not None:
                    kwargs['server_info_cache'] = server_info_cache
                if runtime_vars is not None:
                    kwargs['runtime_vars'] = runtime_vars

                result = self.adapter.configure_mcp_server(server_ref, **kwargs)

                if result:
                    summary.add_installed(server_ref)
                    self._log_success(server_ref)
                else:
                    summary.add_failed(server_ref, "configuration failed")
                    self._log_failure(server_ref)
            except Exception as e:
                summary.add_failed(server_ref, str(e))
                self._log_error(server_ref, e)

        return summary

    def _log_skip(self, server_ref: str):
        """Log when a server is skipped due to existing configuration."""
        _rich_warning(f" {server_ref} already configured, skipping")

    def _log_success(self, server_ref: str):
        """Log successful server installation."""
        _rich_success(f" ✓ {server_ref}")

    def _log_failure(self, server_ref: str):
        """Log failed server installation."""
        _rich_warning(f" ✗ {server_ref} installation failed")

    def _log_error(self, server_ref: str, error: Exception):
        """Log error during server installation."""
        _rich_error(f" ✗ {server_ref}: {error}")

    def check_conflicts_only(self, server_references: List[str]) -> Dict[str, Any]:
        """Check for conflicts without installing.

        Args:
            server_references: List of server references to check.

        Returns:
            Dictionary with conflict information for each server.
        """
        conflicts = {}

        for server_ref in server_references:
            conflicts[server_ref] = self.conflict_detector.get_conflict_summary(server_ref)

        return conflicts
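A minimal sketch (not part of the diff) showing how callers can aggregate results through InstallationSummary without touching a real runtime; it assumes `apm_cli` is importable and the server names are illustrative only:

from apm_cli.core.safe_installer import InstallationSummary

summary = InstallationSummary()
summary.add_installed("github")
summary.add_skipped("io.github.github/github-mcp-server", "already configured")
print(summary.has_any_changes())  # True: at least one install happened
summary.log_summary()             # prints the ✅ / ⚠️ lines defined above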
500  src/apm_cli/core/script_runner.py  Normal file
@@ -0,0 +1,500 @@
"""Script runner for APM NPM-like script execution."""

import os
import re
import subprocess
import time
import yaml
from pathlib import Path
from typing import Dict, Optional

from .token_manager import setup_runtime_environment
from ..output.script_formatters import ScriptExecutionFormatter


class ScriptRunner:
    """Executes APM scripts with auto-compilation of .prompt.md files."""

    def __init__(self, compiler=None, use_color: bool = True):
        """Initialize script runner with optional compiler.

        Args:
            compiler: Optional prompt compiler instance
            use_color: Whether to use colored output
        """
        self.compiler = compiler or PromptCompiler()
        self.formatter = ScriptExecutionFormatter(use_color=use_color)

    def run_script(self, script_name: str, params: Dict[str, str]) -> bool:
        """Run a script from apm.yml with parameter substitution.

        Args:
            script_name: Name of the script to run
            params: Parameters for compilation and script execution

        Returns:
            bool: True if script executed successfully
        """
        # Display script execution header
        header_lines = self.formatter.format_script_header(script_name, params)
        for line in header_lines:
            print(line)

        # Load apm.yml configuration
        config = self._load_config()
        if not config:
            raise RuntimeError("No apm.yml found in current directory")

        scripts = config.get('scripts', {})
        if script_name not in scripts:
            available = ', '.join(scripts.keys()) if scripts else 'none'
            raise RuntimeError(f"Script '{script_name}' not found. Available scripts: {available}")

        # Get the script command
        command = scripts[script_name]

        # Auto-compile any .prompt.md files in the command
        compiled_command, compiled_prompt_files, runtime_content = self._auto_compile_prompts(command, params)

        # Show compilation progress if needed
        if compiled_prompt_files:
            compilation_lines = self.formatter.format_compilation_progress(compiled_prompt_files)
            for line in compilation_lines:
                print(line)

        # Detect runtime and show execution details
        runtime = self._detect_runtime(compiled_command)

        # Execute the final command
        if runtime_content is not None:
            # Show runtime execution details
            execution_lines = self.formatter.format_runtime_execution(
                runtime, compiled_command, len(runtime_content)
            )
            for line in execution_lines:
                print(line)

            # Show content preview
            preview_lines = self.formatter.format_content_preview(runtime_content)
            for line in preview_lines:
                print(line)

        try:
            # Set up GitHub token environment for all runtimes using centralized manager
            env = setup_runtime_environment(os.environ.copy())

            # Show environment setup if relevant
            env_vars_set = []
            if 'GITHUB_TOKEN' in env and env['GITHUB_TOKEN']:
                env_vars_set.append('GITHUB_TOKEN')
            if 'GITHUB_APM_PAT' in env and env['GITHUB_APM_PAT']:
                env_vars_set.append('GITHUB_APM_PAT')

            if env_vars_set:
                env_lines = self.formatter.format_environment_setup(runtime, env_vars_set)
                for line in env_lines:
                    print(line)

            # Track execution time
            start_time = time.time()

            # Check if this command needs subprocess execution (has compiled content)
            if runtime_content is not None:
                # Use argument list approach for all runtimes to avoid shell parsing issues
                result = self._execute_runtime_command(compiled_command, runtime_content, env)
            else:
                # Use regular shell execution for other commands
                result = subprocess.run(compiled_command, shell=True, check=True, env=env)

            execution_time = time.time() - start_time

            # Show success message
            success_lines = self.formatter.format_execution_success(runtime, execution_time)
            for line in success_lines:
                print(line)

            return result.returncode == 0

        except subprocess.CalledProcessError as e:
            execution_time = time.time() - start_time

            # Show error message
            error_lines = self.formatter.format_execution_error(runtime, e.returncode)
            for line in error_lines:
                print(line)

            raise RuntimeError(f"Script execution failed with exit code {e.returncode}")

    def list_scripts(self) -> Dict[str, str]:
        """List all available scripts from apm.yml.

        Returns:
            Dict mapping script names to their commands
        """
        config = self._load_config()
        return config.get('scripts', {}) if config else {}

    def _load_config(self) -> Optional[Dict]:
        """Load apm.yml from current directory."""
        config_path = Path('apm.yml')
        if not config_path.exists():
            return None

        with open(config_path, 'r') as f:
            return yaml.safe_load(f)

    def _auto_compile_prompts(self, command: str, params: Dict[str, str]) -> tuple[str, list[str], str]:
        """Auto-compile .prompt.md files and transform runtime commands.

        Args:
            command: Original script command
            params: Parameters for compilation

        Returns:
            Tuple of (compiled_command, list_of_compiled_prompt_files, runtime_content_or_none)
        """
        # Find all .prompt.md files in the command using regex
        prompt_files = re.findall(r'(\S+\.prompt\.md)', command)
        compiled_prompt_files = []
        runtime_content = None

        compiled_command = command
        for prompt_file in prompt_files:
            # Compile the prompt file with current params
            compiled_path = self.compiler.compile(prompt_file, params)
            compiled_prompt_files.append(prompt_file)

            # Read the compiled content
            with open(compiled_path, 'r') as f:
                compiled_content = f.read().strip()

            # Check if this is a runtime command (copilot, codex, llm) before transformation
            is_runtime_cmd = any(runtime in command for runtime in ['copilot', 'codex', 'llm']) and re.search(re.escape(prompt_file), command)

            # Transform command based on runtime pattern
            compiled_command = self._transform_runtime_command(
                compiled_command, prompt_file, compiled_content, compiled_path
            )

            # Store content for runtime commands that need subprocess execution
            if is_runtime_cmd:
                runtime_content = compiled_content

        return compiled_command, compiled_prompt_files, runtime_content

    def _transform_runtime_command(self, command: str, prompt_file: str,
                                   compiled_content: str, compiled_path: str) -> str:
        """Transform runtime commands to their proper execution format.

        Args:
            command: Original command
            prompt_file: Original .prompt.md file path
            compiled_content: Compiled prompt content as string
            compiled_path: Path to compiled .txt file

        Returns:
            Transformed command for proper runtime execution
        """
        # Handle environment variables prefix (e.g., "ENV1=val1 ENV2=val2 codex [args] file.prompt.md")
        # More robust approach: split by runtime commands to separate env vars from command
        runtime_commands = ['codex', 'copilot', 'llm']

        for runtime_cmd in runtime_commands:
            runtime_pattern = f' {runtime_cmd} '
            if runtime_pattern in command and re.search(re.escape(prompt_file), command):
                parts = command.split(runtime_pattern, 1)
                potential_env_part = parts[0]
                runtime_part = runtime_cmd + ' ' + parts[1]

                # Check if the first part looks like environment variables (has = signs)
                if '=' in potential_env_part and not potential_env_part.startswith(runtime_cmd):
                    env_vars = potential_env_part

                    # Extract arguments before and after the prompt file from runtime part
                    runtime_match = re.search(f'{runtime_cmd}\\s+(.*?)(' + re.escape(prompt_file) + r')(.*?)$', runtime_part)
                    if runtime_match:
                        args_before_file = runtime_match.group(1).strip()
                        args_after_file = runtime_match.group(3).strip()

                        # Build the command based on runtime
                        if runtime_cmd == 'codex':
                            if args_before_file:
                                result = f"{env_vars} codex exec {args_before_file}"
                            else:
                                result = f"{env_vars} codex exec"
                        else:
                            # For copilot and llm, keep the runtime name and args
                            result = f"{env_vars} {runtime_cmd}"
                            if args_before_file:
                                # Remove any existing -p flag since we'll handle it in execution
                                cleaned_args = args_before_file.replace('-p', '').strip()
                                if cleaned_args:
                                    result += f" {cleaned_args}"

                        if args_after_file:
                            result += f" {args_after_file}"
                        return result

        # Handle individual runtime patterns without environment variables

        # Handle "codex [args] file.prompt.md [more_args]" -> "codex exec [args] [more_args]"
        if re.search(r'codex\s+.*' + re.escape(prompt_file), command):
            match = re.search(r'codex\s+(.*?)(' + re.escape(prompt_file) + r')(.*?)$', command)
            if match:
                args_before_file = match.group(1).strip()
                args_after_file = match.group(3).strip()

                result = "codex exec"
                if args_before_file:
                    result += f" {args_before_file}"
                if args_after_file:
                    result += f" {args_after_file}"
                return result

        # Handle "copilot [args] file.prompt.md [more_args]" -> "copilot [args] [more_args]"
        elif re.search(r'copilot\s+.*' + re.escape(prompt_file), command):
            match = re.search(r'copilot\s+(.*?)(' + re.escape(prompt_file) + r')(.*?)$', command)
            if match:
                args_before_file = match.group(1).strip()
                args_after_file = match.group(3).strip()

                result = "copilot"
                if args_before_file:
                    # Remove any existing -p flag since we'll handle it in execution
                    cleaned_args = args_before_file.replace('-p', '').strip()
                    if cleaned_args:
                        result += f" {cleaned_args}"
                if args_after_file:
                    result += f" {args_after_file}"
                return result

        # Handle "llm [args] file.prompt.md [more_args]" -> "llm [args] [more_args]"
        elif re.search(r'llm\s+.*' + re.escape(prompt_file), command):
            match = re.search(r'llm\s+(.*?)(' + re.escape(prompt_file) + r')(.*?)$', command)
            if match:
                args_before_file = match.group(1).strip()
                args_after_file = match.group(3).strip()

                result = "llm"
                if args_before_file:
                    result += f" {args_before_file}"
                if args_after_file:
                    result += f" {args_after_file}"
                return result

        # Handle bare "file.prompt.md" -> "codex exec" (default to codex)
        elif command.strip() == prompt_file:
            return "codex exec"

        # Fallback: just replace file path with compiled path (for non-runtime commands)
        return command.replace(prompt_file, compiled_path)

    def _detect_runtime(self, command: str) -> str:
        """Detect which runtime is being used in the command.

        Args:
            command: The command to analyze

        Returns:
            Name of the detected runtime (copilot, codex, llm, or unknown)
        """
        command_lower = command.lower().strip()
        if command_lower.startswith('copilot'):
            return 'copilot'
        elif command_lower.startswith('codex'):
            return 'codex'
        elif command_lower.startswith('llm'):
            return 'llm'
        else:
            return 'unknown'

    def _execute_runtime_command(self, command: str, content: str, env: dict) -> subprocess.CompletedProcess:
        """Execute a runtime command using subprocess argument list to avoid shell parsing issues.

        Args:
            command: The simplified runtime command (without content)
            content: The compiled prompt content to pass to the runtime
            env: Environment variables

        Returns:
            subprocess.CompletedProcess: The result of the command execution
        """
        import shlex

        # Parse the command into arguments
        args = shlex.split(command.strip())

        # Handle environment variables at the beginning of the command
        # Extract environment variables (key=value pairs) from the beginning of args
        env_vars = env.copy()  # Start with existing environment
        actual_command_args = []

        for arg in args:
            if '=' in arg and not actual_command_args:
                # This looks like an environment variable and we haven't started the actual command yet
                key, value = arg.split('=', 1)
                # Validate environment variable name with restrictive pattern:
                # only letters, numbers, and underscores, starting with a letter or underscore
                if re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', key):
                    env_vars[key] = value
                    continue
            # Once we hit a non-env-var argument, everything else is part of the command
            actual_command_args.append(arg)

        # Determine how to pass content based on runtime
        runtime = self._detect_runtime(' '.join(actual_command_args))

        if runtime == 'copilot':
            # Copilot uses -p flag
            actual_command_args.extend(["-p", content])
        elif runtime == 'codex':
            # Codex exec expects content as the last argument
            actual_command_args.append(content)
        elif runtime == 'llm':
            # LLM expects content as argument
            actual_command_args.append(content)
        else:
            # Default: assume content as last argument
            actual_command_args.append(content)

        # Show subprocess details for debugging
        subprocess_lines = self.formatter.format_subprocess_details(actual_command_args[:-1], len(content))
        for line in subprocess_lines:
            print(line)

        # Show environment variables if any were extracted
        if len(env_vars) > len(env):
            extracted_env_vars = []
            for key, value in env_vars.items():
                if key not in env:
                    extracted_env_vars.append(f"{key}={value}")
            if extracted_env_vars:
                env_lines = self.formatter.format_environment_setup("command", extracted_env_vars)
                for line in env_lines:
                    print(line)

        # Execute using argument list (no shell interpretation) with updated environment
        return subprocess.run(actual_command_args, check=True, env=env_vars)


class PromptCompiler:
    """Compiles .prompt.md files with parameter substitution."""

    DEFAULT_COMPILED_DIR = Path('.apm/compiled')

    def __init__(self):
        """Initialize compiler."""
        self.compiled_dir = self.DEFAULT_COMPILED_DIR

    def compile(self, prompt_file: str, params: Dict[str, str]) -> str:
        """Compile a .prompt.md file with parameter substitution.

        Args:
            prompt_file: Path to the .prompt.md file
            params: Parameters to substitute

        Returns:
            Path to the compiled file
        """
        # Resolve the prompt file path - check local first, then dependencies
        prompt_path = self._resolve_prompt_file(prompt_file)

        # Now ensure compiled directory exists
        self.compiled_dir.mkdir(parents=True, exist_ok=True)

        with open(prompt_path, 'r') as f:
            content = f.read()

        # Parse frontmatter and content
        if content.startswith('---'):
            # Split frontmatter and content
            parts = content.split('---', 2)
            if len(parts) >= 3:
                frontmatter = parts[1].strip()
                main_content = parts[2].strip()
            else:
                main_content = content
        else:
            main_content = content

        # Substitute parameters in content
        compiled_content = self._substitute_parameters(main_content, params)

        # Generate output file path
        output_name = prompt_path.stem.replace('.prompt', '') + '.txt'
        output_path = self.compiled_dir / output_name

        # Write compiled content
        with open(output_path, 'w') as f:
            f.write(compiled_content)

        return str(output_path)

    def _resolve_prompt_file(self, prompt_file: str) -> Path:
        """Resolve prompt file path, checking local directory first, then dependencies.

        Args:
            prompt_file: Relative path to the .prompt.md file

        Returns:
            Path: Resolved path to the prompt file

        Raises:
            FileNotFoundError: If prompt file is not found in local or dependency modules
        """
        prompt_path = Path(prompt_file)

        # First check if it exists in current directory (local)
        if prompt_path.exists():
            return prompt_path

        # If not found locally, search in dependency modules
        apm_modules_dir = Path("apm_modules")
        if apm_modules_dir.exists():
            # Search all dependency directories for the prompt file
            for dep_dir in apm_modules_dir.iterdir():
                if dep_dir.is_dir():
                    # Check in the root of the dependency
                    dep_prompt_path = dep_dir / prompt_file
                    if dep_prompt_path.exists():
                        return dep_prompt_path

                    # Also check in common subdirectories
                    for subdir in ['prompts', '.', 'workflows']:
                        sub_prompt_path = dep_dir / subdir / prompt_file
                        if sub_prompt_path.exists():
                            return sub_prompt_path

        # If still not found, raise an error with helpful message
        searched_locations = [
            f"Local: {prompt_path}",
        ]

        if apm_modules_dir.exists():
            searched_locations.append("Dependencies:")
            for dep_dir in apm_modules_dir.iterdir():
                if dep_dir.is_dir():
                    searched_locations.append(f"  - {dep_dir.name}/{prompt_file}")

        raise FileNotFoundError(
            f"Prompt file '{prompt_file}' not found.\n"
            f"Searched in:\n" + "\n".join(searched_locations) +
            f"\n\nTip: Run 'apm install' to ensure dependencies are installed."
        )

    def _substitute_parameters(self, content: str, params: Dict[str, str]) -> str:
        """Substitute parameters in content.

        Args:
            content: Content to process
            params: Parameters to substitute

        Returns:
            Content with parameters substituted
        """
        result = content
        for key, value in params.items():
            # Replace ${input:key} placeholders
            placeholder = f"${{input:{key}}}"
            result = result.replace(placeholder, str(value))
        return result
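A minimal sketch (not part of the diff) of the ${input:...} substitution performed by PromptCompiler; it assumes `apm_cli` is importable, and the prompt text and parameter names are illustrative only:

from apm_cli.core.script_runner import PromptCompiler

compiler = PromptCompiler()
text = "Review ${input:service} for ${input:issue}."
print(compiler._substitute_parameters(text, {"service": "billing-api", "issue": "timeouts"}))
# -> "Review billing-api for timeouts."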
197  src/apm_cli/core/token_manager.py  Normal file
@@ -0,0 +1,197 @@
"""Centralized GitHub token management for different AI runtimes.

This module handles the complex token environment setup required by different
AI CLI tools, each of which expects different environment variable names for
GitHub authentication and API access.

Token Architecture:
- GITHUB_COPILOT_PAT: User-scoped PAT specifically for Copilot
- GITHUB_APM_PAT: Fine-grained PAT for APM module access
- GITHUB_TOKEN: User-scoped PAT for GitHub Models API access
- GITHUB_NPM_PAT: Classic PAT for GitHub npm registry access

Runtime Requirements:
- Codex CLI: Uses GITHUB_TOKEN (must be user-scoped for GitHub Models)
"""

import os
from typing import Dict, Optional, Tuple


class GitHubTokenManager:
    """Manages GitHub token environment setup for different AI runtimes."""

    # Define token precedence for different use cases
    TOKEN_PRECEDENCE = {
        'copilot': ['GITHUB_COPILOT_PAT', 'GITHUB_TOKEN', 'GITHUB_APM_PAT'],
        'models': ['GITHUB_TOKEN'],  # GitHub Models requires user-scoped PAT
        'modules': ['GITHUB_APM_PAT', 'GITHUB_TOKEN'],  # APM module access
        'npm': ['GITHUB_NPM_PAT']  # npm registry access
    }

    # Runtime-specific environment variable mappings
    RUNTIME_ENV_VARS = {
        'copilot': ['GH_TOKEN', 'GITHUB_PERSONAL_ACCESS_TOKEN'],
        'codex': ['GITHUB_TOKEN'],  # Uses GITHUB_TOKEN directly
        'llm': ['GITHUB_MODELS_KEY'],  # LLM-specific variable for GitHub Models
    }

    def __init__(self, preserve_existing: bool = True):
        """Initialize token manager.

        Args:
            preserve_existing: If True, never overwrite existing environment variables
        """
        self.preserve_existing = preserve_existing

    def setup_environment(self, env: Optional[Dict[str, str]] = None) -> Dict[str, str]:
        """Set up complete token environment for all runtimes.

        Args:
            env: Environment dictionary to modify (defaults to os.environ.copy())

        Returns:
            Updated environment dictionary with all required tokens set
        """
        if env is None:
            env = os.environ.copy()

        # Get available tokens
        available_tokens = self._get_available_tokens(env)

        # Set up tokens for each runtime without overwriting existing values
        self._setup_copilot_tokens(env, available_tokens)
        self._setup_codex_tokens(env, available_tokens)
        self._setup_llm_tokens(env, available_tokens)

        return env

    def get_token_for_purpose(self, purpose: str, env: Optional[Dict[str, str]] = None) -> Optional[str]:
        """Get the best available token for a specific purpose.

        Args:
            purpose: Token purpose ('copilot', 'models', 'modules', 'npm')
            env: Environment to check (defaults to os.environ)

        Returns:
            Best available token for the purpose, or None if not available
        """
        if env is None:
            env = os.environ

        if purpose not in self.TOKEN_PRECEDENCE:
            raise ValueError(f"Unknown purpose: {purpose}")

        for token_var in self.TOKEN_PRECEDENCE[purpose]:
            token = env.get(token_var)
            if token:
                return token
        return None

    def validate_tokens(self, env: Optional[Dict[str, str]] = None) -> Tuple[bool, str]:
        """Validate that required tokens are available.

        Args:
            env: Environment to check (defaults to os.environ)

        Returns:
            Tuple of (is_valid, error_message)
        """
        if env is None:
            env = os.environ

        # Check for at least one valid token
        has_any_token = any(
            self.get_token_for_purpose(purpose, env)
            for purpose in ['copilot', 'models', 'modules']
        )

        if not has_any_token:
            return False, (
                "No GitHub tokens found. Set one of:\n"
                "- GITHUB_TOKEN (user-scoped PAT for GitHub Models)\n"
                "- GITHUB_APM_PAT (fine-grained PAT for APM modules)"
            )

        # Warn about GitHub Models access if only fine-grained PAT is available
        models_token = self.get_token_for_purpose('models', env)
        if not models_token:
            has_fine_grained = env.get('GITHUB_APM_PAT')
            if has_fine_grained:
                return True, (
                    "Warning: Only fine-grained PAT available. "
                    "GitHub Models requires GITHUB_TOKEN (user-scoped PAT)"
                )

        return True, "Token validation passed"

    def _get_available_tokens(self, env: Dict[str, str]) -> Dict[str, str]:
        """Get all available GitHub tokens from environment."""
        tokens = {}
        for purpose, token_vars in self.TOKEN_PRECEDENCE.items():
            for token_var in token_vars:
                if token_var in env and env[token_var]:
                    tokens[token_var] = env[token_var]
        return tokens

    def _setup_copilot_tokens(self, env: Dict[str, str], available_tokens: Dict[str, str]):
        """Set up tokens for Copilot."""
        copilot_token = self.get_token_for_purpose('copilot', available_tokens)
        if not copilot_token:
            return

        for env_var in self.RUNTIME_ENV_VARS['copilot']:
            if self.preserve_existing and env_var in env:
                continue
            env[env_var] = copilot_token

    def _setup_codex_tokens(self, env: Dict[str, str], available_tokens: Dict[str, str]):
        """Set up tokens for Codex CLI (preserve existing GITHUB_TOKEN)."""
        # Codex uses GITHUB_TOKEN directly - only set if missing
        if self.preserve_existing and 'GITHUB_TOKEN' in env:
            return

        models_token = self.get_token_for_purpose('models', available_tokens)
        if models_token and 'GITHUB_TOKEN' not in env:
            env['GITHUB_TOKEN'] = models_token

    def _setup_llm_tokens(self, env: Dict[str, str], available_tokens: Dict[str, str]):
        """Set up tokens for LLM CLI."""
        # LLM uses GITHUB_MODELS_KEY, prefer GITHUB_TOKEN if available
        if self.preserve_existing and 'GITHUB_MODELS_KEY' in env:
            return

        models_token = self.get_token_for_purpose('models', available_tokens)
        if models_token:
            env['GITHUB_MODELS_KEY'] = models_token


# Convenience functions for common use cases
def setup_runtime_environment(env: Optional[Dict[str, str]] = None) -> Dict[str, str]:
    """Set up complete runtime environment for all AI CLIs."""
    manager = GitHubTokenManager()
    return manager.setup_environment(env)


def validate_github_tokens(env: Optional[Dict[str, str]] = None) -> Tuple[bool, str]:
    """Validate GitHub token setup."""
    manager = GitHubTokenManager()
    return manager.validate_tokens(env)


def get_github_token_for_runtime(runtime: str, env: Optional[Dict[str, str]] = None) -> Optional[str]:
    """Get the appropriate GitHub token for a specific runtime."""
    manager = GitHubTokenManager()

    # Map runtime names to purposes
    runtime_to_purpose = {
        'copilot': 'copilot',
        'codex': 'models',
        'llm': 'models',
    }

    purpose = runtime_to_purpose.get(runtime)
    if not purpose:
        raise ValueError(f"Unknown runtime: {runtime}")

    return manager.get_token_for_purpose(purpose, env)
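For reference, a minimal usage sketch of the convenience functions above; it assumes the package is importable as apm_cli (src/ layout installed) and the token value is a placeholder:

    import os
    from apm_cli.core.token_manager import (
        setup_runtime_environment, validate_github_tokens, get_github_token_for_runtime
    )

    os.environ.setdefault("GITHUB_TOKEN", "<user-scoped PAT>")  # placeholder value

    env = setup_runtime_environment()            # populates GH_TOKEN, GITHUB_MODELS_KEY, etc.
    ok, message = validate_github_tokens(env)    # (True, "Token validation passed") once a token is set
    codex_token = get_github_token_for_runtime("codex", env)  # resolves via the 'models' precedence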
28  src/apm_cli/deps/__init__.py  Normal file
@@ -0,0 +1,28 @@
"""Dependencies management package for APM-CLI."""

from .apm_resolver import APMDependencyResolver
from .dependency_graph import (
    DependencyGraph, DependencyTree, DependencyNode, FlatDependencyMap,
    CircularRef, ConflictInfo
)
from .aggregator import sync_workflow_dependencies, scan_workflows_for_dependencies
from .verifier import verify_dependencies, install_missing_dependencies, load_apm_config
from .github_downloader import GitHubPackageDownloader
from .package_validator import PackageValidator

__all__ = [
    'sync_workflow_dependencies',
    'scan_workflows_for_dependencies',
    'verify_dependencies',
    'install_missing_dependencies',
    'load_apm_config',
    'GitHubPackageDownloader',
    'PackageValidator',
    'DependencyGraph',
    'DependencyTree',
    'DependencyNode',
    'FlatDependencyMap',
    'CircularRef',
    'ConflictInfo',
    'APMDependencyResolver'
]
67  src/apm_cli/deps/aggregator.py  Normal file
@@ -0,0 +1,67 @@
"""Workflow dependency aggregator for APM-CLI."""

import os
import glob
from pathlib import Path
import yaml
import frontmatter


def scan_workflows_for_dependencies():
    """Scan all workflow files for MCP dependencies following VSCode's .github/prompts convention.

    Returns:
        set: A set of unique MCP server names from all workflows.
    """
    # Support VSCode's .github/prompts convention with .prompt.md files
    prompt_patterns = [
        "**/.github/prompts/*.prompt.md",  # VSCode convention: .github/prompts/
        "**/*.prompt.md"  # Generic .prompt.md files
    ]

    workflows = []
    for pattern in prompt_patterns:
        workflows.extend(glob.glob(pattern, recursive=True))

    # Remove duplicates
    workflows = list(set(workflows))

    all_servers = set()

    for workflow_file in workflows:
        try:
            with open(workflow_file, 'r', encoding='utf-8') as f:
                content = frontmatter.load(f)
                if 'mcp' in content.metadata and isinstance(content.metadata['mcp'], list):
                    all_servers.update(content.metadata['mcp'])
        except Exception as e:
            print(f"Error processing {workflow_file}: {e}")

    return all_servers


def sync_workflow_dependencies(output_file="apm.yml"):
    """Extract all MCP servers from workflows into apm.yml.

    Args:
        output_file (str, optional): Path to the output file. Defaults to "apm.yml".

    Returns:
        tuple: (bool, list) - Success status and list of servers added
    """
    all_servers = scan_workflows_for_dependencies()

    # Prepare the configuration
    apm_config = {
        'version': '1.0',
        'servers': sorted(list(all_servers))
    }

    try:
        # Create the file
        with open(output_file, 'w', encoding='utf-8') as f:
            yaml.dump(apm_config, f, default_flow_style=False)
        return True, apm_config['servers']
    except Exception as e:
        print(f"Error writing to {output_file}: {e}")
        return False, []
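A short sketch of how sync_workflow_dependencies might be called and what it would produce, assuming two workflows declare mcp: [github, playwright] in their frontmatter:

    from apm_cli.deps.aggregator import sync_workflow_dependencies

    # With the assumption above, this writes an apm.yml roughly like:
    #   servers:
    #   - github
    #   - playwright
    #   version: '1.0'
    ok, servers = sync_workflow_dependencies("apm.yml")
    print(ok, servers)  # True ['github', 'playwright']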
362  src/apm_cli/deps/apm_resolver.py  Normal file
@@ -0,0 +1,362 @@
"""APM dependency resolution engine with recursive resolution and conflict detection."""

from pathlib import Path
from typing import Dict, List, Set, Optional, Tuple
from collections import deque

from ..models.apm_package import APMPackage, DependencyReference
from .dependency_graph import (
    DependencyGraph, DependencyTree, DependencyNode, FlatDependencyMap,
    CircularRef, ConflictInfo
)


class APMDependencyResolver:
    """Handles recursive APM dependency resolution similar to NPM."""

    def __init__(self, max_depth: int = 50):
        """Initialize the resolver with maximum recursion depth."""
        self.max_depth = max_depth
        self._resolution_path = []  # For test compatibility

    def resolve_dependencies(self, project_root: Path) -> DependencyGraph:
        """
        Resolve all APM dependencies recursively.

        Args:
            project_root: Path to the project root containing apm.yml

        Returns:
            DependencyGraph: Complete resolved dependency graph
        """
        # Load the root package
        apm_yml_path = project_root / "apm.yml"
        if not apm_yml_path.exists():
            # Create empty dependency graph for projects without apm.yml
            empty_package = APMPackage(name="unknown", version="0.0.0", package_path=project_root)
            empty_tree = DependencyTree(root_package=empty_package)
            empty_flat = FlatDependencyMap()
            return DependencyGraph(
                root_package=empty_package,
                dependency_tree=empty_tree,
                flattened_dependencies=empty_flat
            )

        try:
            root_package = APMPackage.from_apm_yml(apm_yml_path)
        except (ValueError, FileNotFoundError) as e:
            # Create error graph
            empty_package = APMPackage(name="error", version="0.0.0", package_path=project_root)
            empty_tree = DependencyTree(root_package=empty_package)
            empty_flat = FlatDependencyMap()
            graph = DependencyGraph(
                root_package=empty_package,
                dependency_tree=empty_tree,
                flattened_dependencies=empty_flat
            )
            graph.add_error(f"Failed to load root apm.yml: {e}")
            return graph

        # Build the complete dependency tree
        dependency_tree = self.build_dependency_tree(apm_yml_path)

        # Detect circular dependencies
        circular_deps = self.detect_circular_dependencies(dependency_tree)

        # Flatten dependencies for installation
        flattened_deps = self.flatten_dependencies(dependency_tree)

        # Create and return the complete graph
        graph = DependencyGraph(
            root_package=root_package,
            dependency_tree=dependency_tree,
            flattened_dependencies=flattened_deps,
            circular_dependencies=circular_deps
        )

        return graph

    def build_dependency_tree(self, root_apm_yml: Path) -> DependencyTree:
        """
        Build complete tree of all dependencies and sub-dependencies.

        Uses breadth-first traversal to build the dependency tree level by level.
        This allows for early conflict detection and clearer error reporting.

        Args:
            root_apm_yml: Path to the root apm.yml file

        Returns:
            DependencyTree: Hierarchical dependency tree
        """
        # Load root package
        try:
            root_package = APMPackage.from_apm_yml(root_apm_yml)
        except (ValueError, FileNotFoundError) as e:
            # Return empty tree with error
            empty_package = APMPackage(name="error", version="0.0.0")
            tree = DependencyTree(root_package=empty_package)
            return tree

        # Initialize the tree
        tree = DependencyTree(root_package=root_package)

        # Queue for breadth-first traversal: (dependency_ref, depth, parent_node)
        processing_queue: deque[Tuple[DependencyReference, int, Optional[DependencyNode]]] = deque()

        # Set to track queued repo URLs for O(1) lookup instead of O(n) list comprehension
        queued_repo_urls: Set[str] = set()

        # Add root dependencies to queue
        root_deps = root_package.get_apm_dependencies()
        for dep_ref in root_deps:
            processing_queue.append((dep_ref, 1, None))
            queued_repo_urls.add(dep_ref.repo_url)

        # Process dependencies breadth-first
        while processing_queue:
            dep_ref, depth, parent_node = processing_queue.popleft()

            # Remove from queued set since we're now processing this dependency
            queued_repo_urls.discard(dep_ref.repo_url)

            # Check maximum depth to prevent infinite recursion
            if depth > self.max_depth:
                continue

            # Check if we already processed this dependency at this level or higher
            existing_node = tree.get_node(dep_ref.repo_url)
            if existing_node and existing_node.depth <= depth:
                # We've already processed this dependency at a shallower or equal depth
                # Create parent-child relationship if parent exists
                if parent_node and existing_node not in parent_node.children:
                    parent_node.children.append(existing_node)
                continue

            # Create a new node for this dependency
            # Note: In a real implementation, we would load the actual package here
            # For now, create a placeholder package
            placeholder_package = APMPackage(
                name=dep_ref.get_display_name(),
                version="unknown",
                source=dep_ref.repo_url
            )

            node = DependencyNode(
                package=placeholder_package,
                dependency_ref=dep_ref,
                depth=depth,
                parent=parent_node
            )

            # Add to tree
            tree.add_node(node)

            # Create parent-child relationship
            if parent_node:
                parent_node.children.append(node)

            # Try to load the dependency package and its dependencies
            # For Task 3, this focuses on the resolution algorithm structure
            # Package loading integration will be completed in Tasks 2 & 4
            try:
                # Attempt to load package - currently returns None (placeholder implementation)
                # This will integrate with Task 2 (GitHub downloader) and Task 4 (apm_modules scanning)
                loaded_package = self._try_load_dependency_package(dep_ref)
                if loaded_package:
                    # Update the node with the actual loaded package
                    node.package = loaded_package

                    # Get sub-dependencies and add them to the processing queue
                    sub_dependencies = loaded_package.get_apm_dependencies()
                    for sub_dep in sub_dependencies:
                        # Avoid infinite recursion by checking if we're already processing this dep
                        # Use O(1) set lookup instead of O(n) list comprehension
                        if sub_dep.repo_url not in queued_repo_urls:
                            processing_queue.append((sub_dep, depth + 1, node))
                            queued_repo_urls.add(sub_dep.repo_url)
            except (ValueError, FileNotFoundError) as e:
                # Could not load dependency package - this is expected for remote dependencies
                # The node already has a placeholder package, so continue with that
                pass

        return tree

    def detect_circular_dependencies(self, tree: DependencyTree) -> List[CircularRef]:
        """
        Detect and report circular dependency chains.

        Uses depth-first search to detect cycles in the dependency graph.
        A cycle is detected when we encounter the same repository URL
        in our current traversal path.

        Args:
            tree: The dependency tree to analyze

        Returns:
            List[CircularRef]: List of detected circular dependencies
        """
        circular_deps = []
        visited: Set[str] = set()
        current_path: List[str] = []

        def dfs_detect_cycles(node: DependencyNode) -> None:
            """Recursive DFS function to detect cycles."""
            node_id = node.get_id()
            repo_url = node.dependency_ref.repo_url

            # Check if this repo URL is already in our current path (cycle detected)
            if repo_url in current_path:
                # Found a cycle - create the cycle path
                cycle_start_index = current_path.index(repo_url)
                cycle_path = current_path[cycle_start_index:] + [repo_url]

                circular_ref = CircularRef(
                    cycle_path=cycle_path,
                    detected_at_depth=node.depth
                )
                circular_deps.append(circular_ref)
                return

            # Mark current node as visited and add repo URL to path
            visited.add(node_id)
            current_path.append(repo_url)

            # Check all children
            for child in node.children:
                child_id = child.get_id()

                # Only recurse if we haven't processed this subtree completely
                if child_id not in visited or child.dependency_ref.repo_url in current_path:
                    dfs_detect_cycles(child)

            # Remove from path when backtracking (but keep in visited)
            current_path.pop()

        # Start DFS from all root level dependencies (depth 1)
        root_deps = tree.get_nodes_at_depth(1)
        for root_dep in root_deps:
            if root_dep.get_id() not in visited:
                current_path = []  # Reset path for each root
                dfs_detect_cycles(root_dep)

        return circular_deps

    def flatten_dependencies(self, tree: DependencyTree) -> FlatDependencyMap:
        """
        Flatten tree to avoid duplicate installations (NPM hoisting).

        Implements "first wins" conflict resolution strategy where the first
        declared dependency takes precedence over later conflicting dependencies.

        Args:
            tree: The dependency tree to flatten

        Returns:
            FlatDependencyMap: Flattened dependencies ready for installation
        """
        flat_map = FlatDependencyMap()
        seen_repos: Set[str] = set()

        # Process dependencies level by level (breadth-first)
        # This ensures that dependencies declared earlier in the tree get priority
        for depth in range(1, tree.max_depth + 1):
            nodes_at_depth = tree.get_nodes_at_depth(depth)

            # Sort nodes by their position in the tree to ensure deterministic ordering
            # In a real implementation, this would be based on declaration order
            nodes_at_depth.sort(key=lambda node: node.get_id())

            for node in nodes_at_depth:
                repo_url = node.dependency_ref.repo_url

                if repo_url not in seen_repos:
                    # First occurrence - add without conflict
                    flat_map.add_dependency(node.dependency_ref, is_conflict=False)
                    seen_repos.add(repo_url)
                else:
                    # Conflict - record it but keep the first one
                    flat_map.add_dependency(node.dependency_ref, is_conflict=True)

        return flat_map

    def _validate_dependency_reference(self, dep_ref: DependencyReference) -> bool:
        """
        Validate that a dependency reference is well-formed.

        Args:
            dep_ref: The dependency reference to validate

        Returns:
            bool: True if valid, False otherwise
        """
        if not dep_ref.repo_url:
            return False

        # Basic validation - in real implementation would be more thorough
        if '/' not in dep_ref.repo_url:
            return False

        return True

    def _try_load_dependency_package(self, dep_ref: DependencyReference) -> Optional[APMPackage]:
        """
        Try to load a dependency package from local paths.

        This is a placeholder implementation for Task 3 (dependency resolution algorithm).
        The actual package loading from apm_modules/ will be implemented in Task 4
        (Enhanced Primitive Discovery System) and Task 2 (GitHub Package Downloader).

        Args:
            dep_ref: Reference to the dependency to load

        Returns:
            APMPackage: Loaded package if found, None otherwise

        Raises:
            ValueError: If package exists but has invalid format
            FileNotFoundError: If package cannot be found
        """
        # For Task 3 (dependency resolution), we focus on the algorithm logic
        # without implementing specific file system scanning which belongs to Task 4
        #
        # In the final implementation:
        # - Task 2 will handle downloading packages from GitHub repositories
        # - Task 4 will handle scanning apm_modules/ directory structure
        # - This method will integrate with both systems

        # For now, return None to indicate package not found locally
        # This allows the resolution algorithm to create placeholder nodes
        # and continue with dependency graph construction
        return None

    def _create_resolution_summary(self, graph: DependencyGraph) -> str:
        """
        Create a human-readable summary of the resolution results.

        Args:
            graph: The resolved dependency graph

        Returns:
            str: Summary string
        """
        summary = graph.get_summary()
        lines = [
            f"Dependency Resolution Summary:",
            f"  Root package: {summary['root_package']}",
            f"  Total dependencies: {summary['total_dependencies']}",
            f"  Maximum depth: {summary['max_depth']}",
        ]

        if summary['has_conflicts']:
            lines.append(f"  Conflicts detected: {summary['conflict_count']}")

        if summary['has_circular_dependencies']:
            lines.append(f"  Circular dependencies: {summary['circular_count']}")

        if summary['has_errors']:
            lines.append(f"  Resolution errors: {summary['error_count']}")

        lines.append(f"  Status: {'✅ Valid' if summary['is_valid'] else '❌ Invalid'}")

        return "\n".join(lines)
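A minimal usage sketch of the resolver above, assuming it is run from a project root that contains an apm.yml:

    from pathlib import Path
    from apm_cli.deps import APMDependencyResolver

    resolver = APMDependencyResolver(max_depth=10)
    graph = resolver.resolve_dependencies(Path("."))  # empty graph if no apm.yml is present

    print(graph.get_summary()["total_dependencies"])
    for ref in graph.flattened_dependencies.get_installation_list():
        print(ref.repo_url)  # hoisted, first-wins installation order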
187  src/apm_cli/deps/dependency_graph.py  Normal file
@@ -0,0 +1,187 @@
"""Data structures for dependency graph representation and resolution."""

from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple, Any
from ..models.apm_package import APMPackage, DependencyReference


@dataclass
class DependencyNode:
    """Represents a single dependency node in the dependency graph."""
    package: APMPackage
    dependency_ref: DependencyReference
    depth: int = 0
    children: List['DependencyNode'] = field(default_factory=list)
    parent: Optional['DependencyNode'] = None

    def get_id(self) -> str:
        """Get unique identifier for this node."""
        # Include reference to distinguish between different versions/branches of same repo
        if self.dependency_ref.reference:
            return f"{self.dependency_ref.repo_url}#{self.dependency_ref.reference}"
        return self.dependency_ref.repo_url

    def get_display_name(self) -> str:
        """Get display name for this dependency."""
        return self.dependency_ref.get_display_name()


@dataclass
class CircularRef:
    """Represents a circular dependency reference."""
    cycle_path: List[str]  # List of repo URLs forming the cycle
    detected_at_depth: int

    def _format_complete_cycle(self) -> str:
        """
        Return a string representation of the cycle, ensuring it is visually complete.
        If the cycle path does not end at the starting node, append the start to the end.
        """
        if not self.cycle_path:
            return "(empty path)"
        cycle_display = " -> ".join(self.cycle_path)
        # Ensure the cycle visually returns to the start node
        if len(self.cycle_path) > 1 and self.cycle_path[0] != self.cycle_path[-1]:
            cycle_display += f" -> {self.cycle_path[0]}"
        return cycle_display

    def __str__(self) -> str:
        """String representation of the circular dependency."""
        return f"Circular dependency detected: {self._format_complete_cycle()}"


@dataclass
class DependencyTree:
    """Hierarchical representation of dependencies before flattening."""
    root_package: APMPackage
    nodes: Dict[str, DependencyNode] = field(default_factory=dict)
    max_depth: int = 0

    def add_node(self, node: DependencyNode) -> None:
        """Add a node to the tree."""
        self.nodes[node.get_id()] = node
        self.max_depth = max(self.max_depth, node.depth)

    def get_node(self, repo_url: str) -> Optional[DependencyNode]:
        """Get a node by its repository URL."""
        return self.nodes.get(repo_url)

    def get_nodes_at_depth(self, depth: int) -> List[DependencyNode]:
        """Get all nodes at a specific depth level."""
        return [node for node in self.nodes.values() if node.depth == depth]

    def has_dependency(self, repo_url: str) -> bool:
        """Check if a dependency exists in the tree."""
        # Check by repo URL, not by full node ID (which may include reference)
        return any(node.dependency_ref.repo_url == repo_url for node in self.nodes.values())


@dataclass
class ConflictInfo:
    """Information about a dependency conflict."""
    repo_url: str
    winner: DependencyReference  # The dependency that "wins"
    conflicts: List[DependencyReference]  # All conflicting dependencies
    reason: str  # Explanation of why winner was chosen

    def __str__(self) -> str:
        """String representation of the conflict."""
        conflict_refs = [str(ref) for ref in self.conflicts]
        return f"Conflict for {self.repo_url}: {str(self.winner)} wins over {', '.join(conflict_refs)} ({self.reason})"


@dataclass
class FlatDependencyMap:
    """Final flattened dependency mapping ready for installation."""
    dependencies: Dict[str, DependencyReference] = field(default_factory=dict)
    conflicts: List[ConflictInfo] = field(default_factory=list)
    install_order: List[str] = field(default_factory=list)  # Order for installation

    def add_dependency(self, dep_ref: DependencyReference, is_conflict: bool = False) -> None:
        """Add a dependency to the flat map."""
        repo_url = dep_ref.repo_url

        # If this is the first occurrence, just add it
        if repo_url not in self.dependencies:
            self.dependencies[repo_url] = dep_ref
            self.install_order.append(repo_url)
        elif is_conflict:
            # Record the conflict but keep the first one (first wins strategy)
            existing_ref = self.dependencies[repo_url]
            conflict = ConflictInfo(
                repo_url=repo_url,
                winner=existing_ref,
                conflicts=[dep_ref],
                reason="first declared dependency wins"
            )

            # Check if we already have a conflict for this repo
            existing_conflict = next((c for c in self.conflicts if c.repo_url == repo_url), None)
            if existing_conflict:
                existing_conflict.conflicts.append(dep_ref)
            else:
                self.conflicts.append(conflict)

    def get_dependency(self, repo_url: str) -> Optional[DependencyReference]:
        """Get a dependency by repository URL."""
        return self.dependencies.get(repo_url)

    def has_conflicts(self) -> bool:
        """Check if there are any conflicts in the flattened map."""
        return bool(self.conflicts)

    def total_dependencies(self) -> int:
        """Get total number of unique dependencies."""
        return len(self.dependencies)

    def get_installation_list(self) -> List[DependencyReference]:
        """Get dependencies in installation order."""
        return [self.dependencies[repo_url] for repo_url in self.install_order if repo_url in self.dependencies]


@dataclass
class DependencyGraph:
    """Complete resolved dependency information."""
    root_package: APMPackage
    dependency_tree: DependencyTree
    flattened_dependencies: FlatDependencyMap
    circular_dependencies: List[CircularRef] = field(default_factory=list)
    resolution_errors: List[str] = field(default_factory=list)

    def has_circular_dependencies(self) -> bool:
        """Check if there are any circular dependencies."""
        return bool(self.circular_dependencies)

    def has_conflicts(self) -> bool:
        """Check if there are any dependency conflicts."""
        return self.flattened_dependencies.has_conflicts()

    def has_errors(self) -> bool:
        """Check if there are any resolution errors."""
        return bool(self.resolution_errors)

    def is_valid(self) -> bool:
        """Check if the dependency graph is valid (no circular deps or errors)."""
        return not self.has_circular_dependencies() and not self.has_errors()

    def get_summary(self) -> Dict[str, Any]:
        """Get a summary of the dependency resolution."""
        return {
            "root_package": self.root_package.name,
            "total_dependencies": self.flattened_dependencies.total_dependencies(),
            "max_depth": self.dependency_tree.max_depth,
            "has_circular_dependencies": self.has_circular_dependencies(),
            "circular_count": len(self.circular_dependencies),
            "has_conflicts": self.has_conflicts(),
            "conflict_count": len(self.flattened_dependencies.conflicts),
            "has_errors": self.has_errors(),
            "error_count": len(self.resolution_errors),
            "is_valid": self.is_valid()
        }

    def add_error(self, error: str) -> None:
        """Add a resolution error."""
        self.resolution_errors.append(error)

    def add_circular_dependency(self, circular_ref: CircularRef) -> None:
        """Add a circular dependency detection."""
        self.circular_dependencies.append(circular_ref)
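A small sketch of the first-wins behaviour of FlatDependencyMap, assuming DependencyReference.parse accepts owner/repo#ref strings as used elsewhere in this changeset; the repository names are hypothetical:

    from apm_cli.deps.dependency_graph import FlatDependencyMap
    from apm_cli.models.apm_package import DependencyReference

    first = DependencyReference.parse("acme/design-system#v1")
    second = DependencyReference.parse("acme/design-system#v2")  # same repo, different ref

    flat = FlatDependencyMap()
    flat.add_dependency(first)
    flat.add_dependency(second, is_conflict=True)  # recorded as a conflict; the first reference wins

    assert flat.get_dependency(first.repo_url) is first
    assert flat.has_conflicts()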
381  src/apm_cli/deps/github_downloader.py  Normal file
@@ -0,0 +1,381 @@
"""GitHub package downloader for APM dependencies."""

import os
import shutil
from datetime import datetime
from pathlib import Path
from typing import Optional, Dict, Any
import re

import git
from git import Repo
from git.exc import GitCommandError, InvalidGitRepositoryError

from ..core.token_manager import GitHubTokenManager
from ..models.apm_package import (
    DependencyReference,
    PackageInfo,
    ResolvedReference,
    GitReferenceType,
    validate_apm_package,
    APMPackage
)


class GitHubPackageDownloader:
    """Downloads and validates APM packages from GitHub repositories."""

    def __init__(self):
        """Initialize the GitHub package downloader."""
        self.token_manager = GitHubTokenManager()
        self.git_env = self._setup_git_environment()

    def _setup_git_environment(self) -> Dict[str, Any]:
        """Set up Git environment with GitHub authentication using centralized token manager.

        Returns:
            Dict containing environment variables for Git operations
        """
        # Use centralized token management
        env = self.token_manager.setup_environment()

        # Get the token for modules (APM package access)
        self.github_token = self.token_manager.get_token_for_purpose('modules', env)
        self.has_github_token = self.github_token is not None

        # Configure Git security settings
        env['GIT_TERMINAL_PROMPT'] = '0'
        env['GIT_ASKPASS'] = 'echo'  # Prevent interactive credential prompts
        env['GIT_CONFIG_NOSYSTEM'] = '1'
        env['GIT_CONFIG_GLOBAL'] = '/dev/null'

        return env

    def _sanitize_git_error(self, error_message: str) -> str:
        """Sanitize Git error messages to remove potentially sensitive authentication information.

        Args:
            error_message: Raw error message from Git operations

        Returns:
            str: Sanitized error message with sensitive data removed
        """
        import re

        # Remove any tokens that might appear in URLs (format: https://token@github.com)
        sanitized = re.sub(r'https://[^@\s]+@github\.com', 'https://***@github.com', error_message)

        # Remove any tokens that might appear as standalone values
        sanitized = re.sub(r'(ghp_|gho_|ghu_|ghs_|ghr_)[a-zA-Z0-9_]+', '***', sanitized)

        # Remove environment variable values that might contain tokens
        sanitized = re.sub(r'(GITHUB_TOKEN|GITHUB_APM_PAT|GH_TOKEN|GITHUB_COPILOT_PAT|GITHUB_NPM_PAT)=[^\s]+', r'\1=***', sanitized)

        return sanitized

    def _build_repo_url(self, repo_ref: str, use_ssh: bool = False) -> str:
        """Build the appropriate repository URL for cloning.

        Uses GitHub Enterprise authentication format for private repositories:
        - x-access-token format for authenticated HTTPS (GitHub Enterprise standard)
        - SSH URLs for SSH key-based authentication
        - Standard HTTPS URLs as fallback

        Args:
            repo_ref: Repository reference in format "owner/repo"
            use_ssh: Whether to use SSH URL for git operations

        Returns:
            str: Repository URL suitable for git clone operations
        """
        if use_ssh:
            # Use SSH URL for private repository access with SSH keys
            return f"git@github.com:{repo_ref}.git"
        elif self.github_token:
            # Use GitHub Enterprise x-access-token format for authenticated access
            # This is the standard format for GitHub Actions and Enterprise environments
            return f"https://x-access-token:{self.github_token}@github.com/{repo_ref}.git"
        else:
            # Use standard HTTPS URL for public repositories
            return f"https://github.com/{repo_ref}"

    def _clone_with_fallback(self, repo_url_base: str, target_path: Path, **clone_kwargs) -> Repo:
        """Attempt to clone a repository with fallback authentication methods.

        Uses GitHub Enterprise authentication patterns:
        1. x-access-token format for private repos (GitHub Enterprise standard)
        2. SSH for SSH key-based authentication
        3. Standard HTTPS for public repos (fallback)

        Args:
            repo_url_base: Base repository reference (owner/repo)
            target_path: Target path for cloning
            **clone_kwargs: Additional arguments for Repo.clone_from

        Returns:
            Repo: Successfully cloned repository

        Raises:
            RuntimeError: If all authentication methods fail
        """
        last_error = None

        # Method 1: Try x-access-token format if token is available (GitHub Enterprise)
        if self.github_token:
            try:
                auth_url = self._build_repo_url(repo_url_base, use_ssh=False)
                return Repo.clone_from(auth_url, target_path, env=self.git_env, **clone_kwargs)
            except GitCommandError as e:
                last_error = e
                # Continue to next method

        # Method 2: Try SSH if it might work (for SSH key-based authentication)
        try:
            ssh_url = self._build_repo_url(repo_url_base, use_ssh=True)
            return Repo.clone_from(ssh_url, target_path, env=self.git_env, **clone_kwargs)
        except GitCommandError as e:
            last_error = e
            # Continue to next method

        # Method 3: Try standard HTTPS as fallback for public repos
        try:
            public_url = f"https://github.com/{repo_url_base}"
            return Repo.clone_from(public_url, target_path, env=self.git_env, **clone_kwargs)
        except GitCommandError as e:
            last_error = e

        # All methods failed
        error_msg = f"Failed to clone repository {repo_url_base} using all available methods. "
        if not self.has_github_token:
            error_msg += "For private repositories, set GITHUB_APM_PAT or GITHUB_TOKEN environment variable, " \
                         "or ensure SSH keys are configured."
        else:
            error_msg += "Please check repository access permissions and authentication setup."

        if last_error:
            sanitized_error = self._sanitize_git_error(str(last_error))
            error_msg += f" Last error: {sanitized_error}"

        raise RuntimeError(error_msg)

    def resolve_git_reference(self, repo_ref: str) -> ResolvedReference:
        """Resolve a Git reference (branch/tag/commit) to a specific commit SHA.

        Args:
            repo_ref: Repository reference string (e.g., "user/repo#branch")

        Returns:
            ResolvedReference: Resolved reference with commit SHA

        Raises:
            ValueError: If the reference format is invalid
            RuntimeError: If Git operations fail
        """
        # Parse the repository reference
        try:
            dep_ref = DependencyReference.parse(repo_ref)
        except ValueError as e:
            raise ValueError(f"Invalid repository reference '{repo_ref}': {e}")

        # Default to main branch if no reference specified
        ref = dep_ref.reference or "main"

        # Pre-analyze the reference type to determine the best approach
        is_likely_commit = re.match(r'^[a-f0-9]{7,40}$', ref.lower()) is not None

        # Create a temporary directory for Git operations
        temp_dir = None
        try:
            import tempfile
            temp_dir = Path(tempfile.mkdtemp())

            if is_likely_commit:
                # For commit SHAs, clone full repository first, then checkout the commit
                try:
                    repo = self._clone_with_fallback(dep_ref.repo_url, temp_dir)
                    commit = repo.commit(ref)
                    ref_type = GitReferenceType.COMMIT
                    resolved_commit = commit.hexsha
                    ref_name = ref
                except Exception as e:
                    sanitized_error = self._sanitize_git_error(str(e))
                    raise ValueError(f"Could not resolve commit '{ref}' in repository {dep_ref.repo_url}: {sanitized_error}")
            else:
                # For branches and tags, try shallow clone first
                try:
                    # Try to clone with specific branch/tag first
                    repo = self._clone_with_fallback(
                        dep_ref.repo_url,
                        temp_dir,
                        depth=1,
                        branch=ref
                    )
                    ref_type = GitReferenceType.BRANCH  # Could be branch or tag
                    resolved_commit = repo.head.commit.hexsha
                    ref_name = ref

                except GitCommandError:
                    # If branch/tag clone fails, try full clone and resolve reference
                    try:
                        repo = self._clone_with_fallback(dep_ref.repo_url, temp_dir)

                        # Try to resolve the reference
                        try:
                            # Try as branch first
                            try:
                                branch = repo.refs[f"origin/{ref}"]
                                ref_type = GitReferenceType.BRANCH
                                resolved_commit = branch.commit.hexsha
                                ref_name = ref
                            except IndexError:
                                # Try as tag
                                try:
                                    tag = repo.tags[ref]
                                    ref_type = GitReferenceType.TAG
                                    resolved_commit = tag.commit.hexsha
                                    ref_name = ref
                                except IndexError:
                                    raise ValueError(f"Reference '{ref}' not found in repository {dep_ref.repo_url}")

                        except Exception as e:
                            sanitized_error = self._sanitize_git_error(str(e))
                            raise ValueError(f"Could not resolve reference '{ref}' in repository {dep_ref.repo_url}: {sanitized_error}")

                    except GitCommandError as e:
                        # Check if this might be a private repository access issue
                        if "Authentication failed" in str(e) or "remote: Repository not found" in str(e):
                            error_msg = f"Failed to clone repository {dep_ref.repo_url}. "
                            if not self.has_github_token:
                                error_msg += "This might be a private repository that requires authentication. " \
                                             "Please set GITHUB_APM_PAT or GITHUB_TOKEN environment variable."
                            else:
                                error_msg += "Authentication failed. Please check your GitHub token permissions."
                            raise RuntimeError(error_msg)
                        else:
                            sanitized_error = self._sanitize_git_error(str(e))
                            raise RuntimeError(f"Failed to clone repository {dep_ref.repo_url}: {sanitized_error}")

        finally:
            # Clean up temporary directory
            if temp_dir and temp_dir.exists():
                shutil.rmtree(temp_dir, ignore_errors=True)

        return ResolvedReference(
            original_ref=repo_ref,
            ref_type=ref_type,
            resolved_commit=resolved_commit,
            ref_name=ref_name
        )

    def download_package(self, repo_ref: str, target_path: Path) -> PackageInfo:
        """Download a GitHub repository and validate it as an APM package.

        Args:
            repo_ref: Repository reference string (e.g., "user/repo#branch")
            target_path: Local path where package should be downloaded

        Returns:
            PackageInfo: Information about the downloaded package

        Raises:
            ValueError: If the repository reference is invalid
            RuntimeError: If download or validation fails
        """
        # Parse the repository reference
        try:
            dep_ref = DependencyReference.parse(repo_ref)
        except ValueError as e:
            raise ValueError(f"Invalid repository reference '{repo_ref}': {e}")

        # Resolve the Git reference to get specific commit
        resolved_ref = self.resolve_git_reference(repo_ref)

        # Create target directory if it doesn't exist
        target_path.mkdir(parents=True, exist_ok=True)

        # If directory already exists and has content, remove it
        if target_path.exists() and any(target_path.iterdir()):
            shutil.rmtree(target_path)
            target_path.mkdir(parents=True, exist_ok=True)

        try:
            # Clone the repository using fallback authentication methods
            # Use shallow clone for performance if we have a specific commit
            if resolved_ref.ref_type == GitReferenceType.COMMIT:
                # For commits, we need to clone and checkout the specific commit
                repo = self._clone_with_fallback(dep_ref.repo_url, target_path)
                repo.git.checkout(resolved_ref.resolved_commit)
            else:
                # For branches and tags, we can use shallow clone
                repo = self._clone_with_fallback(
                    dep_ref.repo_url,
                    target_path,
                    depth=1,
                    branch=resolved_ref.ref_name
                )

            # Remove .git directory to save space and prevent treating as a Git repository
            git_dir = target_path / ".git"
            if git_dir.exists():
                shutil.rmtree(git_dir, ignore_errors=True)

        except GitCommandError as e:
            # Check if this might be a private repository access issue
            if "Authentication failed" in str(e) or "remote: Repository not found" in str(e):
                error_msg = f"Failed to clone repository {dep_ref.repo_url}. "
                if not self.has_github_token:
                    error_msg += "This might be a private repository that requires authentication. " \
                                 "Please set GITHUB_APM_PAT or GITHUB_TOKEN environment variable."
                else:
                    error_msg += "Authentication failed. Please check your GitHub token permissions."
                raise RuntimeError(error_msg)
            else:
                sanitized_error = self._sanitize_git_error(str(e))
                raise RuntimeError(f"Failed to clone repository {dep_ref.repo_url}: {sanitized_error}")
        except RuntimeError:
            # Re-raise RuntimeError from _clone_with_fallback
            raise

        # Validate the downloaded package
        validation_result = validate_apm_package(target_path)
        if not validation_result.is_valid:
            # Clean up on validation failure
            if target_path.exists():
                shutil.rmtree(target_path, ignore_errors=True)

            error_msg = f"Invalid APM package {dep_ref.repo_url}:\n"
            for error in validation_result.errors:
                error_msg += f"  - {error}\n"
            raise RuntimeError(error_msg.strip())

        # Load the APM package metadata
        if not validation_result.package:
            raise RuntimeError(f"Package validation succeeded but no package metadata found for {dep_ref.repo_url}")

        package = validation_result.package
        package.source = dep_ref.to_github_url()
        package.resolved_commit = resolved_ref.resolved_commit

        # Create and return PackageInfo
        return PackageInfo(
            package=package,
            install_path=target_path,
            resolved_reference=resolved_ref,
            installed_at=datetime.now().isoformat()
        )

    def _get_clone_progress_callback(self):
        """Get a progress callback for Git clone operations.

        Returns:
            Callable that can be used as progress callback for GitPython
        """
        def progress_callback(op_code, cur_count, max_count=None, message=''):
            """Progress callback for Git operations."""
            if max_count:
                percentage = int((cur_count / max_count) * 100)
                print(f"\r🚀 Cloning: {percentage}% ({cur_count}/{max_count}) {message}", end='', flush=True)
            else:
                print(f"\r🚀 Cloning: {message} ({cur_count})", end='', flush=True)

        return progress_callback
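A usage sketch for the downloader above; the repository name is hypothetical, and the call needs network access plus GITHUB_APM_PAT or GITHUB_TOKEN for private repositories:

    from pathlib import Path
    from apm_cli.deps import GitHubPackageDownloader

    downloader = GitHubPackageDownloader()  # picks up tokens via GitHubTokenManager
    info = downloader.download_package("acme/apm-utils#main", Path("apm_modules/apm-utils"))

    print(info.package.name, info.resolved_reference.resolved_commit[:7])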
216
src/apm_cli/deps/package_validator.py
Normal file
216
src/apm_cli/deps/package_validator.py
Normal file
@@ -0,0 +1,216 @@
|
|||||||
|
"""APM package structure validation."""
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Optional
|
||||||
|
import os
|
||||||
|
|
||||||
|
from ..models.apm_package import (
|
||||||
|
ValidationResult,
|
||||||
|
APMPackage,
|
||||||
|
validate_apm_package as base_validate_apm_package
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class PackageValidator:
|
||||||
|
"""Validates APM package structure and content."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
"""Initialize the package validator."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
def validate_package(self, package_path: Path) -> ValidationResult:
|
||||||
|
"""Validate that a directory contains a valid APM package.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
package_path: Path to the directory to validate
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
ValidationResult: Validation results with any errors/warnings
|
||||||
|
"""
|
||||||
|
return base_validate_apm_package(package_path)
|
||||||
|
|
||||||
|
def validate_package_structure(self, package_path: Path) -> ValidationResult:
|
||||||
|
"""Validate APM package directory structure.
|
||||||
|
|
||||||
|
Checks for required files and directories:
|
||||||
|
- apm.yml at root
|
||||||
|
- .apm/ directory with primitives
|
||||||
|
|
||||||
|
Args:
|
||||||
|
package_path: Path to the package directory
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
ValidationResult: Detailed validation results
|
||||||
|
"""
|
||||||
|
result = ValidationResult()
|
||||||
|
|
||||||
|
if not package_path.exists():
|
||||||
|
result.add_error(f"Package directory does not exist: {package_path}")
|
||||||
|
return result
|
||||||
|
|
||||||
|
if not package_path.is_dir():
|
||||||
|
result.add_error(f"Package path is not a directory: {package_path}")
|
||||||
|
return result
|
||||||
|
|
||||||
|
# Check for apm.yml
|
||||||
|
apm_yml = package_path / "apm.yml"
|
||||||
|
if not apm_yml.exists():
|
||||||
|
result.add_error("Missing required file: apm.yml")
|
||||||
|
return result
|
||||||
|
|
||||||
|
# Try to parse apm.yml
|
||||||
|
try:
|
||||||
|
package = APMPackage.from_apm_yml(apm_yml)
|
||||||
|
result.package = package
|
||||||
|
except (ValueError, FileNotFoundError) as e:
|
||||||
|
result.add_error(f"Invalid apm.yml: {e}")
|
||||||
|
return result
|
||||||
|
|
||||||
|
# Check for .apm directory
|
||||||
|
apm_dir = package_path / ".apm"
|
||||||
|
if not apm_dir.exists():
|
||||||
|
result.add_error("Missing required directory: .apm/")
|
||||||
|
return result
|
||||||
|
|
||||||
|
if not apm_dir.is_dir():
|
||||||
|
result.add_error(".apm must be a directory")
|
||||||
|
return result
|
||||||
|
|
||||||
|
# Check for primitive content
|
||||||
|
primitive_types = ['instructions', 'chatmodes', 'contexts', 'prompts']
|
||||||
|
has_primitives = False
|
||||||
|
|
||||||
|
for primitive_type in primitive_types:
|
||||||
|
primitive_dir = apm_dir / primitive_type
|
||||||
|
if primitive_dir.exists() and primitive_dir.is_dir():
|
||||||
|
md_files = list(primitive_dir.glob("*.md"))
|
||||||
|
if md_files:
|
||||||
|
has_primitives = True
|
||||||
|
# Validate each primitive file
|
||||||
|
for md_file in md_files:
|
||||||
|
self._validate_primitive_file(md_file, result)
|
||||||
|
|
||||||
|
if not has_primitives:
|
||||||
|
result.add_warning("No primitive files found in .apm/ directory")
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
    def _validate_primitive_file(self, file_path: Path, result: ValidationResult) -> None:
        """Validate a single primitive file.

        Args:
            file_path: Path to the primitive markdown file
            result: ValidationResult to add warnings/errors to
        """
        try:
            content = file_path.read_text(encoding='utf-8')
            if not content.strip():
                result.add_warning(f"Empty primitive file: {file_path.name}")
        except Exception as e:
            result.add_warning(f"Could not read primitive file {file_path.name}: {e}")

    def validate_primitive_structure(self, apm_dir: Path) -> List[str]:
        """Validate the structure of primitives in .apm directory.

        Args:
            apm_dir: Path to the .apm directory

        Returns:
            List[str]: List of validation warnings/issues found
        """
        issues = []

        if not apm_dir.exists():
            issues.append("Missing .apm directory")
            return issues

        primitive_types = ['instructions', 'chatmodes', 'contexts', 'prompts']
        found_primitives = False

        for primitive_type in primitive_types:
            primitive_dir = apm_dir / primitive_type
            if primitive_dir.exists():
                if not primitive_dir.is_dir():
                    issues.append(f"{primitive_type} should be a directory")
                    continue

                # Check for markdown files
                md_files = list(primitive_dir.glob("*.md"))
                if md_files:
                    found_primitives = True

                    # Validate naming convention
                    for md_file in md_files:
                        if not self._is_valid_primitive_name(md_file.name, primitive_type):
                            issues.append(f"Invalid primitive file name: {md_file.name}")

        if not found_primitives:
            issues.append("No primitive files found in .apm directory")

        return issues

    def _is_valid_primitive_name(self, filename: str, primitive_type: str) -> bool:
        """Check if a primitive filename follows naming conventions.

        Args:
            filename: The filename to validate
            primitive_type: Type of primitive (instructions, chatmodes, etc.)

        Returns:
            bool: True if filename is valid
        """
        # Basic validation - should end with .md
        if not filename.endswith('.md'):
            return False

        # Should not contain spaces (prefer hyphens or underscores)
        if ' ' in filename:
            return False

        # For specific types, check expected suffixes using a mapping
        name_without_ext = filename[:-3]  # Remove .md
        suffix_map = {
            'instructions': '.instructions',
            'chatmodes': '.chatmode',
            'contexts': '.context',
            'prompts': '.prompt',
        }
        expected_suffix = suffix_map.get(primitive_type)
        if expected_suffix and not name_without_ext.endswith(expected_suffix):
            return False

        return True

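    # Illustrative checks against the suffix map above (filenames are hypothetical,
    # and `v` is assumed to be an instance of this validator class):
    #
    #     assert v._is_valid_primitive_name("code-style.instructions.md", "instructions")
    #     assert v._is_valid_primitive_name("review.chatmode.md", "chatmodes")
    #     assert not v._is_valid_primitive_name("code style.md", "instructions")   # contains a space
    #     assert not v._is_valid_primitive_name("review.md", "chatmodes")          # missing .chatmode suffix
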
    def get_package_info_summary(self, package_path: Path) -> Optional[str]:
        """Get a summary of package information for display.

        Args:
            package_path: Path to the package directory

        Returns:
            Optional[str]: Summary string or None if package is invalid
        """
        validation_result = self.validate_package(package_path)

        if not validation_result.is_valid or not validation_result.package:
            return None

        package = validation_result.package
        summary = f"{package.name} v{package.version}"

        if package.description:
            summary += f" - {package.description}"

        # Count primitives
        apm_dir = package_path / ".apm"
        if apm_dir.exists():
            primitive_count = 0
            for primitive_type in ['instructions', 'chatmodes', 'contexts', 'prompts']:
                primitive_dir = apm_dir / primitive_type
                if primitive_dir.exists():
                    primitive_count += len(list(primitive_dir.glob("*.md")))

            if primitive_count > 0:
                summary += f" ({primitive_count} primitives)"

        return summary
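For a valid package, get_package_info_summary therefore returns a single display string such as (hypothetical metadata):

    my-package v1.2.0 - Shared coding conventions (3 primitives)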
102
src/apm_cli/deps/verifier.py
Normal file
102
src/apm_cli/deps/verifier.py
Normal file
@@ -0,0 +1,102 @@
"""Dependency verification for APM-CLI."""

import os
from pathlib import Path
import yaml
from ..factory import PackageManagerFactory, ClientFactory


def load_apm_config(config_file="apm.yml"):
    """Load the APM configuration file.

    Args:
        config_file (str, optional): Path to the configuration file. Defaults to "apm.yml".

    Returns:
        dict: The configuration, or None if loading failed.
    """
    try:
        config_path = Path(config_file)
        if not config_path.exists():
            print(f"Configuration file {config_file} not found.")
            return None

        with open(config_path, 'r', encoding='utf-8') as f:
            config = yaml.safe_load(f)

        return config
    except Exception as e:
        print(f"Error loading {config_file}: {e}")
        return None


def verify_dependencies(config_file="apm.yml"):
    """Check if apm.yml servers are installed.

    Args:
        config_file (str, optional): Path to the configuration file. Defaults to "apm.yml".

    Returns:
        tuple: (bool, list, list) - All installed status, list of installed, list of missing
    """
    config = load_apm_config(config_file)
    if not config or 'servers' not in config:
        return False, [], []

    try:
        package_manager = PackageManagerFactory.create_package_manager()
        installed = package_manager.list_installed()

        # Check which servers are missing
        required_servers = config['servers']
        missing = [server for server in required_servers if server not in installed]
        installed_servers = [server for server in required_servers if server in installed]

        all_installed = len(missing) == 0

        return all_installed, installed_servers, missing
    except Exception as e:
        print(f"Error verifying dependencies: {e}")
        return False, [], []


def install_missing_dependencies(config_file="apm.yml", client_type="vscode"):
    """Install missing dependencies from apm.yml for specified client.

    Args:
        config_file (str, optional): Path to the configuration file. Defaults to "apm.yml".
        client_type (str, optional): Type of client to configure. Defaults to "vscode".

    Returns:
        tuple: (bool, list) - Success status and list of installed packages
    """
    _, _, missing = verify_dependencies(config_file)

    if not missing:
        return True, []

    installed = []

    # Get client adapter and package manager
    client = ClientFactory.create_client(client_type)
    package_manager = PackageManagerFactory.create_package_manager()

    for server in missing:
        try:
            # Install the package using the package manager
            install_result = package_manager.install(server)

            if install_result:
                # Configure the client to use the server
                # For VSCode this updates the .vscode/mcp.json file in the project root
                client_result = client.configure_mcp_server(server, server_name=server)

                if client_result:
                    installed.append(server)
                else:
                    print(f"Warning: Package {server} installed but client configuration failed")

        except Exception as e:
            print(f"Error installing {server}: {e}")

    return len(installed) == len(missing), installed
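A minimal sketch of how these helpers fit together, assuming the project is installed so that src/apm_cli is importable as apm_cli and assuming hypothetical server names in apm.yml (servers: [github-mcp, filesystem-mcp]):

    from apm_cli.deps.verifier import verify_dependencies, install_missing_dependencies

    ok, installed, missing = verify_dependencies("apm.yml")
    if not ok:
        success, newly_installed = install_missing_dependencies("apm.yml", client_type="vscode")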
61
src/apm_cli/factory.py
Normal file
61
src/apm_cli/factory.py
Normal file
@@ -0,0 +1,61 @@
"""Factory classes for creating adapters."""

from .adapters.client.vscode import VSCodeClientAdapter
from .adapters.client.codex import CodexClientAdapter
from .adapters.package_manager.default_manager import DefaultMCPPackageManager


class ClientFactory:
    """Factory for creating MCP client adapters."""

    @staticmethod
    def create_client(client_type):
        """Create a client adapter based on the specified type.

        Args:
            client_type (str): Type of client adapter to create.

        Returns:
            MCPClientAdapter: An instance of the specified client adapter.

        Raises:
            ValueError: If the client type is not supported.
        """
        clients = {
            "vscode": VSCodeClientAdapter,
            "codex": CodexClientAdapter,
            # Add more clients as needed
        }

        if client_type.lower() not in clients:
            raise ValueError(f"Unsupported client type: {client_type}")

        return clients[client_type.lower()]()


class PackageManagerFactory:
    """Factory for creating MCP package manager adapters."""

    @staticmethod
    def create_package_manager(manager_type="default"):
        """Create a package manager adapter based on the specified type.

        Args:
            manager_type (str, optional): Type of package manager adapter to create.
                Defaults to "default".

        Returns:
            MCPPackageManagerAdapter: An instance of the specified package manager adapter.

        Raises:
            ValueError: If the package manager type is not supported.
        """
        managers = {
            "default": DefaultMCPPackageManager,
            # Add more package managers as they emerge
        }

        if manager_type.lower() not in managers:
            raise ValueError(f"Unsupported package manager type: {manager_type}")

        return managers[manager_type.lower()]()
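Callers resolve adapters through these factories instead of importing adapter classes directly; a short sketch ("vscode", "codex", and "default" are the only types registered in this diff):

    from apm_cli.factory import ClientFactory, PackageManagerFactory

    client = ClientFactory.create_client("vscode")            # VSCodeClientAdapter instance
    manager = PackageManagerFactory.create_package_manager()  # DefaultMCPPackageManager instance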
21
src/apm_cli/models/__init__.py
Normal file
21
src/apm_cli/models/__init__.py
Normal file
@@ -0,0 +1,21 @@
"""Models for APM CLI data structures."""

from .apm_package import (
    APMPackage,
    DependencyReference,
    ValidationResult,
    ValidationError,
    ResolvedReference,
    PackageInfo,
    GitReferenceType,
)

__all__ = [
    "APMPackage",
    "DependencyReference",
    "ValidationResult",
    "ValidationError",
    "ResolvedReference",
    "PackageInfo",
    "GitReferenceType",
]
483
src/apm_cli/models/apm_package.py
Normal file
483
src/apm_cli/models/apm_package.py
Normal file
@@ -0,0 +1,483 @@
"""APM Package data models and validation logic."""

import re
import urllib.parse
import yaml
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Optional, List, Dict, Any, Union


class GitReferenceType(Enum):
    """Types of Git references supported."""
    BRANCH = "branch"
    TAG = "tag"
    COMMIT = "commit"


class ValidationError(Enum):
    """Types of validation errors for APM packages."""
    MISSING_APM_YML = "missing_apm_yml"
    MISSING_APM_DIR = "missing_apm_dir"
    INVALID_YML_FORMAT = "invalid_yml_format"
    MISSING_REQUIRED_FIELD = "missing_required_field"
    INVALID_VERSION_FORMAT = "invalid_version_format"
    INVALID_DEPENDENCY_FORMAT = "invalid_dependency_format"
    EMPTY_APM_DIR = "empty_apm_dir"
    INVALID_PRIMITIVE_STRUCTURE = "invalid_primitive_structure"


@dataclass
class ResolvedReference:
    """Represents a resolved Git reference."""
    original_ref: str
    ref_type: GitReferenceType
    resolved_commit: str
    ref_name: str  # The actual branch/tag/commit name

    def __str__(self) -> str:
        """String representation of resolved reference."""
        if self.ref_type == GitReferenceType.COMMIT:
            return f"{self.resolved_commit[:8]}"
        return f"{self.ref_name} ({self.resolved_commit[:8]})"


@dataclass
class DependencyReference:
    """Represents a reference to an APM dependency."""
    repo_url: str  # e.g., "user/repo" or "github.com/user/repo"
    reference: Optional[str] = None  # e.g., "main", "v1.0.0", "abc123"
    alias: Optional[str] = None  # Optional alias for the dependency

    @classmethod
    def parse(cls, dependency_str: str) -> "DependencyReference":
        """Parse a dependency string into a DependencyReference.

        Supports formats:
        - user/repo
        - user/repo#branch
        - user/repo#v1.0.0
        - user/repo#commit_sha
        - github.com/user/repo#ref
        - user/repo@alias
        - user/repo#ref@alias

        Args:
            dependency_str: The dependency string to parse

        Returns:
            DependencyReference: Parsed dependency reference

        Raises:
            ValueError: If the dependency string format is invalid
        """
        if not dependency_str.strip():
            raise ValueError("Empty dependency string")

        # Check for control characters (newlines, tabs, etc.)
        if any(ord(c) < 32 for c in dependency_str):
            raise ValueError("Dependency string contains invalid control characters")

        # Handle SSH URLs first (before @ processing) to avoid conflict with alias separator
        original_str = dependency_str
        if dependency_str.startswith("git@github.com:"):
            # For SSH URLs, extract repo part before @ processing
            ssh_repo_part = dependency_str[len("git@github.com:"):]
            if ssh_repo_part.endswith(".git"):
                ssh_repo_part = ssh_repo_part[:-4]

            # Handle reference and alias in SSH URL
            reference = None
            alias = None

            if "@" in ssh_repo_part:
                ssh_repo_part, alias = ssh_repo_part.rsplit("@", 1)
                alias = alias.strip()

            if "#" in ssh_repo_part:
                repo_part, reference = ssh_repo_part.rsplit("#", 1)
                reference = reference.strip()
            else:
                repo_part = ssh_repo_part

            repo_url = repo_part.strip()
        else:
            # Handle alias (@alias) for non-SSH URLs
            alias = None
            if "@" in dependency_str:
                dependency_str, alias = dependency_str.rsplit("@", 1)
                alias = alias.strip()

            # Handle reference (#ref)
            reference = None
            if "#" in dependency_str:
                repo_part, reference = dependency_str.rsplit("#", 1)
                reference = reference.strip()
            else:
                repo_part = dependency_str

            # SECURITY: Use urllib.parse for all URL validation to avoid substring vulnerabilities
            repo_url = repo_part.strip()

            # Normalize to URL format for secure parsing - always use urllib.parse, never substring checks
            if repo_url.startswith(("https://", "http://")):
                # Already a full URL - parse directly
                parsed_url = urllib.parse.urlparse(repo_url)
            else:
                # Safely construct GitHub URL from various input formats
                parts = repo_url.split("/")
                if len(parts) >= 3 and parts[0] == "github.com":
                    # Format: github.com/user/repo (must be precisely so)
                    user_repo = "/".join(parts[1:3])
                elif len(parts) >= 2 and "." not in parts[0]:
                    # Format: user/repo (no dot in user part, so not a domain)
                    user_repo = "/".join(parts[:2])
                else:
                    raise ValueError(f"Only GitHub repositories are supported. Use 'user/repo' or 'github.com/user/repo' format")

                # Validate format before URL construction (security critical)
                if not user_repo or "/" not in user_repo:
                    raise ValueError(f"Invalid repository format: {repo_url}. Expected 'user/repo' or 'github.com/user/repo'")

                parts = user_repo.split("/")
                if len(parts) < 2 or not parts[0] or not parts[1]:
                    raise ValueError(f"Invalid repository format: {repo_url}. Expected 'user/repo' or 'github.com/user/repo'")

                user, repo = parts[0], parts[1]

                # Security: validate characters to prevent injection
                if not re.match(r'^[a-zA-Z0-9._-]+$', user):
                    raise ValueError(f"Invalid user name: {user}")
                if not re.match(r'^[a-zA-Z0-9._-]+$', repo.rstrip('.git')):
                    raise ValueError(f"Invalid repository name: {repo}")

                # Safely construct URL - this is now secure
                github_url = urllib.parse.urljoin("https://github.com/", f"{user}/{repo}")
                parsed_url = urllib.parse.urlparse(github_url)

            # SECURITY: Validate that this is actually a GitHub URL with exact hostname match
            if parsed_url.netloc != "github.com":
                raise ValueError(f"Only GitHub repositories are supported, got hostname: {parsed_url.netloc}")

            # Extract and validate the path
            path = parsed_url.path.strip("/")
            if not path:
                raise ValueError("Repository path cannot be empty")

            # Remove .git suffix if present
            if path.endswith(".git"):
                path = path[:-4]

            # Validate path is exactly user/repo format
            path_parts = path.split("/")
            if len(path_parts) != 2:
                raise ValueError(f"Invalid repository path: expected 'user/repo', got '{path}'")

            user, repo = path_parts
            if not user or not repo:
                raise ValueError(f"Invalid repository format: user and repo names cannot be empty")

            # Validate user and repo names contain only allowed characters
            if not re.match(r'^[a-zA-Z0-9._-]+$', user):
                raise ValueError(f"Invalid user name: {user}")
            if not re.match(r'^[a-zA-Z0-9._-]+$', repo):
                raise ValueError(f"Invalid repository name: {repo}")

            repo_url = f"{user}/{repo}"

        # Remove trailing .git if present after normalization
        if repo_url.endswith(".git"):
            repo_url = repo_url[:-4]

        # Validate repo format (should be user/repo)
        if not re.match(r'^[a-zA-Z0-9._-]+/[a-zA-Z0-9._-]+$', repo_url):
            raise ValueError(f"Invalid repository format: {repo_url}. Expected 'user/repo'")

        # Validate alias characters if present
        if alias and not re.match(r'^[a-zA-Z0-9._-]+$', alias):
            raise ValueError(f"Invalid alias: {alias}. Aliases can only contain letters, numbers, dots, underscores, and hyphens")

        return cls(repo_url=repo_url, reference=reference, alias=alias)

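    # A few illustrative parses (repository and alias names are hypothetical):
    #
    #     DependencyReference.parse("octo-org/apm-rules")
    #     #   -> repo_url="octo-org/apm-rules", reference=None, alias=None
    #     DependencyReference.parse("octo-org/apm-rules#v1.2.0@rules")
    #     #   -> repo_url="octo-org/apm-rules", reference="v1.2.0", alias="rules"
    #     DependencyReference.parse("git@github.com:octo-org/apm-rules.git#main")
    #     #   -> repo_url="octo-org/apm-rules", reference="main", alias=None
    #     DependencyReference.parse("gitlab.com/octo-org/apm-rules")
    #     #   -> raises ValueError (only GitHub repositories are supported)
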
    def to_github_url(self) -> str:
        """Convert to full GitHub URL."""
        return f"https://github.com/{self.repo_url}"

    def get_display_name(self) -> str:
        """Get display name for this dependency (alias or repo name)."""
        if self.alias:
            return self.alias
        return self.repo_url  # Full repo URL for disambiguation

    def __str__(self) -> str:
        """String representation of the dependency reference."""
        result = self.repo_url
        if self.reference:
            result += f"#{self.reference}"
        if self.alias:
            result += f"@{self.alias}"
        return result


@dataclass
class APMPackage:
    """Represents an APM package with metadata."""
    name: str
    version: str
    description: Optional[str] = None
    author: Optional[str] = None
    license: Optional[str] = None
    source: Optional[str] = None  # Source location (for dependencies)
    resolved_commit: Optional[str] = None  # Resolved commit SHA (for dependencies)
    dependencies: Optional[Dict[str, List[Union[DependencyReference, str]]]] = None  # Mixed types for APM/MCP
    scripts: Optional[Dict[str, str]] = None
    package_path: Optional[Path] = None  # Local path to package

    @classmethod
    def from_apm_yml(cls, apm_yml_path: Path) -> "APMPackage":
        """Load APM package from apm.yml file.

        Args:
            apm_yml_path: Path to the apm.yml file

        Returns:
            APMPackage: Loaded package instance

        Raises:
            ValueError: If the file is invalid or missing required fields
            FileNotFoundError: If the file doesn't exist
        """
        if not apm_yml_path.exists():
            raise FileNotFoundError(f"apm.yml not found: {apm_yml_path}")

        try:
            with open(apm_yml_path, 'r', encoding='utf-8') as f:
                data = yaml.safe_load(f)
        except yaml.YAMLError as e:
            raise ValueError(f"Invalid YAML format in {apm_yml_path}: {e}")

        if not isinstance(data, dict):
            raise ValueError(f"apm.yml must contain a YAML object, got {type(data)}")

        # Required fields
        if 'name' not in data:
            raise ValueError("Missing required field 'name' in apm.yml")
        if 'version' not in data:
            raise ValueError("Missing required field 'version' in apm.yml")

        # Parse dependencies
        dependencies = None
        if 'dependencies' in data and isinstance(data['dependencies'], dict):
            dependencies = {}
            for dep_type, dep_list in data['dependencies'].items():
                if isinstance(dep_list, list):
                    if dep_type == 'apm':
                        # APM dependencies need to be parsed as DependencyReference objects
                        parsed_deps = []
                        for dep_str in dep_list:
                            if isinstance(dep_str, str):
                                try:
                                    parsed_deps.append(DependencyReference.parse(dep_str))
                                except ValueError as e:
                                    raise ValueError(f"Invalid APM dependency '{dep_str}': {e}")
                        dependencies[dep_type] = parsed_deps
                    else:
                        # Other dependencies (like MCP) remain as strings
                        dependencies[dep_type] = [str(dep) for dep in dep_list if isinstance(dep, str)]

        return cls(
            name=data['name'],
            version=data['version'],
            description=data.get('description'),
            author=data.get('author'),
            license=data.get('license'),
            dependencies=dependencies,
            scripts=data.get('scripts'),
            package_path=apm_yml_path.parent
        )

    def get_apm_dependencies(self) -> List[DependencyReference]:
        """Get list of APM dependencies."""
        if not self.dependencies or 'apm' not in self.dependencies:
            return []
        # Filter to only return DependencyReference objects
        return [dep for dep in self.dependencies['apm'] if isinstance(dep, DependencyReference)]

    def get_mcp_dependencies(self) -> List[str]:
        """Get list of MCP dependencies (as strings for compatibility)."""
        if not self.dependencies or 'mcp' not in self.dependencies:
            return []
        # MCP deps are stored as strings, not DependencyReference objects
        return [str(dep) if isinstance(dep, DependencyReference) else dep
                for dep in self.dependencies.get('mcp', [])]

    def has_apm_dependencies(self) -> bool:
        """Check if this package has APM dependencies."""
        return bool(self.get_apm_dependencies())

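    # Loading sketch for an illustrative apm.yml (all field values hypothetical,
    # but the keys follow the parsing logic above):
    #
    #     # apm.yml
    #     # name: my-package
    #     # version: 1.0.0
    #     # description: Shared coding conventions
    #     # dependencies:
    #     #   apm:
    #     #     - octo-org/apm-rules#v1.2.0
    #     #   mcp:
    #     #     - github-mcp
    #
    #     package = APMPackage.from_apm_yml(Path("apm.yml"))
    #     package.get_apm_dependencies()  # [DependencyReference(repo_url="octo-org/apm-rules", reference="v1.2.0", alias=None)]
    #     package.get_mcp_dependencies()  # ["github-mcp"]

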
@dataclass
class ValidationResult:
    """Result of APM package validation."""
    is_valid: bool
    errors: List[str]
    warnings: List[str]
    package: Optional[APMPackage] = None

    def __init__(self):
        self.is_valid = True
        self.errors = []
        self.warnings = []
        self.package = None

    def add_error(self, error: str) -> None:
        """Add a validation error."""
        self.errors.append(error)
        self.is_valid = False

    def add_warning(self, warning: str) -> None:
        """Add a validation warning."""
        self.warnings.append(warning)

    def has_issues(self) -> bool:
        """Check if there are any errors or warnings."""
        return bool(self.errors or self.warnings)

    def summary(self) -> str:
        """Get a summary of validation results."""
        if self.is_valid and not self.warnings:
            return "✅ Package is valid"
        elif self.is_valid and self.warnings:
            return f"⚠️ Package is valid with {len(self.warnings)} warning(s)"
        else:
            return f"❌ Package is invalid with {len(self.errors)} error(s)"


@dataclass
class PackageInfo:
    """Information about a downloaded/installed package."""
    package: APMPackage
    install_path: Path
    resolved_reference: Optional[ResolvedReference] = None
    installed_at: Optional[str] = None  # ISO timestamp

    def get_primitives_path(self) -> Path:
        """Get path to the .apm directory for this package."""
        return self.install_path / ".apm"

    def has_primitives(self) -> bool:
        """Check if the package has any primitives."""
        apm_dir = self.get_primitives_path()
        if not apm_dir.exists():
            return False

        # Check for any primitive files in subdirectories
        for primitive_type in ['instructions', 'chatmodes', 'contexts', 'prompts']:
            primitive_dir = apm_dir / primitive_type
            if primitive_dir.exists() and any(primitive_dir.iterdir()):
                return True
        return False


def validate_apm_package(package_path: Path) -> ValidationResult:
    """Validate that a directory contains a valid APM package.

    Args:
        package_path: Path to the directory to validate

    Returns:
        ValidationResult: Validation results with any errors/warnings
    """
    result = ValidationResult()

    # Check if directory exists
    if not package_path.exists():
        result.add_error(f"Package directory does not exist: {package_path}")
        return result

    if not package_path.is_dir():
        result.add_error(f"Package path is not a directory: {package_path}")
        return result

    # Check for apm.yml
    apm_yml_path = package_path / "apm.yml"
    if not apm_yml_path.exists():
        result.add_error("Missing required file: apm.yml")
        return result

    # Try to parse apm.yml
    try:
        package = APMPackage.from_apm_yml(apm_yml_path)
        result.package = package
    except (ValueError, FileNotFoundError) as e:
        result.add_error(f"Invalid apm.yml: {e}")
        return result

    # Check for .apm directory
    apm_dir = package_path / ".apm"
    if not apm_dir.exists():
        result.add_error("Missing required directory: .apm/")
        return result

    if not apm_dir.is_dir():
        result.add_error(".apm must be a directory")
        return result

    # Check if .apm directory has any content
    primitive_types = ['instructions', 'chatmodes', 'contexts', 'prompts']
    has_primitives = False

    for primitive_type in primitive_types:
        primitive_dir = apm_dir / primitive_type
        if primitive_dir.exists() and primitive_dir.is_dir():
            # Check if directory has any markdown files
            md_files = list(primitive_dir.glob("*.md"))
            if md_files:
                has_primitives = True
                # Validate each primitive file has basic structure
                for md_file in md_files:
                    try:
                        content = md_file.read_text(encoding='utf-8')
                        if not content.strip():
                            result.add_warning(f"Empty primitive file: {md_file.relative_to(package_path)}")
                    except Exception as e:
                        result.add_warning(f"Could not read primitive file {md_file.relative_to(package_path)}: {e}")

    if not has_primitives:
        result.add_warning("No primitive files found in .apm/ directory")

    # Version format validation (basic semver check)
    if package and package.version:
        if not re.match(r'^\d+\.\d+\.\d+', package.version):
            result.add_warning(f"Version '{package.version}' doesn't follow semantic versioning (x.y.z)")

    return result


def parse_git_reference(ref_string: str) -> tuple[GitReferenceType, str]:
    """Parse a git reference string to determine its type.

    Args:
        ref_string: Git reference (branch, tag, or commit)

    Returns:
        tuple: (GitReferenceType, cleaned_reference)
    """
    if not ref_string:
        return GitReferenceType.BRANCH, "main"  # Default to main branch

    ref = ref_string.strip()

    # Check if it looks like a commit SHA (40 hex chars or 7+ hex chars)
    if re.match(r'^[a-f0-9]{7,40}$', ref.lower()):
        return GitReferenceType.COMMIT, ref

    # Check if it looks like a semantic version tag
    if re.match(r'^v?\d+\.\d+\.\d+', ref):
        return GitReferenceType.TAG, ref

    # Otherwise assume it's a branch
    return GitReferenceType.BRANCH, ref
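The classification is heuristic; a few illustrative calls (reference strings are hypothetical):

    parse_git_reference("")           # (GitReferenceType.BRANCH, "main") - default
    parse_git_reference("f6cae49")    # (GitReferenceType.COMMIT, "f6cae49") - 7+ hex characters
    parse_git_reference("v1.0.0")     # (GitReferenceType.TAG, "v1.0.0")
    parse_git_reference("feature/x")  # (GitReferenceType.BRANCH, "feature/x")

Note that a purely hexadecimal branch name of seven or more characters (e.g. "deadbeef") would be classified as a commit by this heuristic.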
12
src/apm_cli/output/__init__.py
Normal file
12
src/apm_cli/output/__init__.py
Normal file
@@ -0,0 +1,12 @@
"""Output formatting and presentation layer for APM CLI."""

from .formatters import CompilationFormatter
from .models import CompilationResults, ProjectAnalysis, OptimizationDecision, OptimizationStats

__all__ = [
    'CompilationFormatter',
    'CompilationResults',
    'ProjectAnalysis',
    'OptimizationDecision',
    'OptimizationStats'
]
911
src/apm_cli/output/formatters.py
Normal file
911
src/apm_cli/output/formatters.py
Normal file
@@ -0,0 +1,911 @@
"""Professional CLI output formatters for APM compilation."""

import time
from pathlib import Path
from typing import List, Optional

try:
    from rich.console import Console
    from rich.table import Table
    from rich.tree import Tree
    from rich.text import Text
    from rich.panel import Panel
    from rich import box
    from io import StringIO
    RICH_AVAILABLE = True
except ImportError:
    RICH_AVAILABLE = False

from .models import CompilationResults, OptimizationDecision, PlacementStrategy


class CompilationFormatter:
    """Professional formatter for compilation output with fallback for no-rich environments."""

    def __init__(self, use_color: bool = True):
        """Initialize formatter.

        Args:
            use_color: Whether to use colors and rich formatting.
        """
        self.use_color = use_color and RICH_AVAILABLE
        self.console = Console() if self.use_color else None

    def format_default(self, results: CompilationResults) -> str:
        """Format default compilation output.

        Args:
            results: Compilation results to format.

        Returns:
            Formatted output string.
        """
        lines = []

        # Phase 1: Project Discovery
        lines.extend(self._format_project_discovery(results.project_analysis))
        lines.append("")

        # Phase 2: Optimization Progress
        lines.extend(self._format_optimization_progress(results.optimization_decisions, results.project_analysis))
        lines.append("")

        # Phase 3: Results Summary
        lines.extend(self._format_results_summary(results))

        # Issues (warnings/errors)
        if results.has_issues:
            lines.append("")
            lines.extend(self._format_issues(results.warnings, results.errors))

        return "\n".join(lines)

    def format_verbose(self, results: CompilationResults) -> str:
        """Format verbose compilation output with mathematical details.

        Args:
            results: Compilation results to format.

        Returns:
            Formatted verbose output string.
        """
        lines = []

        # Phase 1: Project Discovery
        lines.extend(self._format_project_discovery(results.project_analysis))
        lines.append("")

        # Phase 2: Optimization Progress
        lines.extend(self._format_optimization_progress(results.optimization_decisions, results.project_analysis))
        lines.append("")

        # Phase 3: Mathematical Analysis Section (verbose only)
        lines.extend(self._format_mathematical_analysis(results.optimization_decisions))
        lines.append("")

        # Phase 4: Coverage vs. Efficiency Explanation (verbose only)
        lines.extend(self._format_coverage_explanation(results.optimization_stats))
        lines.append("")

        # Phase 5: Detailed Performance Metrics (verbose only)
        lines.extend(self._format_detailed_metrics(results.optimization_stats))
        lines.append("")

        # Phase 6: Final Summary (Generated X files + placement distribution)
        lines.extend(self._format_final_summary(results))

        # Issues (warnings/errors)
        if results.has_issues:
            lines.append("")
            lines.extend(self._format_issues(results.warnings, results.errors))

        return "\n".join(lines)

    def _format_final_summary(self, results: CompilationResults) -> List[str]:
        """Format final summary for verbose mode: Generated files + placement distribution."""
        lines = []

        # Main result
        file_count = len(results.placement_summaries)
        summary_line = f"Generated {file_count} AGENTS.md file{'s' if file_count != 1 else ''}"

        if results.is_dry_run:
            summary_line = f"[DRY RUN] Would generate {file_count} AGENTS.md file{'s' if file_count != 1 else ''}"

        if self.use_color:
            color = "yellow" if results.is_dry_run else "green"
            lines.append(self._styled(summary_line, f"{color} bold"))
        else:
            lines.append(summary_line)

        # Efficiency metrics with improved formatting
        stats = results.optimization_stats
        efficiency_pct = f"{stats.efficiency_percentage:.1f}%"

        # Build metrics with baselines and improvements when available
        metrics_lines = [
            f"┌─ Context efficiency: {efficiency_pct}"
        ]

        if stats.efficiency_improvement is not None:
            improvement = f"(baseline: {stats.baseline_efficiency * 100:.1f}%, improvement: +{stats.efficiency_improvement:.0f}%)" if stats.efficiency_improvement > 0 else f"(baseline: {stats.baseline_efficiency * 100:.1f}%, change: {stats.efficiency_improvement:.0f}%)"
            metrics_lines[0] += f" {improvement}"

        if stats.pollution_improvement is not None:
            pollution_pct = f"{(1.0 - stats.pollution_improvement) * 100:.1f}%"
            improvement_pct = f"-{stats.pollution_improvement * 100:.0f}%" if stats.pollution_improvement > 0 else f"+{abs(stats.pollution_improvement) * 100:.0f}%"
            metrics_lines.append(f"├─ Average pollution: {pollution_pct} (improvement: {improvement_pct})")

        if stats.placement_accuracy is not None:
            accuracy_pct = f"{stats.placement_accuracy * 100:.1f}%"
            metrics_lines.append(f"├─ Placement accuracy: {accuracy_pct} (mathematical optimum)")

        if stats.generation_time_ms is not None:
            metrics_lines.append(f"└─ Generation time: {stats.generation_time_ms}ms")
        else:
            # Change last ├─ to └─
            if len(metrics_lines) > 1:
                metrics_lines[-1] = metrics_lines[-1].replace("├─", "└─")

        for line in metrics_lines:
            if self.use_color:
                lines.append(self._styled(line, "dim"))
            else:
                lines.append(line)

        # Add placement distribution summary
        lines.append("")
        if self.use_color:
            lines.append(self._styled("Placement Distribution", "cyan bold"))
        else:
            lines.append("Placement Distribution")

        # Show distribution of AGENTS.md files
        for summary in results.placement_summaries:
            rel_path = str(summary.get_relative_path(Path.cwd()))
            content_text = self._get_placement_description(summary)
            source_text = f"{summary.source_count} source{'s' if summary.source_count != 1 else ''}"

            # Use proper tree formatting
            prefix = "├─" if summary != results.placement_summaries[-1] else "└─"
            line = f"{prefix} {rel_path:<30} {content_text} from {source_text}"

            if self.use_color:
                lines.append(self._styled(line, "dim"))
            else:
                lines.append(line)

        return lines

    def format_dry_run(self, results: CompilationResults) -> str:
        """Format dry run output.

        Args:
            results: Compilation results to format.

        Returns:
            Formatted dry run output string.
        """
        lines = []

        # Standard analysis
        lines.extend(self._format_project_discovery(results.project_analysis))
        lines.append("")
        lines.extend(self._format_optimization_progress(results.optimization_decisions, results.project_analysis))
        lines.append("")

        # Dry run specific output
        lines.extend(self._format_dry_run_summary(results))

        # Issues (warnings/errors) - important for dry run too!
        if results.has_issues:
            lines.append("")
            lines.extend(self._format_issues(results.warnings, results.errors))

        return "\n".join(lines)

    def _format_project_discovery(self, analysis) -> List[str]:
        """Format project discovery phase output."""
        lines = []

        if self.use_color:
            lines.append(self._styled("Analyzing project structure...", "cyan bold"))
        else:
            lines.append("Analyzing project structure...")

        # Constitution detection (first priority)
        if analysis.constitution_detected:
            constitution_line = f"├─ Constitution detected: {analysis.constitution_path}"
            if self.use_color:
                lines.append(self._styled(constitution_line, "dim"))
            else:
                lines.append(constitution_line)

        # Structure tree with more detailed information
        file_types_summary = analysis.get_file_types_summary() if hasattr(analysis, 'get_file_types_summary') else "various"
        tree_lines = [
            f"├─ {analysis.directories_scanned} directories scanned (max depth: {analysis.max_depth})",
            f"├─ {analysis.files_analyzed} files analyzed across {len(analysis.file_types_detected)} file types ({file_types_summary})",
            f"└─ {analysis.instruction_patterns_detected} instruction patterns detected"
        ]

        for line in tree_lines:
            if self.use_color:
                lines.append(self._styled(line, "dim"))
            else:
                lines.append(line)

        return lines

    def _format_optimization_progress(self, decisions: List[OptimizationDecision], analysis=None) -> List[str]:
        """Format optimization progress display using Rich table for better readability."""
        lines = []

        if self.use_color:
            lines.append(self._styled("Optimizing placements...", "cyan bold"))
        else:
            lines.append("Optimizing placements...")

        if self.use_color and RICH_AVAILABLE:
            # Create a Rich table for professional display
            table = Table(show_header=True, header_style="bold cyan", box=box.SIMPLE_HEAD)
            table.add_column("Pattern", style="white", width=25)
            table.add_column("Source", style="yellow", width=20)
            table.add_column("Coverage", style="dim", width=10)
            table.add_column("Placement", style="green", width=25)
            table.add_column("Metrics", style="dim", width=20)

            # Add constitution row first if detected
            if analysis and analysis.constitution_detected:
                table.add_row(
                    "**",
                    "constitution.md",
                    "ALL",
                    "./AGENTS.md",
                    "rel: 100%"
                )

            for decision in decisions:
                pattern_display = decision.pattern if decision.pattern else "(global)"

                # Extract source information from the instruction
                source_display = "unknown"
                if decision.instruction and hasattr(decision.instruction, 'file_path'):
                    try:
                        # Get relative path from base directory if possible
                        rel_path = decision.instruction.file_path.name  # Just filename for brevity
                        source_display = rel_path
                    except:
                        source_display = str(decision.instruction.file_path)[-20:]  # Last 20 chars

                ratio_display = f"{decision.matching_directories}/{decision.total_directories}"

                if len(decision.placement_directories) == 1:
                    placement = self._get_relative_display_path(decision.placement_directories[0])
                    # Add efficiency details for single placement
                    relevance = getattr(decision, 'relevance_score', 0.0) if hasattr(decision, 'relevance_score') else 1.0
                    pollution = getattr(decision, 'pollution_score', 0.0) if hasattr(decision, 'pollution_score') else 0.0
                    metrics = f"rel: {relevance*100:.0f}%"
                else:
                    placement_count = len(decision.placement_directories)
                    placement = f"{placement_count} locations"
                    metrics = "distributed"

                # Color code the placement by strategy
                placement_style = self._get_strategy_color(decision.strategy)
                placement_text = Text(placement, style=placement_style)

                table.add_row(pattern_display, source_display, ratio_display, placement_text, metrics)

            # Render table to lines
            if self.console:
                with self.console.capture() as capture:
                    self.console.print(table)
                table_output = capture.get()
                if table_output.strip():
                    lines.extend(table_output.split('\n'))
        else:
            # Fallback to simplified text display for non-Rich environments
            # Add constitution first if detected
            if analysis and analysis.constitution_detected:
                lines.append("** constitution.md ALL → ./AGENTS.md (rel: 100%)")

            for decision in decisions:
                pattern_display = decision.pattern if decision.pattern else "(global)"

                # Extract source information
                source_display = "unknown"
                if decision.instruction and hasattr(decision.instruction, 'file_path'):
                    try:
                        source_display = decision.instruction.file_path.name
                    except:
                        source_display = "unknown"

                ratio_display = f"{decision.matching_directories}/{decision.total_directories} dirs"

                if len(decision.placement_directories) == 1:
                    placement = self._get_relative_display_path(decision.placement_directories[0])
                    relevance = getattr(decision, 'relevance_score', 0.0) if hasattr(decision, 'relevance_score') else 1.0
                    pollution = getattr(decision, 'pollution_score', 0.0) if hasattr(decision, 'pollution_score') else 0.0
                    line = f"{pattern_display:<25} {source_display:<15} {ratio_display:<10} → {placement:<25} (rel: {relevance*100:.0f}%)"
                else:
                    placement_count = len(decision.placement_directories)
                    line = f"{pattern_display:<25} {source_display:<15} {ratio_display:<10} → {placement_count} locations"

                lines.append(line)

        return lines

    def _format_results_summary(self, results: CompilationResults) -> List[str]:
        """Format final results summary."""
        lines = []

        # Main result
        file_count = len(results.placement_summaries)
        summary_line = f"Generated {file_count} AGENTS.md file{'s' if file_count != 1 else ''}"

        if results.is_dry_run:
            summary_line = f"[DRY RUN] Would generate {file_count} AGENTS.md file{'s' if file_count != 1 else ''}"

        if self.use_color:
            color = "yellow" if results.is_dry_run else "green"
            lines.append(self._styled(summary_line, f"{color} bold"))
        else:
            lines.append(summary_line)

        # Efficiency metrics with improved formatting
        stats = results.optimization_stats
        efficiency_pct = f"{stats.efficiency_percentage:.1f}%"

        # Build metrics with baselines and improvements when available
        metrics_lines = [
            f"┌─ Context efficiency: {efficiency_pct}"
        ]

        if stats.efficiency_improvement is not None:
            improvement = f"(baseline: {stats.baseline_efficiency * 100:.1f}%, improvement: +{stats.efficiency_improvement:.0f}%)" if stats.efficiency_improvement > 0 else f"(baseline: {stats.baseline_efficiency * 100:.1f}%, change: {stats.efficiency_improvement:.0f}%)"
            metrics_lines[0] += f" {improvement}"

        if stats.pollution_improvement is not None:
            pollution_pct = f"{(1.0 - stats.pollution_improvement) * 100:.1f}%"
            improvement_pct = f"-{stats.pollution_improvement * 100:.0f}%" if stats.pollution_improvement > 0 else f"+{abs(stats.pollution_improvement) * 100:.0f}%"
            metrics_lines.append(f"├─ Average pollution: {pollution_pct} (improvement: {improvement_pct})")

        if stats.placement_accuracy is not None:
            accuracy_pct = f"{stats.placement_accuracy * 100:.1f}%"
            metrics_lines.append(f"├─ Placement accuracy: {accuracy_pct} (mathematical optimum)")

        if stats.generation_time_ms is not None:
            metrics_lines.append(f"└─ Generation time: {stats.generation_time_ms}ms")
        else:
            # Change last ├─ to └─
            if len(metrics_lines) > 1:
                metrics_lines[-1] = metrics_lines[-1].replace("├─", "└─")

        for line in metrics_lines:
            if self.use_color:
                lines.append(self._styled(line, "dim"))
            else:
                lines.append(line)

        # Add placement distribution summary
        lines.append("")
        if self.use_color:
            lines.append(self._styled("Placement Distribution", "cyan bold"))
        else:
            lines.append("Placement Distribution")

        # Show distribution of AGENTS.md files
        for summary in results.placement_summaries:
            rel_path = str(summary.get_relative_path(Path.cwd()))
            content_text = self._get_placement_description(summary)
            source_text = f"{summary.source_count} source{'s' if summary.source_count != 1 else ''}"

            # Use proper tree formatting
            prefix = "├─" if summary != results.placement_summaries[-1] else "└─"
            line = f"{prefix} {rel_path:<30} {content_text} from {source_text}"

            if self.use_color:
                lines.append(self._styled(line, "dim"))
            else:
                lines.append(line)

        return lines

    def _format_dry_run_summary(self, results: CompilationResults) -> List[str]:
        """Format dry run specific summary."""
        lines = []

        if self.use_color:
            lines.append(self._styled("[DRY RUN] File generation preview:", "yellow bold"))
        else:
            lines.append("[DRY RUN] File generation preview:")

        # List files that would be generated
        for summary in results.placement_summaries:
            rel_path = str(summary.get_relative_path(Path.cwd()))
            instruction_text = f"{summary.instruction_count} instruction{'s' if summary.instruction_count != 1 else ''}"
            source_text = f"{summary.source_count} source{'s' if summary.source_count != 1 else ''}"

            line = f"├─ {rel_path:<30} {instruction_text}, {source_text}"

            if self.use_color:
                lines.append(self._styled(line, "dim"))
            else:
                lines.append(line)

        # Change last ├─ to └─
        if lines and len(lines) > 1:
            lines[-1] = lines[-1].replace("├─", "└─")

        lines.append("")

        # Call to action
        if self.use_color:
            lines.append(self._styled("[DRY RUN] No files written. Run 'apm compile' to apply changes.", "yellow"))
        else:
            lines.append("[DRY RUN] No files written. Run 'apm compile' to apply changes.")

        return lines

def _format_mathematical_analysis(self, decisions: List[OptimizationDecision]) -> List[str]:
|
||||||
|
"""Format mathematical analysis for verbose mode with coverage-first principles."""
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled("Mathematical Optimization Analysis", "cyan bold"))
|
||||||
|
else:
|
||||||
|
lines.append("Mathematical Optimization Analysis")
|
||||||
|
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
if self.use_color and RICH_AVAILABLE:
|
||||||
|
# Coverage-First Strategy Table
|
||||||
|
strategy_table = Table(title="Three-Tier Coverage-First Strategy", show_header=True, header_style="bold cyan", box=box.SIMPLE_HEAD)
|
||||||
|
strategy_table.add_column("Pattern", style="white", width=25)
|
||||||
|
strategy_table.add_column("Source", style="yellow", width=15)
|
||||||
|
strategy_table.add_column("Distribution", style="yellow", width=12)
|
||||||
|
strategy_table.add_column("Strategy", style="green", width=15)
|
||||||
|
strategy_table.add_column("Coverage Guarantee", style="blue", width=20)
|
||||||
|
|
||||||
|
for decision in decisions:
|
||||||
|
pattern = decision.pattern if decision.pattern else "(global)"
|
||||||
|
|
||||||
|
# Extract source information
|
||||||
|
source_display = "unknown"
|
||||||
|
if decision.instruction and hasattr(decision.instruction, 'file_path'):
|
||||||
|
try:
|
||||||
|
source_display = decision.instruction.file_path.name
|
||||||
|
except:
|
||||||
|
source_display = "unknown"
|
||||||
|
|
||||||
|
# Distribution score with threshold classification
|
||||||
|
score = decision.distribution_score
|
||||||
|
if score < 0.3:
|
||||||
|
dist_display = f"{score:.3f} (Low)"
|
||||||
|
strategy_name = "Single Point"
|
||||||
|
coverage_status = "✅ Perfect"
|
||||||
|
elif score > 0.7:
|
||||||
|
dist_display = f"{score:.3f} (High)"
|
||||||
|
strategy_name = "Distributed"
|
||||||
|
coverage_status = "✅ Universal"
|
||||||
|
else:
|
||||||
|
dist_display = f"{score:.3f} (Medium)"
|
||||||
|
strategy_name = "Selective Multi"
|
||||||
|
# Check if root placement was used (indicates coverage fallback)
|
||||||
|
if any("." == str(p) or p.name == "" for p in decision.placement_directories):
|
||||||
|
coverage_status = "⚠️ Root Fallback"
|
||||||
|
else:
|
||||||
|
coverage_status = "✅ Verified"
|
||||||
|
|
||||||
|
strategy_table.add_row(pattern, source_display, dist_display, strategy_name, coverage_status)
|
||||||
|
|
||||||
|
# Render strategy table
|
||||||
|
if self.console:
|
||||||
|
with self.console.capture() as capture:
|
||||||
|
self.console.print(strategy_table)
|
||||||
|
table_output = capture.get()
|
||||||
|
if table_output.strip():
|
||||||
|
lines.extend(table_output.split('\n'))
|
||||||
|
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Hierarchical Coverage Analysis Table
|
||||||
|
coverage_table = Table(title="Hierarchical Coverage Analysis", show_header=True, header_style="bold cyan", box=box.SIMPLE_HEAD)
|
||||||
|
coverage_table.add_column("Pattern", style="white", width=25)
|
||||||
|
coverage_table.add_column("Matching Files", style="yellow", width=15)
|
||||||
|
coverage_table.add_column("Placement", style="green", width=20)
|
||||||
|
coverage_table.add_column("Coverage Result", style="blue", width=25)
|
||||||
|
|
||||||
|
for decision in decisions:
|
||||||
|
pattern = decision.pattern if decision.pattern else "(global)"
|
||||||
|
matching_files = f"{decision.matching_directories} dirs"
|
||||||
|
|
||||||
|
if len(decision.placement_directories) == 1:
|
||||||
|
placement = self._get_relative_display_path(decision.placement_directories[0])
|
||||||
|
|
||||||
|
# Analyze coverage outcome
|
||||||
|
if str(decision.placement_directories[0]).endswith('.'):
|
||||||
|
coverage_result = "Root → All files inherit"
|
||||||
|
elif decision.distribution_score < 0.3:
|
||||||
|
coverage_result = "Local → Perfect efficiency"
|
||||||
|
else:
|
||||||
|
coverage_result = "Selective → Coverage verified"
|
||||||
|
else:
|
||||||
|
placement = f"{len(decision.placement_directories)} locations"
|
||||||
|
coverage_result = "Multi-point → Full coverage"
|
||||||
|
|
||||||
|
coverage_table.add_row(pattern, matching_files, placement, coverage_result)
|
||||||
|
|
||||||
|
# Render coverage table
|
||||||
|
if self.console:
|
||||||
|
with self.console.capture() as capture:
|
||||||
|
self.console.print(coverage_table)
|
||||||
|
table_output = capture.get()
|
||||||
|
if table_output.strip():
|
||||||
|
lines.extend(table_output.split('\n'))
|
||||||
|
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Updated Mathematical Foundation Panel
|
||||||
|
foundation_text = """Objective: minimize Σ(context_pollution × directory_weight)
|
||||||
|
Constraints: ∀file_matching_pattern → can_inherit_instruction
|
||||||
|
Variables: placement_matrix ∈ {0,1}
|
||||||
|
Algorithm: Three-tier strategy with hierarchical coverage verification
|
||||||
|
|
||||||
|
Coverage Guarantee: Every file can access applicable instructions through
|
||||||
|
hierarchical inheritance. Coverage takes priority over efficiency."""
|
||||||
|
|
||||||
|
if self.console:
|
||||||
|
from rich.panel import Panel
|
||||||
|
try:
|
||||||
|
panel = Panel(foundation_text, title="Coverage-Constrained Optimization", border_style="cyan")
|
||||||
|
with self.console.capture() as capture:
|
||||||
|
self.console.print(panel)
|
||||||
|
panel_output = capture.get()
|
||||||
|
if panel_output.strip():
|
||||||
|
lines.extend(panel_output.split('\n'))
|
||||||
|
except Exception:
|
||||||
|
# Fallback to simple text
|
||||||
|
lines.append("Coverage-Constrained Optimization:")
|
||||||
|
for line in foundation_text.split('\n'):
|
||||||
|
lines.append(f" {line}")
|
||||||
|
|
||||||
|
else:
|
||||||
|
# Fallback for non-Rich environments
|
||||||
|
lines.append("Coverage-First Strategy Analysis:")
|
||||||
|
for decision in decisions:
|
||||||
|
pattern = decision.pattern if decision.pattern else "(global)"
|
||||||
|
score = f"{decision.distribution_score:.3f}"
|
||||||
|
strategy = decision.strategy.value
|
||||||
|
coverage = "✅ Verified" if decision.distribution_score < 0.7 else "⚠️ Root Fallback"
|
||||||
|
lines.append(f" {pattern:<30} {score:<8} {strategy:<15} {coverage}")
|
||||||
|
|
||||||
|
lines.append("")
|
||||||
|
lines.append("Mathematical Foundation:")
|
||||||
|
lines.append(" Objective: minimize Σ(context_pollution × directory_weight)")
|
||||||
|
lines.append(" Constraints: ∀file_matching_pattern → can_inherit_instruction")
|
||||||
|
lines.append(" Algorithm: Three-tier strategy with coverage verification")
|
||||||
|
lines.append(" Principle: Coverage guarantee takes priority over efficiency")
|
||||||
|
|
||||||
|
return lines
|
||||||
|
|
||||||
|
def _format_detailed_metrics(self, stats) -> List[str]:
|
||||||
|
"""Format detailed performance metrics table with interpretations."""
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled("Performance Metrics", "cyan bold"))
|
||||||
|
else:
|
||||||
|
lines.append("Performance Metrics")
|
||||||
|
|
||||||
|
# Create metrics table
|
||||||
|
if self.use_color and RICH_AVAILABLE:
|
||||||
|
table = Table(box=box.SIMPLE)
|
||||||
|
table.add_column("Metric", style="white", width=20)
|
||||||
|
table.add_column("Value", style="white", width=12)
|
||||||
|
table.add_column("Assessment", style="blue", width=35)
|
||||||
|
|
||||||
|
# Context Efficiency with coverage-first interpretation
|
||||||
|
efficiency = stats.efficiency_percentage
|
||||||
|
if efficiency >= 80:
|
||||||
|
assessment = "Excellent - perfect pattern locality"
|
||||||
|
assessment_color = "bright_green"
|
||||||
|
value_color = "bright_green"
|
||||||
|
elif efficiency >= 60:
|
||||||
|
assessment = "Good - well-optimized with minimal coverage conflicts"
|
||||||
|
assessment_color = "green"
|
||||||
|
value_color = "green"
|
||||||
|
elif efficiency >= 40:
|
||||||
|
assessment = "Fair - moderate coverage-driven pollution"
|
||||||
|
assessment_color = "yellow"
|
||||||
|
value_color = "yellow"
|
||||||
|
elif efficiency >= 20:
|
||||||
|
assessment = "Poor - significant coverage constraints"
|
||||||
|
assessment_color = "orange1"
|
||||||
|
value_color = "orange1"
|
||||||
|
else:
|
||||||
|
assessment = "Very Poor - may be mathematically optimal given coverage"
|
||||||
|
assessment_color = "red"
|
||||||
|
value_color = "red"
|
||||||
|
|
||||||
|
table.add_row(
|
||||||
|
"Context Efficiency",
|
||||||
|
Text(f"{efficiency:.1f}%", style=value_color),
|
||||||
|
Text(assessment, style=assessment_color)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Calculate pollution level with coverage-aware interpretation
|
||||||
|
pollution_level = 100 - efficiency
|
||||||
|
if pollution_level <= 20:
|
||||||
|
pollution_assessment = "Excellent - perfect pattern locality"
|
||||||
|
pollution_color = "bright_green"
|
||||||
|
elif pollution_level <= 40:
|
||||||
|
pollution_assessment = "Good - minimal coverage conflicts"
|
||||||
|
pollution_color = "green"
|
||||||
|
elif pollution_level <= 60:
|
||||||
|
pollution_assessment = "Fair - acceptable coverage-driven pollution"
|
||||||
|
pollution_color = "yellow"
|
||||||
|
elif pollution_level <= 80:
|
||||||
|
pollution_assessment = "Poor - high coverage constraints"
|
||||||
|
pollution_color = "orange1"
|
||||||
|
else:
|
||||||
|
pollution_assessment = "Very Poor - but may guarantee coverage"
|
||||||
|
pollution_color = "red"
|
||||||
|
|
||||||
|
table.add_row(
|
||||||
|
"Pollution Level",
|
||||||
|
Text(f"{pollution_level:.1f}%", style=pollution_color),
|
||||||
|
Text(pollution_assessment, style=pollution_color)
|
||||||
|
)
|
||||||
|
|
||||||
|
if stats.placement_accuracy:
|
||||||
|
accuracy = stats.placement_accuracy * 100
|
||||||
|
if accuracy >= 95:
|
||||||
|
accuracy_assessment = "Excellent - mathematically optimal"
|
||||||
|
accuracy_color = "bright_green"
|
||||||
|
elif accuracy >= 85:
|
||||||
|
accuracy_assessment = "Good - near optimal"
|
||||||
|
accuracy_color = "green"
|
||||||
|
elif accuracy >= 70:
|
||||||
|
accuracy_assessment = "Fair - reasonably placed"
|
||||||
|
accuracy_color = "yellow"
|
||||||
|
else:
|
||||||
|
accuracy_assessment = "Poor - suboptimal placement"
|
||||||
|
accuracy_color = "orange1"
|
||||||
|
|
||||||
|
table.add_row(
|
||||||
|
"Placement Accuracy",
|
||||||
|
Text(f"{accuracy:.1f}%", style=accuracy_color),
|
||||||
|
Text(accuracy_assessment, style=accuracy_color)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Render table
|
||||||
|
if self.console:
|
||||||
|
with self.console.capture() as capture:
|
||||||
|
self.console.print(table)
|
||||||
|
table_output = capture.get()
|
||||||
|
if table_output.strip():
|
||||||
|
lines.extend(table_output.split('\n'))
|
||||||
|
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Add interpretation guide
|
||||||
|
if self.console:
|
||||||
|
try:
|
||||||
|
interpretation_text = """📊 How These Metrics Are Calculated
|
||||||
|
|
||||||
|
Context Efficiency = Average across all directories of (Relevant Instructions / Total Instructions)
|
||||||
|
• For each directory, APM analyzes what instructions agents would inherit from AGENTS.md files
|
||||||
|
• Calculates ratio of instructions that apply to files in that directory vs total instructions loaded
|
||||||
|
• Takes weighted average across all project directories with files
|
||||||
|
|
||||||
|
Pollution Level = 100% - Context Efficiency (inverse relationship)
|
||||||
|
• High pollution = agents load many irrelevant instructions when working in specific directories
|
||||||
|
• Low pollution = agents see mostly relevant instructions for their current context
|
||||||
|
|
||||||
|
🎯 Interpretation Benchmarks
|
||||||
|
|
||||||
|
Context Efficiency:
|
||||||
|
• 80-100%: Excellent - Instructions perfectly targeted to usage context
|
||||||
|
• 60-80%: Good - Well-optimized with minimal wasted context
|
||||||
|
• 40-60%: Fair - Some optimization opportunities exist
|
||||||
|
• 20-40%: Poor - Significant context pollution, consider restructuring
|
||||||
|
• 0-20%: Very Poor - High pollution, instructions poorly distributed
|
||||||
|
|
||||||
|
Pollution Level:
|
||||||
|
• 0-10%: Excellent - Agents see highly relevant instructions only
|
||||||
|
• 10-25%: Good - Low noise, mostly relevant context
|
||||||
|
• 25-50%: Fair - Moderate noise, some irrelevant instructions
|
||||||
|
• 50%+: Poor - High noise, agents see many irrelevant instructions
|
||||||
|
|
||||||
|
💡 Example: 36.7% efficiency means agents working in specific directories see only 36.7% relevant instructions and 63.3% irrelevant context pollution."""
|
||||||
|
|
||||||
|
panel = Panel(interpretation_text, title="Metrics Guide", border_style="dim", title_align="left")
|
||||||
|
with self.console.capture() as capture:
|
||||||
|
self.console.print(panel)
|
||||||
|
panel_output = capture.get()
|
||||||
|
if panel_output.strip():
|
||||||
|
lines.extend(panel_output.split('\n'))
|
||||||
|
except Exception:
|
||||||
|
# Fallback to simple text
|
||||||
|
lines.extend([
|
||||||
|
"Metrics Guide:",
|
||||||
|
"• Context Efficiency 80-100%: Excellent | 60-80%: Good | 40-60%: Fair | <40%: Poor",
|
||||||
|
"• Pollution 0-10%: Excellent | 10-25%: Good | 25-50%: Fair | >50%: Poor"
|
||||||
|
])
|
||||||
|
else:
|
||||||
|
# Fallback for non-Rich environments
|
||||||
|
efficiency = stats.efficiency_percentage
|
||||||
|
pollution = 100 - efficiency
|
||||||
|
|
||||||
|
if efficiency >= 80:
|
||||||
|
efficiency_assessment = "Excellent"
|
||||||
|
elif efficiency >= 60:
|
||||||
|
efficiency_assessment = "Good"
|
||||||
|
elif efficiency >= 40:
|
||||||
|
efficiency_assessment = "Fair"
|
||||||
|
elif efficiency >= 20:
|
||||||
|
efficiency_assessment = "Poor"
|
||||||
|
else:
|
||||||
|
efficiency_assessment = "Very Poor"
|
||||||
|
|
||||||
|
if pollution <= 10:
|
||||||
|
pollution_assessment = "Excellent"
|
||||||
|
elif pollution <= 25:
|
||||||
|
pollution_assessment = "Good"
|
||||||
|
elif pollution <= 50:
|
||||||
|
pollution_assessment = "Fair"
|
||||||
|
else:
|
||||||
|
pollution_assessment = "Poor"
|
||||||
|
|
||||||
|
lines.extend([
|
||||||
|
f"Context Efficiency: {efficiency:.1f}% ({efficiency_assessment})",
|
||||||
|
f"Pollution Level: {pollution:.1f}% ({pollution_assessment})",
|
||||||
|
"Guide: 80-100% Excellent | 60-80% Good | 40-60% Fair | 20-40% Poor | <20% Very Poor"
|
||||||
|
])
|
||||||
|
|
||||||
|
return lines
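A small worked example of the efficiency and pollution arithmetic described in the Metrics Guide above; this is only a sketch, and the per-directory ratios are invented for illustration:

# Hypothetical per-directory relevance ratios (relevant instructions / total instructions loaded).
directory_ratios = {"src/api": 0.50, "src/ui": 0.25, "docs": 0.35}

# Context Efficiency = average ratio across directories; Pollution = 100% - efficiency.
efficiency = sum(directory_ratios.values()) / len(directory_ratios) * 100
pollution = 100 - efficiency
print(f"Context Efficiency: {efficiency:.1f}%")  # 36.7%
print(f"Pollution Level: {pollution:.1f}%")      # 63.3%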
|
||||||
|
|
||||||
|
def _format_issues(self, warnings: List[str], errors: List[str]) -> List[str]:
|
||||||
|
"""Format warnings and errors as professional blocks."""
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
# Errors first
|
||||||
|
for error in errors:
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(f"✗ Error: {error}", "red"))
|
||||||
|
else:
|
||||||
|
lines.append(f"✗ Error: {error}")
|
||||||
|
|
||||||
|
# Then warnings - handle multi-line warnings as cohesive blocks
|
||||||
|
for warning in warnings:
|
||||||
|
if '\n' in warning:
|
||||||
|
# Multi-line warning - format as a professional block
|
||||||
|
warning_lines = warning.split('\n')
|
||||||
|
# First line gets the warning symbol and styling
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(f"⚠ Warning: {warning_lines[0]}", "yellow"))
|
||||||
|
else:
|
||||||
|
lines.append(f"⚠ Warning: {warning_lines[0]}")
|
||||||
|
|
||||||
|
# Subsequent lines are indented and styled consistently
|
||||||
|
for line in warning_lines[1:]:
|
||||||
|
if line.strip(): # Skip empty lines
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(f" {line}", "yellow"))
|
||||||
|
else:
|
||||||
|
lines.append(f" {line}")
|
||||||
|
else:
|
||||||
|
# Single-line warning - standard format
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(f"⚠ Warning: {warning}", "yellow"))
|
||||||
|
else:
|
||||||
|
lines.append(f"⚠ Warning: {warning}")
|
||||||
|
|
||||||
|
return lines
|
||||||
|
|
||||||
|
def _get_strategy_symbol(self, strategy: PlacementStrategy) -> str:
    """Get symbol for placement strategy."""
    symbols = {
        PlacementStrategy.SINGLE_POINT: "●",
        PlacementStrategy.SELECTIVE_MULTI: "◆",
        PlacementStrategy.DISTRIBUTED: "◇"
    }
    return symbols.get(strategy, "•")

def _get_strategy_color(self, strategy: PlacementStrategy) -> str:
    """Get color for placement strategy."""
    colors = {
        PlacementStrategy.SINGLE_POINT: "green",
        PlacementStrategy.SELECTIVE_MULTI: "yellow",
        PlacementStrategy.DISTRIBUTED: "blue"
    }
    return colors.get(strategy, "white")

def _get_relative_display_path(self, path: Path) -> str:
    """Get display-friendly relative path."""
    try:
        rel_path = path.relative_to(Path.cwd())
        if rel_path == Path('.'):
            return "./AGENTS.md"
        return str(rel_path / "AGENTS.md")
    except ValueError:
        return str(path / "AGENTS.md")
|
||||||
|
|
||||||
|
def _format_coverage_explanation(self, stats) -> List[str]:
|
||||||
|
"""Explain the coverage vs. efficiency trade-off."""
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled("Coverage vs. Efficiency Analysis", "cyan bold"))
|
||||||
|
else:
|
||||||
|
lines.append("Coverage vs. Efficiency Analysis")
|
||||||
|
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
efficiency = stats.efficiency_percentage
|
||||||
|
|
||||||
|
if efficiency < 30:
|
||||||
|
lines.append("⚠️ Low Efficiency Detected:")
|
||||||
|
lines.append(" • Coverage guarantee requires some instructions at root level")
|
||||||
|
lines.append(" • This creates pollution for specialized directories")
|
||||||
|
lines.append(" • Trade-off: Guaranteed coverage vs. optimal efficiency")
|
||||||
|
lines.append(" • Alternative: Higher efficiency with coverage violations (data loss)")
|
||||||
|
lines.append("")
|
||||||
|
lines.append("💡 This may be mathematically optimal given coverage constraints")
|
||||||
|
elif efficiency < 60:
|
||||||
|
lines.append("✅ Moderate Efficiency:")
|
||||||
|
lines.append(" • Good balance between coverage and efficiency")
|
||||||
|
lines.append(" • Some coverage-driven pollution is acceptable")
|
||||||
|
lines.append(" • Most patterns are well-localized")
|
||||||
|
else:
|
||||||
|
lines.append("🎯 High Efficiency:")
|
||||||
|
lines.append(" • Excellent pattern locality achieved")
|
||||||
|
lines.append(" • Minimal coverage conflicts")
|
||||||
|
lines.append(" • Instructions are optimally placed")
|
||||||
|
|
||||||
|
lines.append("")
|
||||||
|
lines.append("📚 Why Coverage Takes Priority:")
|
||||||
|
lines.append(" • Every file must access applicable instructions")
|
||||||
|
lines.append(" • Hierarchical inheritance prevents data loss")
|
||||||
|
lines.append(" • Better low efficiency than missing instructions")
|
||||||
|
|
||||||
|
return lines
|
||||||
|
|
||||||
|
def _get_placement_description(self, summary) -> str:
|
||||||
|
"""Get description of what's included in a placement summary.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
summary: PlacementSummary object
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Description like "Constitution and 1 instruction" or "Constitution"
|
||||||
|
"""
|
||||||
|
# Check if constitution is included
|
||||||
|
has_constitution = any("constitution.md" in source for source in summary.sources)
|
||||||
|
|
||||||
|
# Build the description based on what's included
|
||||||
|
parts = []
|
||||||
|
if has_constitution:
|
||||||
|
parts.append("Constitution")
|
||||||
|
|
||||||
|
if summary.instruction_count > 0:
|
||||||
|
instruction_text = f"{summary.instruction_count} instruction{'s' if summary.instruction_count != 1 else ''}"
|
||||||
|
parts.append(instruction_text)
|
||||||
|
|
||||||
|
if parts:
|
||||||
|
return " and ".join(parts)
|
||||||
|
else:
|
||||||
|
return "content"
|
||||||
|
|
||||||
|
def _styled(self, text: str, style: str) -> str:
    """Apply styling to text with rich fallback."""
    if self.use_color and RICH_AVAILABLE:
        styled_text = Text(text)
        styled_text.style = style
        with self.console.capture() as capture:
            self.console.print(styled_text, end="")
        return capture.get()
    else:
        return text
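The capture-based styling that _styled relies on can be exercised on its own; a minimal sketch, assuming the rich package is installed:

from rich.console import Console
from rich.text import Text

console = Console()
styled = Text("Performance Metrics")
styled.style = "cyan bold"
with console.capture() as capture:
    console.print(styled, end="")
ansi_string = capture.get()  # plain string with ANSI escape codes embedded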
|
||||||
src/apm_cli/output/models.py (new file, 122 lines)
@@ -0,0 +1,122 @@
|
|||||||
|
"""Data models for compilation output and results."""
|
||||||
|
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Optional, Set
|
||||||
|
from enum import Enum
|
||||||
|
|
||||||
|
from ..primitives.models import Instruction
|
||||||
|
|
||||||
|
|
||||||
|
class PlacementStrategy(Enum):
|
||||||
|
"""Placement strategy types for optimization decisions."""
|
||||||
|
SINGLE_POINT = "Single Point"
|
||||||
|
SELECTIVE_MULTI = "Selective Multi"
|
||||||
|
DISTRIBUTED = "Distributed"
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ProjectAnalysis:
|
||||||
|
"""Analysis of the project structure and file distribution."""
|
||||||
|
directories_scanned: int
|
||||||
|
files_analyzed: int
|
||||||
|
file_types_detected: Set[str]
|
||||||
|
instruction_patterns_detected: int
|
||||||
|
max_depth: int
|
||||||
|
constitution_detected: bool = False
|
||||||
|
constitution_path: Optional[str] = None
|
||||||
|
|
||||||
|
def get_file_types_summary(self) -> str:
|
||||||
|
"""Get a concise summary of detected file types."""
|
||||||
|
if not self.file_types_detected:
|
||||||
|
return "none"
|
||||||
|
|
||||||
|
# Remove leading dots and sort
|
||||||
|
types = sorted([t.lstrip('.') for t in self.file_types_detected if t])
|
||||||
|
if len(types) <= 3:
|
||||||
|
return ', '.join(types)
|
||||||
|
else:
|
||||||
|
return f"{', '.join(types[:3])} and {len(types) - 3} more"
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class OptimizationDecision:
|
||||||
|
"""Details about a specific optimization decision for an instruction."""
|
||||||
|
instruction: Instruction
|
||||||
|
pattern: str
|
||||||
|
matching_directories: int
|
||||||
|
total_directories: int
|
||||||
|
distribution_score: float
|
||||||
|
strategy: PlacementStrategy
|
||||||
|
placement_directories: List[Path]
|
||||||
|
reasoning: str
|
||||||
|
relevance_score: float = 0.0 # Coverage efficiency for primary placement directory
|
||||||
|
|
||||||
|
@property
|
||||||
|
def distribution_ratio(self) -> float:
|
||||||
|
"""Get the distribution ratio (matching/total)."""
|
||||||
|
return self.matching_directories / self.total_directories if self.total_directories > 0 else 0.0
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class PlacementSummary:
|
||||||
|
"""Summary of a single AGENTS.md file placement."""
|
||||||
|
path: Path
|
||||||
|
instruction_count: int
|
||||||
|
source_count: int
|
||||||
|
sources: List[str] = field(default_factory=list)
|
||||||
|
|
||||||
|
def get_relative_path(self, base_dir: Path) -> Path:
|
||||||
|
"""Get path relative to base directory."""
|
||||||
|
try:
|
||||||
|
rel_path = self.path.relative_to(base_dir)
|
||||||
|
return Path('.') if rel_path == Path('.') else rel_path
|
||||||
|
except ValueError:
|
||||||
|
return self.path
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class OptimizationStats:
|
||||||
|
"""Performance and efficiency statistics from optimization."""
|
||||||
|
average_context_efficiency: float
|
||||||
|
pollution_improvement: Optional[float] = None
|
||||||
|
baseline_efficiency: Optional[float] = None
|
||||||
|
placement_accuracy: Optional[float] = None
|
||||||
|
generation_time_ms: Optional[int] = None
|
||||||
|
total_agents_files: int = 0
|
||||||
|
directories_analyzed: int = 0
|
||||||
|
|
||||||
|
@property
|
||||||
|
def efficiency_improvement(self) -> Optional[float]:
|
||||||
|
"""Calculate efficiency improvement percentage."""
|
||||||
|
if self.baseline_efficiency is not None:
|
||||||
|
return ((self.average_context_efficiency - self.baseline_efficiency)
|
||||||
|
/ self.baseline_efficiency * 100)
|
||||||
|
return None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def efficiency_percentage(self) -> float:
|
||||||
|
"""Get efficiency as percentage."""
|
||||||
|
return self.average_context_efficiency * 100
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class CompilationResults:
|
||||||
|
"""Complete results from compilation process."""
|
||||||
|
project_analysis: ProjectAnalysis
|
||||||
|
optimization_decisions: List[OptimizationDecision]
|
||||||
|
placement_summaries: List[PlacementSummary]
|
||||||
|
optimization_stats: OptimizationStats
|
||||||
|
warnings: List[str] = field(default_factory=list)
|
||||||
|
errors: List[str] = field(default_factory=list)
|
||||||
|
is_dry_run: bool = False
|
||||||
|
|
||||||
|
@property
|
||||||
|
def total_instructions(self) -> int:
|
||||||
|
"""Get total number of instructions processed."""
|
||||||
|
return sum(summary.instruction_count for summary in self.placement_summaries)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def has_issues(self) -> bool:
|
||||||
|
"""Check if there are any warnings or errors."""
|
||||||
|
return len(self.warnings) > 0 or len(self.errors) > 0
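A minimal sketch of how these models compose; the field values are invented for illustration and the import assumes the package layout shown in this diff:

from apm_cli.output.models import CompilationResults, OptimizationStats, ProjectAnalysis

stats = OptimizationStats(average_context_efficiency=0.42, placement_accuracy=0.9)
analysis = ProjectAnalysis(
    directories_scanned=12,
    files_analyzed=87,
    file_types_detected={".py", ".md", ".yml"},
    instruction_patterns_detected=4,
    max_depth=3,
)
results = CompilationResults(
    project_analysis=analysis,
    optimization_decisions=[],
    placement_summaries=[],
    optimization_stats=stats,
)
print(stats.efficiency_percentage)        # 42.0
print(analysis.get_file_types_summary())  # "md, py, yml"
print(results.has_issues)                 # False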
|
||||||
src/apm_cli/output/script_formatters.py (new file, 320 lines)
@@ -0,0 +1,320 @@
|
|||||||
|
"""Professional CLI output formatters for APM script execution."""
|
||||||
|
|
||||||
|
from typing import Dict, List, Optional
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
try:
|
||||||
|
from rich.console import Console
|
||||||
|
from rich.text import Text
|
||||||
|
from rich.panel import Panel
|
||||||
|
from rich.tree import Tree
|
||||||
|
from rich import box
|
||||||
|
RICH_AVAILABLE = True
|
||||||
|
except ImportError:
|
||||||
|
RICH_AVAILABLE = False
|
||||||
|
|
||||||
|
|
||||||
|
class ScriptExecutionFormatter:
|
||||||
|
"""Professional formatter for script execution output following CLI UX design plan."""
|
||||||
|
|
||||||
|
def __init__(self, use_color: bool = True):
|
||||||
|
"""Initialize formatter.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
use_color: Whether to use colors and rich formatting.
|
||||||
|
"""
|
||||||
|
self.use_color = use_color and RICH_AVAILABLE
|
||||||
|
self.console = Console() if self.use_color else None
|
||||||
|
|
||||||
|
def format_script_header(self, script_name: str, params: Dict[str, str]) -> List[str]:
|
||||||
|
"""Format the script execution header with parameters.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
script_name: Name of the script being executed
|
||||||
|
params: Parameters passed to the script
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of formatted lines
|
||||||
|
"""
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
# Main header
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(f"🚀 Running script: {script_name}", "cyan bold"))
|
||||||
|
else:
|
||||||
|
lines.append(f"🚀 Running script: {script_name}")
|
||||||
|
|
||||||
|
# Parameters tree if any exist
|
||||||
|
if params:
|
||||||
|
for param_name, param_value in params.items():
|
||||||
|
param_line = f" - {param_name}: {param_value}"
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(param_line, "dim"))
|
||||||
|
else:
|
||||||
|
lines.append(param_line)
|
||||||
|
|
||||||
|
return lines
|
||||||
|
|
||||||
|
def format_compilation_progress(self, prompt_files: List[str]) -> List[str]:
|
||||||
|
"""Format prompt compilation progress.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
prompt_files: List of prompt files being compiled
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of formatted lines
|
||||||
|
"""
|
||||||
|
if not prompt_files:
|
||||||
|
return []
|
||||||
|
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
if len(prompt_files) == 1:
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled("Compiling prompt...", "cyan"))
|
||||||
|
else:
|
||||||
|
lines.append("Compiling prompt...")
|
||||||
|
else:
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(f"Compiling {len(prompt_files)} prompts...", "cyan"))
|
||||||
|
else:
|
||||||
|
lines.append(f"Compiling {len(prompt_files)} prompts...")
|
||||||
|
|
||||||
|
# Show each file being compiled
|
||||||
|
for prompt_file in prompt_files:
|
||||||
|
file_line = f"├─ {prompt_file}"
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(file_line, "dim"))
|
||||||
|
else:
|
||||||
|
lines.append(file_line)
|
||||||
|
|
||||||
|
# Change last ├─ to └─
|
||||||
|
if lines and len(lines) > 1:
|
||||||
|
lines[-1] = lines[-1].replace("├─", "└─")
|
||||||
|
|
||||||
|
return lines
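For example, a hypothetical call with two prompt files would produce output along these lines (a sketch with color disabled):

from apm_cli.output.script_formatters import ScriptExecutionFormatter

formatter = ScriptExecutionFormatter(use_color=False)
for line in formatter.format_compilation_progress(["setup.prompt.md", "deploy.prompt.md"]):
    print(line)
# Compiling 2 prompts...
# ├─ setup.prompt.md
# └─ deploy.prompt.md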
|
||||||
|
|
||||||
|
def format_runtime_execution(self, runtime: str, command: str, content_length: int) -> List[str]:
|
||||||
|
"""Format runtime command execution with content preview.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
runtime: Name of the runtime (copilot, codex, llm)
|
||||||
|
command: The command being executed
|
||||||
|
content_length: Length of the content being passed
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of formatted lines
|
||||||
|
"""
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
# Runtime detection and styling
|
||||||
|
runtime_colors = {
|
||||||
|
'copilot': 'blue',
|
||||||
|
'codex': 'green',
|
||||||
|
'llm': 'magenta',
|
||||||
|
'unknown': 'white'
|
||||||
|
}
|
||||||
|
|
||||||
|
runtime_color = runtime_colors.get(runtime, 'white')
|
||||||
|
|
||||||
|
# Execution header
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(f"Executing {runtime} runtime...", f"{runtime_color} bold"))
|
||||||
|
else:
|
||||||
|
lines.append(f"Executing {runtime} runtime...")
|
||||||
|
|
||||||
|
# Command structure
|
||||||
|
command_line = f"├─ Command: {command}"
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(command_line, "dim"))
|
||||||
|
else:
|
||||||
|
lines.append(command_line)
|
||||||
|
|
||||||
|
# Content size
|
||||||
|
content_line = f"└─ Prompt content: {content_length:,} characters"
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(content_line, "dim"))
|
||||||
|
else:
|
||||||
|
lines.append(content_line)
|
||||||
|
|
||||||
|
return lines
|
||||||
|
|
||||||
|
def format_content_preview(self, content: str, max_preview: int = 200) -> List[str]:
|
||||||
|
"""Format content preview with professional styling.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
content: The full content to preview
|
||||||
|
max_preview: Maximum characters to show in preview
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of formatted lines
|
||||||
|
"""
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
# Content preview
|
||||||
|
content_preview = content[:max_preview] + "..." if len(content) > max_preview else content
|
||||||
|
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled("Prompt preview:", "cyan"))
|
||||||
|
else:
|
||||||
|
lines.append("Prompt preview:")
|
||||||
|
|
||||||
|
# Content in a box for better readability
|
||||||
|
if self.use_color and RICH_AVAILABLE and self.console:
|
||||||
|
try:
|
||||||
|
panel = Panel(
|
||||||
|
content_preview,
|
||||||
|
title=f"Content ({len(content):,} characters)",
|
||||||
|
border_style="dim",
|
||||||
|
title_align="left"
|
||||||
|
)
|
||||||
|
with self.console.capture() as capture:
|
||||||
|
self.console.print(panel)
|
||||||
|
panel_output = capture.get()
|
||||||
|
if panel_output.strip():
|
||||||
|
lines.extend(panel_output.split('\n'))
|
||||||
|
except Exception:
|
||||||
|
# Fallback to simple formatting
|
||||||
|
lines.append("─" * 50)
|
||||||
|
lines.append(content_preview)
|
||||||
|
lines.append("─" * 50)
|
||||||
|
else:
|
||||||
|
# Simple text fallback
|
||||||
|
lines.append("─" * 50)
|
||||||
|
lines.append(content_preview)
|
||||||
|
lines.append("─" * 50)
|
||||||
|
|
||||||
|
return lines
|
||||||
|
|
||||||
|
def format_environment_setup(self, runtime: str, env_vars_set: List[str]) -> List[str]:
|
||||||
|
"""Format environment setup information.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
runtime: Name of the runtime
|
||||||
|
env_vars_set: List of environment variables that were set
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of formatted lines
|
||||||
|
"""
|
||||||
|
if not env_vars_set:
|
||||||
|
return []
|
||||||
|
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled("Environment setup:", "cyan"))
|
||||||
|
else:
|
||||||
|
lines.append("Environment setup:")
|
||||||
|
|
||||||
|
for env_var in env_vars_set:
|
||||||
|
env_line = f"├─ {env_var}: configured"
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(env_line, "dim"))
|
||||||
|
else:
|
||||||
|
lines.append(env_line)
|
||||||
|
|
||||||
|
# Change last ├─ to └─
|
||||||
|
if lines and len(lines) > 1:
|
||||||
|
lines[-1] = lines[-1].replace("├─", "└─")
|
||||||
|
|
||||||
|
return lines
|
||||||
|
|
||||||
|
def format_execution_success(self, runtime: str, execution_time: Optional[float] = None) -> List[str]:
|
||||||
|
"""Format successful execution result.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
runtime: Name of the runtime that executed
|
||||||
|
execution_time: Optional execution time in seconds
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of formatted lines
|
||||||
|
"""
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
success_msg = f"✅ {runtime.title()} execution completed successfully"
|
||||||
|
if execution_time is not None:
|
||||||
|
success_msg += f" ({execution_time:.2f}s)"
|
||||||
|
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(success_msg, "green bold"))
|
||||||
|
else:
|
||||||
|
lines.append(success_msg)
|
||||||
|
|
||||||
|
return lines
|
||||||
|
|
||||||
|
def format_execution_error(self, runtime: str, error_code: int, error_msg: Optional[str] = None) -> List[str]:
|
||||||
|
"""Format execution error result.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
runtime: Name of the runtime that failed
|
||||||
|
error_code: Exit code from the failed execution
|
||||||
|
error_msg: Optional error message
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of formatted lines
|
||||||
|
"""
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
error_header = f"✗ {runtime.title()} execution failed (exit code: {error_code})"
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(error_header, "red bold"))
|
||||||
|
else:
|
||||||
|
lines.append(error_header)
|
||||||
|
|
||||||
|
if error_msg:
|
||||||
|
# Format error message with proper indentation
|
||||||
|
error_lines = error_msg.split('\n')
|
||||||
|
for line in error_lines:
|
||||||
|
if line.strip():
|
||||||
|
formatted_line = f" {line}"
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(formatted_line, "red"))
|
||||||
|
else:
|
||||||
|
lines.append(formatted_line)
|
||||||
|
|
||||||
|
return lines
|
||||||
|
|
||||||
|
def format_subprocess_details(self, args: List[str], content_length: int) -> List[str]:
|
||||||
|
"""Format subprocess execution details for debugging.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
args: The subprocess arguments (without content)
|
||||||
|
content_length: Length of content being passed
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of formatted lines
|
||||||
|
"""
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled("Subprocess execution:", "cyan"))
|
||||||
|
else:
|
||||||
|
lines.append("Subprocess execution:")
|
||||||
|
|
||||||
|
# Show command structure
|
||||||
|
args_display = " ".join(f'"{arg}"' if " " in arg else arg for arg in args)
|
||||||
|
command_line = f"├─ Args: {args_display}"
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(command_line, "dim"))
|
||||||
|
else:
|
||||||
|
lines.append(command_line)
|
||||||
|
|
||||||
|
# Show content info
|
||||||
|
content_line = f"└─ Content: +{content_length:,} chars appended"
|
||||||
|
if self.use_color:
|
||||||
|
lines.append(self._styled(content_line, "dim"))
|
||||||
|
else:
|
||||||
|
lines.append(content_line)
|
||||||
|
|
||||||
|
return lines
|
||||||
|
|
||||||
|
def _styled(self, text: str, style: str) -> str:
|
||||||
|
"""Apply styling to text with rich fallback."""
|
||||||
|
if self.use_color and RICH_AVAILABLE and self.console:
|
||||||
|
styled_text = Text(text)
|
||||||
|
styled_text.style = style
|
||||||
|
with self.console.capture() as capture:
|
||||||
|
self.console.print(styled_text, end="")
|
||||||
|
return capture.get()
|
||||||
|
else:
|
||||||
|
return text
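Putting the formatter together, a hedged end-to-end sketch of the intended call sequence; the script name, parameters, and timings are invented:

from apm_cli.output.script_formatters import ScriptExecutionFormatter

formatter = ScriptExecutionFormatter(use_color=False)
output = []
output += formatter.format_script_header("deploy", {"env": "staging"})
output += formatter.format_runtime_execution("copilot", "copilot chat", content_length=1234)
output += formatter.format_execution_success("copilot", execution_time=2.5)
print("\n".join(output))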
|
||||||
src/apm_cli/primitives/__init__.py (new file, 18 lines)
@@ -0,0 +1,18 @@
"""Primitives package for APM CLI - discovery and parsing of APM context."""

from .models import Chatmode, Instruction, Context, PrimitiveCollection, PrimitiveConflict
from .discovery import discover_primitives, find_primitive_files, discover_primitives_with_dependencies
from .parser import parse_primitive_file, validate_primitive

__all__ = [
    'Chatmode',
    'Instruction',
    'Context',
    'PrimitiveCollection',
    'PrimitiveConflict',
    'discover_primitives',
    'discover_primitives_with_dependencies',
    'find_primitive_files',
    'parse_primitive_file',
    'validate_primitive'
]
src/apm_cli/primitives/discovery.py (new file, 316 lines)
@@ -0,0 +1,316 @@
|
|||||||
|
"""Discovery functionality for primitive files."""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import glob
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Dict
|
||||||
|
|
||||||
|
from .models import PrimitiveCollection
|
||||||
|
from .parser import parse_primitive_file
|
||||||
|
from ..models.apm_package import APMPackage
|
||||||
|
|
||||||
|
|
||||||
|
# Common primitive patterns for local discovery (with recursive search)
|
||||||
|
LOCAL_PRIMITIVE_PATTERNS: Dict[str, List[str]] = {
|
||||||
|
'chatmode': [
|
||||||
|
"**/.apm/chatmodes/*.chatmode.md",
|
||||||
|
"**/.github/chatmodes/*.chatmode.md",
|
||||||
|
"**/*.chatmode.md" # Generic .chatmode.md files
|
||||||
|
],
|
||||||
|
'instruction': [
|
||||||
|
"**/.apm/instructions/*.instructions.md",
|
||||||
|
"**/.github/instructions/*.instructions.md",
|
||||||
|
"**/*.instructions.md" # Generic .instructions.md files
|
||||||
|
],
|
||||||
|
'context': [
|
||||||
|
"**/.apm/context/*.context.md",
|
||||||
|
"**/.apm/memory/*.memory.md", # APM memory convention
|
||||||
|
"**/.github/context/*.context.md",
|
||||||
|
"**/.github/memory/*.memory.md", # VSCode compatibility
|
||||||
|
"**/*.context.md", # Generic .context.md files
|
||||||
|
"**/*.memory.md" # Generic .memory.md files
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
# Dependency primitive patterns (for .apm directory within dependencies)
|
||||||
|
DEPENDENCY_PRIMITIVE_PATTERNS: Dict[str, List[str]] = {
|
||||||
|
'chatmode': ["chatmodes/*.chatmode.md"],
|
||||||
|
'instruction': ["instructions/*.instructions.md"],
|
||||||
|
'context': [
|
||||||
|
"context/*.context.md",
|
||||||
|
"memory/*.memory.md"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def discover_primitives(base_dir: str = ".") -> PrimitiveCollection:
|
||||||
|
"""Find all APM primitive files in the project.
|
||||||
|
|
||||||
|
Searches for .chatmode.md, .instructions.md, .context.md, and .memory.md files
|
||||||
|
in both .apm/ and .github/ directory structures.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
base_dir (str): Base directory to search in. Defaults to current directory.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
PrimitiveCollection: Collection of discovered and parsed primitives.
|
||||||
|
"""
|
||||||
|
collection = PrimitiveCollection()
|
||||||
|
|
||||||
|
# Find and parse files for each primitive type
|
||||||
|
for primitive_type, patterns in LOCAL_PRIMITIVE_PATTERNS.items():
|
||||||
|
files = find_primitive_files(base_dir, patterns)
|
||||||
|
|
||||||
|
for file_path in files:
|
||||||
|
try:
|
||||||
|
primitive = parse_primitive_file(file_path, source="local")
|
||||||
|
collection.add_primitive(primitive)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Warning: Failed to parse {file_path}: {e}")
|
||||||
|
|
||||||
|
return collection
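A minimal usage sketch of local discovery, run from a project root; what it finds depends entirely on which files match the glob patterns above:

from apm_cli.primitives import discover_primitives

collection = discover_primitives(".")
print(f"{collection.count()} primitives found")
for instruction in collection.instructions:
    print(instruction.name, instruction.apply_to)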
|
||||||
|
|
||||||
|
|
||||||
|
def discover_primitives_with_dependencies(base_dir: str = ".") -> PrimitiveCollection:
|
||||||
|
"""Enhanced primitive discovery including dependency sources.
|
||||||
|
|
||||||
|
Priority Order:
|
||||||
|
1. Local .apm/ (highest priority - always wins)
|
||||||
|
2. Dependencies in declaration order (first declared wins)
|
||||||
|
|
||||||
|
Args:
|
||||||
|
base_dir (str): Base directory to search in. Defaults to current directory.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
PrimitiveCollection: Collection of discovered and parsed primitives with source tracking.
|
||||||
|
"""
|
||||||
|
collection = PrimitiveCollection()
|
||||||
|
|
||||||
|
# Phase 1: Local primitives (highest priority)
|
||||||
|
scan_local_primitives(base_dir, collection)
|
||||||
|
|
||||||
|
# Phase 2: Dependency primitives (lower priority, with conflict detection)
|
||||||
|
scan_dependency_primitives(base_dir, collection)
|
||||||
|
|
||||||
|
return collection
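The priority order described in the docstring can be observed through the conflict records; a sketch assuming a project that has an apm_modules/ directory and at least one overlapping primitive name:

from apm_cli.primitives import discover_primitives_with_dependencies

collection = discover_primitives_with_dependencies(".")
if collection.has_conflicts():
    for conflict in collection.conflicts:
        # e.g. "instruction 'python-style': local overrides dependency:github/guides" (names invented)
        print(conflict)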
|
||||||
|
|
||||||
|
|
||||||
|
def scan_local_primitives(base_dir: str, collection: PrimitiveCollection) -> None:
|
||||||
|
"""Scan local .apm/ directory for primitives.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
base_dir (str): Base directory to search in.
|
||||||
|
collection (PrimitiveCollection): Collection to add primitives to.
|
||||||
|
"""
|
||||||
|
# Find and parse files for each primitive type
|
||||||
|
for primitive_type, patterns in LOCAL_PRIMITIVE_PATTERNS.items():
|
||||||
|
files = find_primitive_files(base_dir, patterns)
|
||||||
|
|
||||||
|
# Filter out files from apm_modules to avoid conflicts with dependency scanning
|
||||||
|
local_files = []
|
||||||
|
base_path = Path(base_dir)
|
||||||
|
apm_modules_path = base_path / "apm_modules"
|
||||||
|
|
||||||
|
for file_path in files:
|
||||||
|
# Only include files that are NOT in apm_modules directory
|
||||||
|
if not _is_under_directory(file_path, apm_modules_path):
|
||||||
|
local_files.append(file_path)
|
||||||
|
|
||||||
|
for file_path in local_files:
|
||||||
|
try:
|
||||||
|
primitive = parse_primitive_file(file_path, source="local")
|
||||||
|
collection.add_primitive(primitive)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Warning: Failed to parse local primitive {file_path}: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
def _is_under_directory(file_path: Path, directory: Path) -> bool:
|
||||||
|
"""Check if a file path is under a specific directory.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
file_path (Path): Path to check.
|
||||||
|
directory (Path): Directory to check against.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if file_path is under directory, False otherwise.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
file_path.resolve().relative_to(directory.resolve())
|
||||||
|
return True
|
||||||
|
except ValueError:
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def scan_dependency_primitives(base_dir: str, collection: PrimitiveCollection) -> None:
|
||||||
|
"""Scan all dependencies in apm_modules/ with priority handling.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
base_dir (str): Base directory to search in.
|
||||||
|
collection (PrimitiveCollection): Collection to add primitives to.
|
||||||
|
"""
|
||||||
|
apm_modules_path = Path(base_dir) / "apm_modules"
|
||||||
|
if not apm_modules_path.exists():
|
||||||
|
return
|
||||||
|
|
||||||
|
# Get dependency declaration order from apm.yml
|
||||||
|
dependency_order = get_dependency_declaration_order(base_dir)
|
||||||
|
|
||||||
|
# Process dependencies in declaration order
|
||||||
|
for dep_name in dependency_order:
|
||||||
|
# Handle org-namespaced structure (e.g., "github/design-guidelines")
|
||||||
|
if "/" in dep_name:
|
||||||
|
org_name, repo_name = dep_name.split("/", 1)
|
||||||
|
dep_path = apm_modules_path / org_name / repo_name
|
||||||
|
else:
|
||||||
|
# Fallback for non-namespaced dependencies
|
||||||
|
dep_path = apm_modules_path / dep_name
|
||||||
|
|
||||||
|
if dep_path.exists() and dep_path.is_dir():
|
||||||
|
scan_directory_with_source(dep_path, collection, source=f"dependency:{dep_name}")
|
||||||
|
|
||||||
|
|
||||||
|
def get_dependency_declaration_order(base_dir: str) -> List[str]:
|
||||||
|
"""Get APM dependency names in their declaration order from apm.yml.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
base_dir (str): Base directory containing apm.yml.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[str]: List of dependency names in declaration order.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
apm_yml_path = Path(base_dir) / "apm.yml"
|
||||||
|
if not apm_yml_path.exists():
|
||||||
|
return []
|
||||||
|
|
||||||
|
package = APMPackage.from_apm_yml(apm_yml_path)
|
||||||
|
apm_dependencies = package.get_apm_dependencies()
|
||||||
|
|
||||||
|
# Extract package names from dependency references
|
||||||
|
# Use alias if provided, otherwise use full org/repo path for org-namespaced structure
|
||||||
|
dependency_names = []
|
||||||
|
for dep in apm_dependencies:
|
||||||
|
if dep.alias:
|
||||||
|
dependency_names.append(dep.alias)
|
||||||
|
else:
|
||||||
|
# Use full org/repo path (e.g., "github/design-guidelines")
|
||||||
|
# This matches our org-namespaced directory structure
|
||||||
|
dependency_names.append(dep.repo_url)
|
||||||
|
|
||||||
|
return dependency_names
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Warning: Failed to parse dependency order from apm.yml: {e}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
|
||||||
|
def scan_directory_with_source(directory: Path, collection: PrimitiveCollection, source: str) -> None:
|
||||||
|
"""Scan a directory for primitives with a specific source tag.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
directory (Path): Directory to scan (e.g., apm_modules/package_name).
|
||||||
|
collection (PrimitiveCollection): Collection to add primitives to.
|
||||||
|
source (str): Source identifier for discovered primitives.
|
||||||
|
"""
|
||||||
|
# Look for .apm directory within the dependency
|
||||||
|
apm_dir = directory / ".apm"
|
||||||
|
if not apm_dir.exists():
|
||||||
|
return
|
||||||
|
|
||||||
|
# Find and parse files for each primitive type
|
||||||
|
for primitive_type, patterns in DEPENDENCY_PRIMITIVE_PATTERNS.items():
|
||||||
|
for pattern in patterns:
|
||||||
|
full_pattern = str(apm_dir / pattern)
|
||||||
|
matching_files = glob.glob(full_pattern, recursive=True)
|
||||||
|
|
||||||
|
for file_path_str in matching_files:
|
||||||
|
file_path = Path(file_path_str)
|
||||||
|
if file_path.is_file() and _is_readable(file_path):
|
||||||
|
try:
|
||||||
|
primitive = parse_primitive_file(file_path, source=source)
|
||||||
|
collection.add_primitive(primitive)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Warning: Failed to parse dependency primitive {file_path}: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
def find_primitive_files(base_dir: str, patterns: List[str]) -> List[Path]:
|
||||||
|
"""Find primitive files matching the given patterns.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
base_dir (str): Base directory to search in.
|
||||||
|
patterns (List[str]): List of glob patterns to match.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[Path]: List of unique file paths found.
|
||||||
|
"""
|
||||||
|
if not os.path.isdir(base_dir):
|
||||||
|
return []
|
||||||
|
|
||||||
|
all_files = []
|
||||||
|
|
||||||
|
for pattern in patterns:
|
||||||
|
# Use glob to find files matching the pattern
|
||||||
|
matching_files = glob.glob(os.path.join(base_dir, pattern), recursive=True)
|
||||||
|
all_files.extend(matching_files)
|
||||||
|
|
||||||
|
# Remove duplicates while preserving order and convert to Path objects
|
||||||
|
seen = set()
|
||||||
|
unique_files = []
|
||||||
|
|
||||||
|
for file_path in all_files:
|
||||||
|
abs_path = os.path.abspath(file_path)
|
||||||
|
if abs_path not in seen:
|
||||||
|
seen.add(abs_path)
|
||||||
|
unique_files.append(Path(abs_path))
|
||||||
|
|
||||||
|
# Filter out directories and ensure files are readable
|
||||||
|
valid_files = []
|
||||||
|
for file_path in unique_files:
|
||||||
|
if file_path.is_file() and _is_readable(file_path):
|
||||||
|
valid_files.append(file_path)
|
||||||
|
|
||||||
|
return valid_files
|
||||||
|
|
||||||
|
|
||||||
|
def _is_readable(file_path: Path) -> bool:
|
||||||
|
"""Check if a file is readable.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
file_path (Path): Path to check.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if file is readable, False otherwise.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
with open(file_path, 'r', encoding='utf-8') as f:
|
||||||
|
# Try to read first few bytes to verify it's readable
|
||||||
|
f.read(1)
|
||||||
|
return True
|
||||||
|
except (PermissionError, UnicodeDecodeError, OSError):
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def _should_skip_directory(dir_path: str) -> bool:
|
||||||
|
"""Check if a directory should be skipped during scanning.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
dir_path (str): Directory path to check.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if directory should be skipped, False otherwise.
|
||||||
|
"""
|
||||||
|
skip_patterns = {
|
||||||
|
'.git',
|
||||||
|
'node_modules',
|
||||||
|
'__pycache__',
|
||||||
|
'.pytest_cache',
|
||||||
|
'.venv',
|
||||||
|
'venv',
|
||||||
|
'.tox',
|
||||||
|
'build',
|
||||||
|
'dist',
|
||||||
|
'.mypy_cache'
|
||||||
|
}
|
||||||
|
|
||||||
|
dir_name = os.path.basename(dir_path)
|
||||||
|
return dir_name in skip_patterns
|
||||||
src/apm_cli/primitives/models.py (new file, 212 lines)
@@ -0,0 +1,212 @@
|
|||||||
|
"""Data models for APM context."""
|
||||||
|
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional, List, Union, Dict
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class Chatmode:
|
||||||
|
"""Represents a chatmode primitive."""
|
||||||
|
name: str
|
||||||
|
file_path: Path
|
||||||
|
description: str
|
||||||
|
apply_to: Optional[str] # Glob pattern for file targeting (optional for chatmodes)
|
||||||
|
content: str
|
||||||
|
author: Optional[str] = None
|
||||||
|
version: Optional[str] = None
|
||||||
|
source: Optional[str] = None # Source of primitive: "local" or "dependency:{package_name}"
|
||||||
|
|
||||||
|
def validate(self) -> List[str]:
|
||||||
|
"""Validate chatmode structure.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[str]: List of validation errors.
|
||||||
|
"""
|
||||||
|
errors = []
|
||||||
|
if not self.description:
|
||||||
|
errors.append("Missing 'description' in frontmatter")
|
||||||
|
if not self.content.strip():
|
||||||
|
errors.append("Empty content")
|
||||||
|
return errors
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class Instruction:
|
||||||
|
"""Represents an instruction primitive."""
|
||||||
|
name: str
|
||||||
|
file_path: Path
|
||||||
|
description: str
|
||||||
|
apply_to: str # Glob pattern for file targeting (required for instructions)
|
||||||
|
content: str
|
||||||
|
author: Optional[str] = None
|
||||||
|
version: Optional[str] = None
|
||||||
|
source: Optional[str] = None # Source of primitive: "local" or "dependency:{package_name}"
|
||||||
|
|
||||||
|
def validate(self) -> List[str]:
|
||||||
|
"""Validate instruction structure.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[str]: List of validation errors.
|
||||||
|
"""
|
||||||
|
errors = []
|
||||||
|
if not self.description:
|
||||||
|
errors.append("Missing 'description' in frontmatter")
|
||||||
|
if not self.apply_to:
|
||||||
|
errors.append("Missing 'applyTo' in frontmatter (required for instructions)")
|
||||||
|
if not self.content.strip():
|
||||||
|
errors.append("Empty content")
|
||||||
|
return errors
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class Context:
|
||||||
|
"""Represents a context primitive."""
|
||||||
|
name: str
|
||||||
|
file_path: Path
|
||||||
|
content: str
|
||||||
|
description: Optional[str] = None
|
||||||
|
author: Optional[str] = None
|
||||||
|
version: Optional[str] = None
|
||||||
|
source: Optional[str] = None # Source of primitive: "local" or "dependency:{package_name}"
|
||||||
|
|
||||||
|
def validate(self) -> List[str]:
|
||||||
|
"""Validate context structure.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[str]: List of validation errors.
|
||||||
|
"""
|
||||||
|
errors = []
|
||||||
|
if not self.content.strip():
|
||||||
|
errors.append("Empty content")
|
||||||
|
return errors
|
||||||
|
|
||||||
|
|
||||||
|
# Union type for all primitive types
|
||||||
|
Primitive = Union[Chatmode, Instruction, Context]
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class PrimitiveConflict:
|
||||||
|
"""Represents a conflict between primitives from different sources."""
|
||||||
|
primitive_name: str
|
||||||
|
primitive_type: str # 'chatmode', 'instruction', 'context'
|
||||||
|
winning_source: str # Source that won the conflict
|
||||||
|
losing_sources: List[str] # Sources that lost the conflict
|
||||||
|
file_path: Path # Path of the winning primitive
|
||||||
|
|
||||||
|
def __str__(self) -> str:
|
||||||
|
"""String representation of the conflict."""
|
||||||
|
losing_list = ", ".join(self.losing_sources)
|
||||||
|
return f"{self.primitive_type} '{self.primitive_name}': {self.winning_source} overrides {losing_list}"
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class PrimitiveCollection:
|
||||||
|
"""Collection of discovered primitives."""
|
||||||
|
chatmodes: List[Chatmode]
|
||||||
|
instructions: List[Instruction]
|
||||||
|
contexts: List[Context]
|
||||||
|
conflicts: List[PrimitiveConflict] # Track conflicts during discovery
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.chatmodes = []
|
||||||
|
self.instructions = []
|
||||||
|
self.contexts = []
|
||||||
|
self.conflicts = []
|
||||||
|
|
||||||
|
def add_primitive(self, primitive: Primitive) -> None:
|
||||||
|
"""Add a primitive to the appropriate collection.
|
||||||
|
|
||||||
|
If a primitive with the same name already exists, the new primitive
|
||||||
|
will only be added if it has higher priority (lower priority primitives
|
||||||
|
are tracked as conflicts).
|
||||||
|
"""
|
||||||
|
if isinstance(primitive, Chatmode):
|
||||||
|
self._add_with_conflict_detection(primitive, self.chatmodes, "chatmode")
|
||||||
|
elif isinstance(primitive, Instruction):
|
||||||
|
self._add_with_conflict_detection(primitive, self.instructions, "instruction")
|
||||||
|
elif isinstance(primitive, Context):
|
||||||
|
self._add_with_conflict_detection(primitive, self.contexts, "context")
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unknown primitive type: {type(primitive)}")
|
||||||
|
|
||||||
|
def _add_with_conflict_detection(self, new_primitive: Primitive, collection: List[Primitive], primitive_type: str) -> None:
|
||||||
|
"""Add primitive with conflict detection."""
|
||||||
|
# Find existing primitive with same name
|
||||||
|
existing_index = None
|
||||||
|
for i, existing in enumerate(collection):
|
||||||
|
if existing.name == new_primitive.name:
|
||||||
|
existing_index = i
|
||||||
|
break
|
||||||
|
|
||||||
|
if existing_index is None:
|
||||||
|
# No conflict, just add the primitive
|
||||||
|
collection.append(new_primitive)
|
||||||
|
else:
|
||||||
|
# Conflict detected - apply priority rules
|
||||||
|
existing = collection[existing_index]
|
||||||
|
|
||||||
|
# Priority rules:
|
||||||
|
# 1. Local always wins over dependency
|
||||||
|
# 2. Earlier dependency wins over later dependency
|
||||||
|
should_replace = self._should_replace_primitive(existing, new_primitive)
|
||||||
|
|
||||||
|
if should_replace:
|
||||||
|
# Replace existing with new primitive and record conflict
|
||||||
|
conflict = PrimitiveConflict(
|
||||||
|
primitive_name=new_primitive.name,
|
||||||
|
primitive_type=primitive_type,
|
||||||
|
winning_source=new_primitive.source or "unknown",
|
||||||
|
losing_sources=[existing.source or "unknown"],
|
||||||
|
file_path=new_primitive.file_path
|
||||||
|
)
|
||||||
|
self.conflicts.append(conflict)
|
||||||
|
collection[existing_index] = new_primitive
|
||||||
|
else:
|
||||||
|
# Keep existing and record that new primitive was ignored
|
||||||
|
conflict = PrimitiveConflict(
|
||||||
|
primitive_name=existing.name,
|
||||||
|
primitive_type=primitive_type,
|
||||||
|
winning_source=existing.source or "unknown",
|
||||||
|
losing_sources=[new_primitive.source or "unknown"],
|
||||||
|
file_path=existing.file_path
|
||||||
|
)
|
||||||
|
self.conflicts.append(conflict)
|
||||||
|
# Don't add new_primitive to collection
|
||||||
|
|
||||||
|
def _should_replace_primitive(self, existing: Primitive, new: Primitive) -> bool:
|
||||||
|
"""Determine if new primitive should replace existing based on priority."""
|
||||||
|
existing_source = existing.source or "unknown"
|
||||||
|
new_source = new.source or "unknown"
|
||||||
|
|
||||||
|
# Local always wins
|
||||||
|
if existing_source == "local":
|
||||||
|
return False # Never replace local
|
||||||
|
if new_source == "local":
|
||||||
|
return True # Always replace with local
|
||||||
|
|
||||||
|
# Both are dependencies - this shouldn't happen in correct usage
|
||||||
|
# since dependencies should be processed in order, but handle gracefully
|
||||||
|
return False # Keep first dependency (existing)
|
||||||
|
|
||||||
|
def all_primitives(self) -> List[Primitive]:
|
||||||
|
"""Get all primitives as a single list."""
|
||||||
|
return self.chatmodes + self.instructions + self.contexts
|
||||||
|
|
||||||
|
def count(self) -> int:
|
||||||
|
"""Get total count of all primitives."""
|
||||||
|
return len(self.chatmodes) + len(self.instructions) + len(self.contexts)
|
||||||
|
|
||||||
|
def has_conflicts(self) -> bool:
|
||||||
|
"""Check if any conflicts were detected during discovery."""
|
||||||
|
return len(self.conflicts) > 0
|
||||||
|
|
||||||
|
def get_conflicts_by_type(self, primitive_type: str) -> List[PrimitiveConflict]:
|
||||||
|
"""Get conflicts for a specific primitive type."""
|
||||||
|
return [c for c in self.conflicts if c.primitive_type == primitive_type]
|
||||||
|
|
||||||
|
def get_primitives_by_source(self, source: str) -> List[Primitive]:
|
||||||
|
"""Get all primitives from a specific source."""
|
||||||
|
all_primitives = self.all_primitives()
|
||||||
|
return [p for p in all_primitives if p.source == source]
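A short sketch of the replacement rules in add_primitive; the names and paths are invented, and the point is that a local primitive always replaces a dependency primitive of the same name while recording a conflict:

from pathlib import Path
from apm_cli.primitives import Context, PrimitiveCollection

collection = PrimitiveCollection()
collection.add_primitive(Context(
    name="stack",
    file_path=Path("apm_modules/org/pkg/.apm/context/stack.context.md"),
    content="dependency version",
    source="dependency:org/pkg",
))
collection.add_primitive(Context(
    name="stack",
    file_path=Path(".apm/context/stack.context.md"),
    content="local version",
    source="local",
))

assert len(collection.contexts) == 1
assert collection.contexts[0].source == "local"  # local replaced the dependency copy
assert collection.has_conflicts()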
|
||||||
src/apm_cli/primitives/parser.py (new file, 204 lines)
@@ -0,0 +1,204 @@
|
|||||||
|
"""Parser for primitive definition files."""
|
||||||
|
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Union, List
|
||||||
|
import frontmatter
|
||||||
|
|
||||||
|
from .models import Chatmode, Instruction, Context, Primitive
|
||||||
|
|
||||||
|
|
||||||
|
def parse_primitive_file(file_path: Union[str, Path], source: str = None) -> Primitive:
|
||||||
|
"""Parse a primitive file.
|
||||||
|
|
||||||
|
Determines the primitive type based on file extension and parses accordingly.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
file_path (Union[str, Path]): Path to the primitive file.
|
||||||
|
source (str, optional): Source identifier for the primitive (e.g., "local", "dependency:package_name").
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Primitive: Parsed primitive (Chatmode, Instruction, or Context).
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If file cannot be parsed or has invalid format.
|
||||||
|
"""
|
||||||
|
file_path = Path(file_path)
|
||||||
|
|
||||||
|
try:
|
||||||
|
with open(file_path, 'r', encoding='utf-8') as f:
|
||||||
|
post = frontmatter.load(f)
|
||||||
|
|
||||||
|
# Extract name based on file structure
|
||||||
|
name = _extract_primitive_name(file_path)
|
||||||
|
metadata = post.metadata
|
||||||
|
content = post.content
|
||||||
|
|
||||||
|
# Determine primitive type based on file extension
|
||||||
|
if file_path.name.endswith('.chatmode.md'):
|
||||||
|
return _parse_chatmode(name, file_path, metadata, content, source)
|
||||||
|
elif file_path.name.endswith('.instructions.md'):
|
||||||
|
return _parse_instruction(name, file_path, metadata, content, source)
|
||||||
|
elif file_path.name.endswith('.context.md') or file_path.name.endswith('.memory.md') or _is_context_file(file_path):
|
||||||
|
return _parse_context(name, file_path, metadata, content, source)
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unknown primitive file type: {file_path}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
raise ValueError(f"Failed to parse primitive file {file_path}: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_chatmode(name: str, file_path: Path, metadata: dict, content: str, source: str = None) -> Chatmode:
|
||||||
|
"""Parse a chatmode primitive.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
name (str): Name of the chatmode.
|
||||||
|
file_path (Path): Path to the file.
|
||||||
|
metadata (dict): Metadata from frontmatter.
|
||||||
|
content (str): Content of the file.
|
||||||
|
source (str, optional): Source identifier for the primitive.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Chatmode: Parsed chatmode primitive.
|
||||||
|
"""
|
||||||
|
return Chatmode(
|
||||||
|
name=name,
|
||||||
|
file_path=file_path,
|
||||||
|
description=metadata.get('description', ''),
|
||||||
|
apply_to=metadata.get('applyTo'), # Optional for chatmodes
|
||||||
|
content=content,
|
||||||
|
author=metadata.get('author'),
|
||||||
|
version=metadata.get('version'),
|
||||||
|
source=source
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_instruction(name: str, file_path: Path, metadata: dict, content: str, source: str = None) -> Instruction:
|
||||||
|
"""Parse an instruction primitive.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
name (str): Name of the instruction.
|
||||||
|
file_path (Path): Path to the file.
|
||||||
|
metadata (dict): Metadata from frontmatter.
|
||||||
|
content (str): Content of the file.
|
||||||
|
source (str, optional): Source identifier for the primitive.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Instruction: Parsed instruction primitive.
|
||||||
|
"""
|
||||||
|
return Instruction(
|
||||||
|
name=name,
|
||||||
|
file_path=file_path,
|
||||||
|
description=metadata.get('description', ''),
|
||||||
|
apply_to=metadata.get('applyTo', ''), # Required for instructions
|
||||||
|
content=content,
|
||||||
|
author=metadata.get('author'),
|
||||||
|
version=metadata.get('version'),
|
||||||
|
source=source
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_context(name: str, file_path: Path, metadata: dict, content: str, source: str = None) -> Context:
|
||||||
|
"""Parse a context primitive.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
name (str): Name of the context.
|
||||||
|
file_path (Path): Path to the file.
|
||||||
|
metadata (dict): Metadata from frontmatter.
|
||||||
|
content (str): Content of the file.
|
||||||
|
source (str, optional): Source identifier for the primitive.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Context: Parsed context primitive.
|
||||||
|
"""
|
||||||
|
return Context(
|
||||||
|
name=name,
|
||||||
|
file_path=file_path,
|
||||||
|
content=content,
|
||||||
|
description=metadata.get('description'), # Optional for contexts
|
||||||
|
author=metadata.get('author'),
|
||||||
|
version=metadata.get('version'),
|
||||||
|
source=source
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _extract_primitive_name(file_path: Path) -> str:
|
||||||
|
"""Extract primitive name from file path based on naming conventions.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
file_path (Path): Path to the primitive file.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Extracted primitive name.
|
||||||
|
"""
|
||||||
|
# Normalize path
|
||||||
|
path_parts = file_path.parts
|
||||||
|
|
||||||
|
# Check if it's in a structured directory (.apm/ or .github/)
|
||||||
|
if '.apm' in path_parts or '.github' in path_parts:
|
||||||
|
try:
|
||||||
|
# Find the base directory index
|
||||||
|
if '.apm' in path_parts:
|
||||||
|
base_idx = path_parts.index('.apm')
|
||||||
|
else:
|
||||||
|
base_idx = path_parts.index('.github')
|
||||||
|
|
||||||
|
# For structured directories like .apm/chatmodes/name.chatmode.md
|
||||||
|
if (base_idx + 2 < len(path_parts) and
|
||||||
|
path_parts[base_idx + 1] in ['chatmodes', 'instructions', 'context', 'memory']):
|
||||||
|
basename = file_path.name
|
||||||
|
# Remove the double extension (.chatmode.md, .instructions.md, etc.)
|
||||||
|
if basename.endswith('.chatmode.md'):
|
||||||
|
return basename.replace('.chatmode.md', '')
|
||||||
|
elif basename.endswith('.instructions.md'):
|
||||||
|
return basename.replace('.instructions.md', '')
|
||||||
|
elif basename.endswith('.context.md'):
|
||||||
|
return basename.replace('.context.md', '')
|
||||||
|
elif basename.endswith('.memory.md'):
|
||||||
|
return basename.replace('.memory.md', '')
|
||||||
|
elif basename.endswith('.md'):
|
||||||
|
return basename.replace('.md', '')
|
||||||
|
except (ValueError, IndexError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Fallback: extract from filename
|
||||||
|
basename = file_path.name
|
||||||
|
if basename.endswith('.chatmode.md'):
|
||||||
|
return basename.replace('.chatmode.md', '')
|
||||||
|
elif basename.endswith('.instructions.md'):
|
||||||
|
return basename.replace('.instructions.md', '')
|
||||||
|
elif basename.endswith('.context.md'):
|
||||||
|
return basename.replace('.context.md', '')
|
||||||
|
elif basename.endswith('.memory.md'):
|
||||||
|
return basename.replace('.memory.md', '')
|
||||||
|
elif basename.endswith('.md'):
|
||||||
|
return basename.replace('.md', '')
|
||||||
|
|
||||||
|
# Final fallback: use filename without extension
|
||||||
|
return file_path.stem
|
||||||
|
|
||||||
|
|
||||||
|
def _is_context_file(file_path: Path) -> bool:
|
||||||
|
"""Check if a file should be treated as a context file based on its directory.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
file_path (Path): Path to check.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if file is in .apm/memory/ or .github/memory/ directory.
|
||||||
|
"""
|
||||||
|
# Only files directly under .apm/memory/ or .github/memory/ are considered context files here
|
||||||
|
parent_parts = file_path.parent.parts[-2:] # Get last two parts of parent path
|
||||||
|
return parent_parts in [('.apm', 'memory'), ('.github', 'memory')]
|
||||||
|
|
||||||
|
|
||||||
|
def validate_primitive(primitive: Primitive) -> List[str]:
|
||||||
|
"""Validate a primitive and return any errors.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
primitive (Primitive): Primitive to validate.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[str]: List of validation errors.
|
||||||
|
"""
|
||||||
|
return primitive.validate()
|
||||||
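
A minimal usage sketch (not part of the diff) of the parser above; the module path `apm_cli.primitives.parser` and the example file name are assumptions:

```python
from pathlib import Path
from apm_cli.primitives.parser import parse_primitive_file, validate_primitive

# Assumes .apm/chatmodes/reviewer.chatmode.md exists with YAML frontmatter.
primitive = parse_primitive_file(Path(".apm/chatmodes/reviewer.chatmode.md"), source="local")
errors = validate_primitive(primitive)
if errors:
    print("Validation errors:", errors)
else:
    print(f"Parsed {primitive.name}: {primitive.description}")
```
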
7
src/apm_cli/registry/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""MCP Registry module for APM-CLI."""

from .client import SimpleRegistryClient
from .integration import RegistryIntegration
from .operations import MCPServerOperations

__all__ = ["SimpleRegistryClient", "RegistryIntegration", "MCPServerOperations"]
253
src/apm_cli/registry/client.py
Normal file
@@ -0,0 +1,253 @@
"""Simple MCP Registry client for server discovery."""

import os
import requests
from typing import Dict, List, Optional, Any, Tuple


class SimpleRegistryClient:
    """Simple client for querying MCP registries for server discovery."""

    def __init__(self, registry_url: Optional[str] = None):
        """Initialize the registry client.

        Args:
            registry_url (str, optional): URL of the MCP registry.
                If not provided, uses the MCP_REGISTRY_URL environment variable
                or falls back to the default demo registry.
        """
        self.registry_url = registry_url or os.environ.get(
            "MCP_REGISTRY_URL", "https://api.mcp.github.com"
        )
        self.session = requests.Session()

    def list_servers(self, limit: int = 100, cursor: Optional[str] = None) -> Tuple[List[Dict[str, Any]], Optional[str]]:
        """List all available servers in the registry.

        Args:
            limit (int, optional): Maximum number of entries to return. Defaults to 100.
            cursor (str, optional): Pagination cursor for retrieving next set of results.

        Returns:
            Tuple[List[Dict[str, Any]], Optional[str]]: List of server metadata dictionaries and the next cursor if available.

        Raises:
            requests.RequestException: If the request fails.
        """
        url = f"{self.registry_url}/v0/servers"
        params = {}

        if limit is not None:
            params['limit'] = limit
        if cursor is not None:
            params['cursor'] = cursor

        response = self.session.get(url, params=params)
        response.raise_for_status()
        data = response.json()

        # Extract servers - they're nested under "server" key in each item
        raw_servers = data.get("servers", [])
        servers = []
        for item in raw_servers:
            if "server" in item:
                servers.append(item["server"])
            else:
                servers.append(item)  # Fallback for different structure

        metadata = data.get("metadata", {})
        next_cursor = metadata.get("next_cursor")

        return servers, next_cursor

    def search_servers(self, query: str) -> List[Dict[str, Any]]:
        """Search for servers in the registry using the API search endpoint.

        Args:
            query (str): Search query string.

        Returns:
            List[Dict[str, Any]]: List of matching server metadata dictionaries.

        Raises:
            requests.RequestException: If the request fails.
        """
        # The MCP Registry API now only accepts repository names (e.g., "github-mcp-server")
        # If the query looks like a full identifier (e.g., "io.github.github/github-mcp-server"),
        # extract the repository name for the search
        search_query = self._extract_repository_name(query)

        url = f"{self.registry_url}/v0/servers/search"
        params = {'q': search_query}

        response = self.session.get(url, params=params)
        response.raise_for_status()
        data = response.json()

        # Extract servers - they're nested under "server" key in each item
        raw_servers = data.get("servers", [])
        servers = []
        for item in raw_servers:
            if "server" in item:
                servers.append(item["server"])
            else:
                servers.append(item)  # Fallback for different structure

        return servers

    def get_server_info(self, server_id: str) -> Dict[str, Any]:
        """Get detailed information about a specific server.

        Args:
            server_id (str): ID of the server.

        Returns:
            Dict[str, Any]: Server metadata dictionary.

        Raises:
            requests.RequestException: If the request fails.
            ValueError: If the server is not found.
        """
        url = f"{self.registry_url}/v0/servers/{server_id}"
        response = self.session.get(url)
        response.raise_for_status()
        data = response.json()

        # Return the complete response including x-github and other metadata
        # but ensure the main server info is accessible at the top level
        if "server" in data:
            # Merge server info to top level while preserving x-github and other sections
            result = data["server"].copy()
            for key, value in data.items():
                if key != "server":
                    result[key] = value

            if not result:
                raise ValueError(f"Server '{server_id}' not found in registry")

            return result
        else:
            if not data:
                raise ValueError(f"Server '{server_id}' not found in registry")
            return data

    def get_server_by_name(self, name: str) -> Optional[Dict[str, Any]]:
        """Find a server by its name using the search API.

        Args:
            name (str): Name of the server to find.

        Returns:
            Optional[Dict[str, Any]]: Server metadata dictionary or None if not found.

        Raises:
            requests.RequestException: If the request fails.
        """
        # Use search API to find by name - more efficient than listing all servers
        try:
            search_results = self.search_servers(name)

            # Look for an exact match in search results
            for server in search_results:
                if server.get("name") == name:
                    return self.get_server_info(server["id"])

        except Exception:
            pass

        return None

    def find_server_by_reference(self, reference: str) -> Optional[Dict[str, Any]]:
        """Find a server by exact name match or server ID.

        This is an efficient lookup that uses the search API:
        1. Server ID (UUID format) - direct API call
        2. Server name - search API for exact match (automatically handles identifier extraction)

        Args:
            reference (str): Server reference (ID or exact name).

        Returns:
            Optional[Dict[str, Any]]: Server metadata dictionary or None if not found.

        Raises:
            requests.RequestException: If the request fails.
        """
        # Strategy 1: Try as server ID first (direct lookup)
        try:
            # Check if it looks like a UUID (contains hyphens and is 36 chars)
            if len(reference) == 36 and reference.count('-') == 4:
                return self.get_server_info(reference)
        except (ValueError, Exception):
            pass

        # Strategy 2: Use search API to find by name
        # search_servers now handles extracting repository names internally
        try:
            search_results = self.search_servers(reference)

            # Look for matches in search results - check both exact reference match
            # and the server name from the registry
            for server in search_results:
                server_name = server.get("name", "")
                # Check exact match with original reference
                if server_name == reference:
                    return self.get_server_info(server["id"])
                # Check match with common identifier patterns
                if self._is_server_match(reference, server_name):
                    return self.get_server_info(server["id"])

        except Exception:
            pass

        # If not found by ID or exact name, server is not in registry
        return None

    def _extract_repository_name(self, reference: str) -> str:
        """Extract the repository name from various identifier formats.

        This method handles various naming patterns by extracting the part after
        the last slash, which typically represents the actual server/repository name.

        Examples:
            - "io.github.github/github-mcp-server" -> "github-mcp-server"
            - "abc.dllde.io/some-server" -> "some-server"
            - "adb.ok/another-server" -> "another-server"
            - "github/github-mcp-server" -> "github-mcp-server"
            - "github-mcp-server" -> "github-mcp-server"

        Args:
            reference (str): Server reference in various formats.

        Returns:
            str: Repository name suitable for API search.
        """
        # If there's a slash, extract the part after the last slash
        # This works for any pattern like domain.tld/server, owner/repo, etc.
        if "/" in reference:
            return reference.split("/")[-1]

        # Already a simple repo name
        return reference

    def _is_server_match(self, reference: str, server_name: str) -> bool:
        """Check if a reference matches a server name using common patterns.

        Args:
            reference (str): Original reference from user.
            server_name (str): Server name from registry.

        Returns:
            bool: True if they represent the same server.
        """
        # Direct match
        if reference == server_name:
            return True

        # Extract repo names and compare
        ref_repo = self._extract_repository_name(reference)
        server_repo = self._extract_repository_name(server_name)

        return ref_repo == server_repo
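
For illustration only, a sketch of the client above in use; the server reference is an assumption and the calls hit whatever registry MCP_REGISTRY_URL points at:

```python
from apm_cli.registry.client import SimpleRegistryClient

client = SimpleRegistryClient()  # honours MCP_REGISTRY_URL if set
servers, cursor = client.list_servers(limit=10)
print(f"Fetched {len(servers)} servers (next cursor: {cursor})")

match = client.find_server_by_reference("io.github.github/github-mcp-server")
if match:
    print(match.get("name"), match.get("id"))
```
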
157
src/apm_cli/registry/integration.py
Normal file
@@ -0,0 +1,157 @@
"""Integration module for connecting registry client with package manager."""

import requests
from typing import Dict, List, Any, Optional
from .client import SimpleRegistryClient


class RegistryIntegration:
    """Integration class for connecting registry discovery to package manager."""

    def __init__(self, registry_url: Optional[str] = None):
        """Initialize the registry integration.

        Args:
            registry_url (str, optional): URL of the MCP registry.
                If not provided, uses the MCP_REGISTRY_URL environment variable
                or falls back to the default demo registry.
        """
        self.client = SimpleRegistryClient(registry_url)

    def list_available_packages(self) -> List[Dict[str, Any]]:
        """List all available packages in the registry.

        Returns:
            List[Dict[str, Any]]: List of package metadata dictionaries.
        """
        servers, _ = self.client.list_servers()
        # Transform server data to package format for backward compatibility
        return [self._server_to_package(server) for server in servers]

    def search_packages(self, query: str) -> List[Dict[str, Any]]:
        """Search for packages in the registry.

        Args:
            query (str): Search query string.

        Returns:
            List[Dict[str, Any]]: List of matching package metadata dictionaries.
        """
        servers = self.client.search_servers(query)
        # Transform server data to package format for backward compatibility
        return [self._server_to_package(server) for server in servers]

    def get_package_info(self, name: str) -> Dict[str, Any]:
        """Get detailed information about a specific package.

        Args:
            name (str): Name of the package.

        Returns:
            Dict[str, Any]: Package metadata dictionary.

        Raises:
            ValueError: If the package is not found.
        """
        # Use find_server_by_reference which handles all identifier formats:
        # - UUIDs (direct lookup)
        # - Full identifiers like "io.github.github/github-mcp-server"
        # - Registry names like "github/github-mcp-server"
        # - Simple names like "github-mcp-server"
        server_info = self.client.find_server_by_reference(name)
        if not server_info:
            raise ValueError(f"Package '{name}' not found in registry")
        return self._server_to_package_detail(server_info)

    def get_latest_version(self, name: str) -> str:
        """Get the latest version of a package.

        Args:
            name (str): Name of the package.

        Returns:
            str: Latest version string.

        Raises:
            ValueError: If the package has no versions.
        """
        package_info = self.get_package_info(name)

        # Check for version_detail in server format
        if "version_detail" in package_info:
            version_detail = package_info.get("version_detail", {})
            if version_detail and "version" in version_detail:
                return version_detail["version"]

        # Check packages list for version information
        packages = package_info.get("packages", [])
        if packages:
            for pkg in packages:
                if "version" in pkg:
                    return pkg["version"]

        # Fall back to versions list (backward compatibility)
        versions = package_info.get("versions", [])
        if versions:
            return versions[-1].get("version", "latest")

        raise ValueError(f"Package '{name}' has no versions")

    def _server_to_package(self, server: Dict[str, Any]) -> Dict[str, Any]:
        """Convert server data format to package format for compatibility.

        Args:
            server (Dict[str, Any]): Server data from registry.

        Returns:
            Dict[str, Any]: Package formatted data.
        """
        package = {
            "id": server.get("id", ""),
            "name": server.get("name", "Unknown"),
            "description": server.get("description", "No description available"),
        }

        # Add repository information if available
        if "repository" in server:
            package["repository"] = server["repository"]

        # Add version information if available
        if "version_detail" in server:
            package["version_detail"] = server["version_detail"]

        return package

    def _server_to_package_detail(self, server: Dict[str, Any]) -> Dict[str, Any]:
        """Convert detailed server data to package detail format.

        Args:
            server (Dict[str, Any]): Server data from registry.

        Returns:
            Dict[str, Any]: Package detail formatted data.
        """
        # Start with the basic package data
        package_detail = self._server_to_package(server)

        # Add packages information
        if "packages" in server:
            package_detail["packages"] = server["packages"]

        # Add remotes information (crucial for deployment type detection)
        if "remotes" in server:
            package_detail["remotes"] = server["remotes"]

        if "package_canonical" in server:
            package_detail["package_canonical"] = server["package_canonical"]

        # For backward compatibility, create a versions list
        if "version_detail" in server and server["version_detail"]:
            version_info = server["version_detail"]
            package_detail["versions"] = [{
                "version": version_info.get("version", "latest"),
                "release_date": version_info.get("release_date", ""),
                "is_latest": version_info.get("is_latest", True)
            }]

        return package_detail
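
A short, hedged sketch of the integration layer in use; the package names are illustrative and results depend on the configured registry:

```python
from apm_cli.registry.integration import RegistryIntegration

registry = RegistryIntegration()
for pkg in registry.search_packages("github"):
    print(pkg["name"], "-", pkg["description"])

print(registry.get_latest_version("github-mcp-server"))
```
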
398
src/apm_cli/registry/operations.py
Normal file
@@ -0,0 +1,398 @@
"""MCP server operations and installation logic."""

import os
from typing import List, Dict, Set, Optional, Tuple
from pathlib import Path

from .client import SimpleRegistryClient


class MCPServerOperations:
    """Handles MCP server operations like conflict detection and installation status."""

    def __init__(self, registry_url: Optional[str] = None):
        """Initialize MCP server operations.

        Args:
            registry_url: Optional registry URL override
        """
        self.registry_client = SimpleRegistryClient(registry_url)

    def check_servers_needing_installation(self, target_runtimes: List[str], server_references: List[str]) -> List[str]:
        """Check which MCP servers actually need installation across target runtimes.

        This method checks the actual MCP configuration files to see which servers
        are already installed by comparing server IDs (UUIDs), not names.

        Args:
            target_runtimes: List of target runtimes to check
            server_references: List of MCP server references (names or IDs)

        Returns:
            List of server references that need installation in at least one runtime
        """
        servers_needing_installation = set()

        # Check each server reference
        for server_ref in server_references:
            try:
                # Get server info from registry to find the canonical ID
                server_info = self.registry_client.find_server_by_reference(server_ref)

                if not server_info:
                    # Server not found in registry, might be a local/custom server
                    # Add to installation list for safety
                    servers_needing_installation.add(server_ref)
                    continue

                server_id = server_info.get("id")
                if not server_id:
                    # No ID available, add to installation list
                    servers_needing_installation.add(server_ref)
                    continue

                # Check if this server needs installation in ANY of the target runtimes
                needs_installation = False
                for runtime in target_runtimes:
                    runtime_installed_ids = self._get_installed_server_ids([runtime])
                    if server_id not in runtime_installed_ids:
                        needs_installation = True
                        break

                if needs_installation:
                    servers_needing_installation.add(server_ref)

            except Exception as e:
                # If we can't check the server, assume it needs installation
                servers_needing_installation.add(server_ref)

        return list(servers_needing_installation)

    def _get_installed_server_ids(self, target_runtimes: List[str]) -> Set[str]:
        """Get all installed server IDs across target runtimes.

        Args:
            target_runtimes: List of runtimes to check

        Returns:
            Set of server IDs that are currently installed
        """
        installed_ids = set()

        # Import here to avoid circular imports
        try:
            from ..factory import ClientFactory
        except ImportError:
            return installed_ids

        for runtime in target_runtimes:
            try:
                client = ClientFactory.create_client(runtime)
                config = client.get_current_config()

                if isinstance(config, dict):
                    if runtime == 'copilot':
                        # Copilot stores servers in mcpServers object in mcp-config.json
                        mcp_servers = config.get("mcpServers", {})
                        for server_name, server_config in mcp_servers.items():
                            if isinstance(server_config, dict):
                                server_id = server_config.get("id")
                                if server_id:
                                    installed_ids.add(server_id)

                    elif runtime == 'codex':
                        # Codex stores servers as mcp_servers.{name} sections in config.toml
                        mcp_servers = config.get("mcp_servers", {})
                        for server_name, server_config in mcp_servers.items():
                            if isinstance(server_config, dict):
                                server_id = server_config.get("id")
                                if server_id:
                                    installed_ids.add(server_id)

                    elif runtime == 'vscode':
                        # VS Code stores servers in settings.json with different structure
                        # Check both mcpServers and any nested structure
                        mcp_servers = config.get("mcpServers", {})
                        for server_name, server_config in mcp_servers.items():
                            if isinstance(server_config, dict):
                                server_id = (
                                    server_config.get("id") or
                                    server_config.get("serverId") or
                                    server_config.get("server_id")
                                )
                                if server_id:
                                    installed_ids.add(server_id)

            except Exception:
                # If we can't read a runtime's config, skip it
                continue

        return installed_ids

    def validate_servers_exist(self, server_references: List[str]) -> Tuple[List[str], List[str]]:
        """Validate that all servers exist in the registry before attempting installation.

        This implements fail-fast validation similar to npm's behavior.

        Args:
            server_references: List of MCP server references to validate

        Returns:
            Tuple of (valid_servers, invalid_servers)
        """
        valid_servers = []
        invalid_servers = []

        for server_ref in server_references:
            try:
                server_info = self.registry_client.find_server_by_reference(server_ref)
                if server_info:
                    valid_servers.append(server_ref)
                else:
                    invalid_servers.append(server_ref)
            except Exception:
                invalid_servers.append(server_ref)

        return valid_servers, invalid_servers

    def batch_fetch_server_info(self, server_references: List[str]) -> Dict[str, Optional[Dict]]:
        """Batch fetch server info for all servers to avoid duplicate registry calls.

        Args:
            server_references: List of MCP server references

        Returns:
            Dictionary mapping server reference to server info (or None if not found)
        """
        server_info_cache = {}

        for server_ref in server_references:
            try:
                server_info = self.registry_client.find_server_by_reference(server_ref)
                server_info_cache[server_ref] = server_info
            except Exception:
                server_info_cache[server_ref] = None

        return server_info_cache

    def collect_runtime_variables(self, server_references: List[str], server_info_cache: Dict[str, Optional[Dict]] = None) -> Dict[str, str]:
        """Collect runtime variables from runtime_arguments.variables fields.

        These are NOT environment variables but CLI argument placeholders that need
        to be substituted directly into the command arguments (e.g., {ado_org}).

        Args:
            server_references: List of MCP server references
            server_info_cache: Pre-fetched server info to avoid duplicate registry calls

        Returns:
            Dictionary mapping runtime variable names to their values
        """
        all_required_vars = {}  # var_name -> {description, required, etc.}

        # Use cached server info if available, otherwise fetch on-demand
        if server_info_cache is None:
            server_info_cache = self.batch_fetch_server_info(server_references)

        # Collect all unique runtime variables from runtime_arguments
        for server_ref in server_references:
            try:
                server_info = server_info_cache.get(server_ref)
                if not server_info:
                    continue

                # Extract runtime variables from runtime_arguments
                packages = server_info.get("packages", [])
                for package in packages:
                    if isinstance(package, dict):
                        runtime_arguments = package.get("runtime_arguments", [])
                        for arg in runtime_arguments:
                            if isinstance(arg, dict) and "variables" in arg:
                                variables = arg.get("variables", {})
                                for var_name, var_info in variables.items():
                                    if isinstance(var_info, dict):
                                        all_required_vars[var_name] = {
                                            "description": var_info.get("description", ""),
                                            "required": var_info.get("is_required", True)
                                        }

            except Exception:
                # Skip servers we can't analyze
                continue

        # Prompt user for each runtime variable
        if all_required_vars:
            return self._prompt_for_environment_variables(all_required_vars)

        return {}

    def collect_environment_variables(self, server_references: List[str], server_info_cache: Dict[str, Optional[Dict]] = None) -> Dict[str, str]:
        """Collect environment variables needed by the specified servers.

        Args:
            server_references: List of MCP server references
            server_info_cache: Pre-fetched server info to avoid duplicate registry calls

        Returns:
            Dictionary mapping environment variable names to their values
        """
        shared_env_vars = {}
        all_required_vars = {}  # var_name -> {description, required, etc.}

        # Use cached server info if available, otherwise fetch on-demand
        if server_info_cache is None:
            server_info_cache = self.batch_fetch_server_info(server_references)

        # Collect all unique environment variables needed
        for server_ref in server_references:
            try:
                server_info = server_info_cache.get(server_ref)
                if not server_info:
                    continue

                # Extract environment variables from Docker args (legacy support)
                if "docker" in server_info and "args" in server_info["docker"]:
                    docker_args = server_info["docker"]["args"]
                    if isinstance(docker_args, list):
                        for arg in docker_args:
                            if isinstance(arg, str) and arg.startswith("${") and arg.endswith("}"):
                                var_name = arg[2:-1]  # Remove ${ and }
                                if var_name not in all_required_vars:
                                    all_required_vars[var_name] = {
                                        "description": f"Environment variable for {server_info.get('name', server_ref)}",
                                        "required": True
                                    }

                # Check packages for environment variables (preferred method)
                packages = server_info.get("packages", [])
                for package in packages:
                    if isinstance(package, dict):
                        # Try both camelCase and snake_case field names
                        env_vars = package.get("environmentVariables", []) or package.get("environment_variables", [])
                        for env_var in env_vars:
                            if isinstance(env_var, dict) and "name" in env_var:
                                var_name = env_var["name"]
                                all_required_vars[var_name] = {
                                    "description": env_var.get("description", ""),
                                    "required": env_var.get("required", True)
                                }

            except Exception:
                # Skip servers we can't analyze
                continue

        # Prompt user for each environment variable
        if all_required_vars:
            shared_env_vars = self._prompt_for_environment_variables(all_required_vars)

        return shared_env_vars

    def _prompt_for_environment_variables(self, required_vars: Dict[str, Dict]) -> Dict[str, str]:
        """Prompt user for environment variables.

        Args:
            required_vars: Dictionary mapping var names to their metadata

        Returns:
            Dictionary mapping variable names to their values
        """
        env_vars = {}

        # Check if we're in E2E test mode or CI environment - don't prompt interactively
        is_e2e_tests = os.getenv('APM_E2E_TESTS', '').lower() in ('1', 'true', 'yes')
        is_ci_environment = any(os.getenv(var) for var in ['CI', 'GITHUB_ACTIONS', 'TRAVIS', 'JENKINS_URL', 'BUILDKITE'])

        if is_e2e_tests or is_ci_environment:
            # In E2E tests or CI, provide reasonable defaults instead of prompting
            for var_name in sorted(required_vars.keys()):
                var_info = required_vars[var_name]
                existing_value = os.getenv(var_name)

                if existing_value:
                    env_vars[var_name] = existing_value
                else:
                    # Provide sensible defaults for known variables
                    if var_name == 'GITHUB_DYNAMIC_TOOLSETS':
                        env_vars[var_name] = '1'  # Enable dynamic toolsets for GitHub MCP server
                    elif 'token' in var_name.lower() or 'key' in var_name.lower():
                        # For tokens/keys, try environment defaults with fallback chain
                        # Priority: GITHUB_APM_PAT (APM modules) > GITHUB_TOKEN (user tokens)
                        env_vars[var_name] = os.getenv('GITHUB_APM_PAT') or os.getenv('GITHUB_TOKEN', '')
                    else:
                        # For other variables, use empty string or reasonable default
                        env_vars[var_name] = ''

            if is_e2e_tests:
                print("E2E test mode detected")
            else:
                print("CI environment detected")

            return env_vars

        try:
            # Try to use Rich for better prompts
            from rich.console import Console
            from rich.prompt import Prompt

            console = Console()
            console.print("Environment variables needed:", style="cyan")

            for var_name in sorted(required_vars.keys()):
                var_info = required_vars[var_name]
                description = var_info.get("description", "")
                required = var_info.get("required", True)

                # Check if already set in environment
                existing_value = os.getenv(var_name)

                if existing_value:
                    console.print(f"  ✅ {var_name}: [dim]using existing value[/dim]")
                    env_vars[var_name] = existing_value
                else:
                    # Determine if this looks like a password/secret
                    is_sensitive = any(keyword in var_name.lower()
                                       for keyword in ['password', 'secret', 'key', 'token', 'api'])

                    prompt_text = f"  {var_name}"
                    if description:
                        prompt_text += f" ({description})"

                    if required:
                        value = Prompt.ask(prompt_text, password=is_sensitive)
                    else:
                        value = Prompt.ask(prompt_text, default="", password=is_sensitive)

                    env_vars[var_name] = value

            console.print()

        except ImportError:
            # Fallback to simple input
            import click

            click.echo("Environment variables needed:")

            for var_name in sorted(required_vars.keys()):
                var_info = required_vars[var_name]
                description = var_info.get("description", "")

                existing_value = os.getenv(var_name)

                if existing_value:
                    click.echo(f"  ✅ {var_name}: using existing value")
                    env_vars[var_name] = existing_value
                else:
                    prompt_text = f"  {var_name}"
                    if description:
                        prompt_text += f" ({description})"

                    # Simple input for fallback
                    is_sensitive = any(keyword in var_name.lower()
                                       for keyword in ['password', 'secret', 'key', 'token', 'api'])

                    value = click.prompt(prompt_text, hide_input=is_sensitive, default="", show_default=False)
                    env_vars[var_name] = value

            click.echo()

        return env_vars
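
A hedged sketch of how these operations might be driven; runtime names depend on the clients registered in `ClientFactory`, and both calls hit the registry over the network:

```python
from apm_cli.registry.operations import MCPServerOperations

ops = MCPServerOperations()
valid, invalid = ops.validate_servers_exist(["github-mcp-server", "does-not-exist"])
print("valid:", valid, "invalid:", invalid)

pending = ops.check_servers_needing_installation(["vscode", "codex"], valid)
print("needs installation:", pending)
```
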
25
src/apm_cli/utils/__init__.py
Normal file
@@ -0,0 +1,25 @@
"""Utility modules for APM CLI."""

from .console import (
    _rich_success,
    _rich_error,
    _rich_warning,
    _rich_info,
    _rich_echo,
    _rich_panel,
    _create_files_table,
    _get_console,
    STATUS_SYMBOLS
)

__all__ = [
    '_rich_success',
    '_rich_error',
    '_rich_warning',
    '_rich_info',
    '_rich_echo',
    '_rich_panel',
    '_create_files_table',
    '_get_console',
    'STATUS_SYMBOLS'
]
159
src/apm_cli/utils/console.py
Normal file
@@ -0,0 +1,159 @@
"""Console utility functions for formatting and output."""

import click
import sys
from typing import Optional, Any

# Rich library imports with fallbacks
try:
    from rich.console import Console
    from rich.panel import Panel
    from rich.table import Table
    from rich import print as rich_print
    RICH_AVAILABLE = True
except ImportError:
    RICH_AVAILABLE = False
    Console = Any
    Panel = Any
    Table = Any
    rich_print = None

# Colorama imports for fallback
try:
    from colorama import Fore, Style, init
    init(autoreset=True)
    COLORAMA_AVAILABLE = True
except ImportError:
    COLORAMA_AVAILABLE = False
    Fore = None
    Style = None


# Status symbols for consistent iconography
STATUS_SYMBOLS = {
    'success': '✨',
    'sparkles': '✨',
    'running': '🚀',
    'gear': '⚙️',
    'info': '💡',
    'warning': '⚠️',
    'error': '❌',
    'check': '✅',
    'list': '📋',
    'preview': '👀',
    'robot': '🤖',
    'metrics': '📊'
}


def _get_console() -> Optional[Any]:
    """Get Rich console instance if available."""
    if RICH_AVAILABLE:
        try:
            return Console()
        except Exception:
            pass
    return None


def _rich_echo(message: str, color: str = "white", style: str = None, bold: bool = False, symbol: str = None):
    """Echo message with Rich formatting or colorama fallback."""
    # Handle backward compatibility - if style is provided, use it as color
    if style is not None:
        color = style

    if symbol and symbol in STATUS_SYMBOLS:
        symbol_char = STATUS_SYMBOLS[symbol]
        message = f"{symbol_char} {message}"

    console = _get_console()
    if console:
        try:
            style_str = color
            if bold:
                style_str = f"bold {color}"
            console.print(message, style=style_str)
            return
        except Exception:
            pass

    # Colorama fallback
    if COLORAMA_AVAILABLE and Fore:
        color_map = {
            'red': Fore.RED,
            'green': Fore.GREEN,
            'yellow': Fore.YELLOW,
            'blue': Fore.BLUE,
            'cyan': Fore.CYAN,
            'white': Fore.WHITE,
            'magenta': Fore.MAGENTA,
            'muted': Fore.WHITE,  # Add muted mapping
            'info': Fore.BLUE
        }
        color_code = color_map.get(color, Fore.WHITE)
        style_code = Style.BRIGHT if bold else ""
        click.echo(f"{color_code}{style_code}{message}{Style.RESET_ALL}")
    else:
        click.echo(message)


def _rich_success(message: str, symbol: str = None):
    """Display success message with green color and bold styling."""
    _rich_echo(message, color="green", symbol=symbol, bold=True)


def _rich_error(message: str, symbol: str = None):
    """Display error message with red color."""
    _rich_echo(message, color="red", symbol=symbol)


def _rich_warning(message: str, symbol: str = None):
    """Display warning message with yellow color."""
    _rich_echo(message, color="yellow", symbol=symbol)


def _rich_info(message: str, symbol: str = None):
    """Display info message with blue color."""
    _rich_echo(message, color="blue", symbol=symbol)


def _rich_panel(content: str, title: str = None, style: str = "cyan"):
    """Display content in a Rich panel with fallback."""
    console = _get_console()
    if console and Panel:
        try:
            panel = Panel(content, title=title, border_style=style)
            console.print(panel)
            return
        except Exception:
            pass

    # Fallback to simple text display
    if title:
        click.echo(f"\n--- {title} ---")
    click.echo(content)
    if title:
        click.echo("-" * (len(title) + 8))


def _create_files_table(files_data: list, title: str = "Files") -> Optional[Any]:
    """Create a Rich table for file display."""
    if not RICH_AVAILABLE or not Table:
        return None

    try:
        table = Table(title=f"📋 {title}", show_header=True, header_style="bold cyan")
        table.add_column("File", style="bold white")
        table.add_column("Description", style="white")

        for file_info in files_data:
            if isinstance(file_info, dict):
                table.add_row(file_info.get('name', ''), file_info.get('description', ''))
            elif isinstance(file_info, (list, tuple)) and len(file_info) >= 2:
                table.add_row(str(file_info[0]), str(file_info[1]))
            else:
                table.add_row(str(file_info), "")

        return table
    except Exception:
        return None
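
A minimal sketch of the console helpers above; output degrades gracefully from Rich to colorama to plain `click.echo` depending on what is installed:

```python
from apm_cli.utils.console import _rich_success, _rich_warning, _rich_panel

_rich_success("Compilation finished", symbol="check")
_rich_warning("No MCP servers configured", symbol="warning")
_rich_panel("3 primitives compiled\n2 instructions applied", title="Summary")
```
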
101
src/apm_cli/utils/helpers.py
Normal file
@@ -0,0 +1,101 @@
"""Helper utility functions for APM-CLI."""

import os
import platform
import subprocess
import shutil
import sys


def is_tool_available(tool_name):
    """Check if a command-line tool is available.

    Args:
        tool_name (str): Name of the tool to check.

    Returns:
        bool: True if the tool is available, False otherwise.
    """
    # First try using shutil.which which is more reliable across platforms
    if shutil.which(tool_name):
        return True

    # Fall back to subprocess approach if shutil.which returns None
    try:
        # Different approaches for different platforms
        if sys.platform == 'win32':
            # On Windows, use 'where' command but WITHOUT shell=True
            result = subprocess.run(['where', tool_name],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    shell=False,  # Changed from True to False
                                    check=False)
            return result.returncode == 0
        else:
            # On Unix-like systems, use 'which' command
            result = subprocess.run(['which', tool_name],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    check=False)
            return result.returncode == 0
    except Exception:
        return False


def get_available_package_managers():
    """Get available package managers on the system.

    Returns:
        dict: Dictionary of available package managers and their paths.
    """
    package_managers = {}

    # Check for Python package managers
    if is_tool_available("uv"):
        package_managers["uv"] = "uv"
    if is_tool_available("pip"):
        package_managers["pip"] = "pip"
    if is_tool_available("pipx"):
        package_managers["pipx"] = "pipx"

    # Check for JavaScript package managers
    if is_tool_available("npm"):
        package_managers["npm"] = "npm"
    if is_tool_available("yarn"):
        package_managers["yarn"] = "yarn"
    if is_tool_available("pnpm"):
        package_managers["pnpm"] = "pnpm"

    # Check for system package managers
    if is_tool_available("brew"):  # macOS
        package_managers["brew"] = "brew"
    if is_tool_available("apt"):  # Debian/Ubuntu
        package_managers["apt"] = "apt"
    if is_tool_available("yum"):  # CentOS/RHEL
        package_managers["yum"] = "yum"
    if is_tool_available("dnf"):  # Fedora
        package_managers["dnf"] = "dnf"
    if is_tool_available("apk"):  # Alpine
        package_managers["apk"] = "apk"
    if is_tool_available("pacman"):  # Arch
        package_managers["pacman"] = "pacman"

    return package_managers


def detect_platform():
    """Detect the current platform.

    Returns:
        str: Platform name (macos, linux, windows).
    """
    system = platform.system().lower()

    if system == "darwin":
        return "macos"
    elif system == "linux":
        return "linux"
    elif system == "windows":
        return "windows"
    else:
        return "unknown"
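
For illustration, a quick check of the helpers above on the local machine (results depend on what is installed):

```python
from apm_cli.utils.helpers import is_tool_available, get_available_package_managers, detect_platform

print(detect_platform())                    # e.g. "macos"
print(is_tool_available("npm"))             # True if npm is on PATH
print(sorted(get_available_package_managers()))
```
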
54
src/apm_cli/version.py
Normal file
@@ -0,0 +1,54 @@
"""Version management for APM CLI."""

import sys
from pathlib import Path

# Build-time version constant (will be injected during build)
# This avoids TOML parsing overhead during runtime
__BUILD_VERSION__ = None


def get_version() -> str:
    """
    Get the current version efficiently.

    First tries build-time constant, then falls back to pyproject.toml parsing.

    Returns:
        str: Version string
    """
    # Use build-time constant if available (fastest path)
    if __BUILD_VERSION__:
        return __BUILD_VERSION__

    # Fallback to reading from pyproject.toml (for development)
    try:
        # Handle PyInstaller bundle vs development
        if getattr(sys, 'frozen', False):
            # Running in PyInstaller bundle
            pyproject_path = Path(sys._MEIPASS) / 'pyproject.toml'
        else:
            # Running in development
            pyproject_path = Path(__file__).parent.parent.parent / "pyproject.toml"

        if pyproject_path.exists():
            # Simple regex parsing instead of full TOML library
            with open(pyproject_path, 'r', encoding='utf-8') as f:
                content = f.read()

            # Look for version = "x.y.z" pattern (including PEP 440 prereleases)
            import re
            match = re.search(r'version\s*=\s*["\']([^"\']+)["\']', content)
            if match:
                version = match.group(1)
                # Validate PEP 440 version patterns: x.y.z or x.y.z{a|b|rc}N
                if re.match(r'^\d+\.\d+\.\d+(a\d+|b\d+|rc\d+)?$', version):
                    return version
    except Exception:
        pass

    return "unknown"


# For backward compatibility
__version__ = get_version()
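
A one-liner usage sketch of the version helper above:

```python
from apm_cli.version import get_version, __version__

print(get_version())   # build-time constant, pyproject.toml value, or "unknown"
print(__version__)
```
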
1
src/apm_cli/workflow/__init__.py
Normal file
@@ -0,0 +1 @@
"""Workflow management package."""
100
src/apm_cli/workflow/discovery.py
Normal file
@@ -0,0 +1,100 @@
"""Discovery functionality for workflow files."""

import os
import glob
from .parser import parse_workflow_file


def discover_workflows(base_dir=None):
    """Find all .prompt.md files following VSCode's .github/prompts convention.

    Args:
        base_dir (str, optional): Base directory to search in. Defaults to current directory.

    Returns:
        list: List of WorkflowDefinition objects.
    """
    if base_dir is None:
        base_dir = os.getcwd()

    # Support VSCode's .github/prompts convention with .prompt.md files
    prompt_patterns = [
        "**/.github/prompts/*.prompt.md",  # VSCode convention: .github/prompts/
        "**/*.prompt.md"  # Generic .prompt.md files
    ]

    workflow_files = []
    for pattern in prompt_patterns:
        workflow_files.extend(glob.glob(os.path.join(base_dir, pattern), recursive=True))

    # Remove duplicates while preserving order
    seen = set()
    unique_files = []
    for file_path in workflow_files:
        if file_path not in seen:
            seen.add(file_path)
            unique_files.append(file_path)

    workflows = []
    for file_path in unique_files:
        try:
            workflow = parse_workflow_file(file_path)
            workflows.append(workflow)
        except Exception as e:
            print(f"Warning: Failed to parse {file_path}: {e}")

    return workflows


def create_workflow_template(name, output_dir=None, description=None, use_vscode_convention=True):
    """Create a basic workflow template file following VSCode's .github/prompts convention.

    Args:
        name (str): Name of the workflow.
        output_dir (str, optional): Directory to create the file in. Defaults to current directory.
        description (str, optional): Description for the workflow. Defaults to generic description.
        use_vscode_convention (bool): Whether to use VSCode's .github/prompts structure. Defaults to True.

    Returns:
        str: Path to the created file.
    """
    if output_dir is None:
        output_dir = os.getcwd()

    title = name.replace("-", " ").title()
    workflow_description = description or f"Workflow for {title.lower()}"

    template = f"""---
description: {workflow_description}
author: Your Name
mcp:
  - package1
  - package2
input:
  - param1
  - param2
---

# {title}

1. Step One:
   - Details for step one
   - Use parameters like this: ${{input:param1}}

2. Step Two:
   - Details for step two
"""

    if use_vscode_convention:
        # Create .github/prompts directory structure
        prompts_dir = os.path.join(output_dir, ".github", "prompts")
        os.makedirs(prompts_dir, exist_ok=True)
        file_path = os.path.join(prompts_dir, f"{name}.prompt.md")
    else:
        # Create .prompt.md file in output directory
        file_path = os.path.join(output_dir, f"{name}.prompt.md")

    with open(file_path, "w", encoding='utf-8') as f:
        f.write(template)

    return file_path
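
A short sketch of the discovery helpers above; the workflow name is illustrative and files are created relative to the current directory:

```python
from apm_cli.workflow.discovery import create_workflow_template, discover_workflows

path = create_workflow_template("release-notes", description="Draft release notes")
print("created", path)   # .github/prompts/release-notes.prompt.md

for wf in discover_workflows():
    print(wf.name, wf.input_parameters)
```
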
92
src/apm_cli/workflow/parser.py
Normal file
92
src/apm_cli/workflow/parser.py
Normal file
@@ -0,0 +1,92 @@
"""Parser for workflow definition files."""

import os
import frontmatter


class WorkflowDefinition:
    """Simple container for workflow data."""

    def __init__(self, name, file_path, metadata, content):
        """Initialize a workflow definition.

        Args:
            name (str): Name of the workflow.
            file_path (str): Path to the workflow file.
            metadata (dict): Metadata from the frontmatter.
            content (str): Content of the workflow file.
        """
        self.name = name
        self.file_path = file_path
        self.description = metadata.get('description', '')
        self.author = metadata.get('author', '')
        self.mcp_dependencies = metadata.get('mcp', [])
        self.input_parameters = metadata.get('input', [])
        self.llm_model = metadata.get('llm', None)  # LLM model specified in frontmatter
        self.content = content

    def validate(self):
        """Basic validation of required fields.

        Returns:
            list: List of validation errors.
        """
        errors = []
        if not self.description:
            errors.append("Missing 'description' in frontmatter")
        # Input parameters are optional, so we don't check for them
        return errors


def parse_workflow_file(file_path):
    """Parse a workflow file.

    Args:
        file_path (str): Path to the workflow file.

    Returns:
        WorkflowDefinition: Parsed workflow definition.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            post = frontmatter.load(f)

        # Extract name based on file structure
        name = _extract_workflow_name(file_path)
        metadata = post.metadata
        content = post.content

        return WorkflowDefinition(name, file_path, metadata, content)
    except Exception as e:
        raise ValueError(f"Failed to parse workflow file: {e}")


def _extract_workflow_name(file_path):
    """Extract workflow name from file path based on naming conventions.

    Args:
        file_path (str): Path to the workflow file.

    Returns:
        str: Extracted workflow name.
    """
    # Normalize path separators
    normalized_path = os.path.normpath(file_path)
    path_parts = normalized_path.split(os.sep)

    # Check if it's a VSCode .github/prompts convention
    if '.github' in path_parts and 'prompts' in path_parts:
        # For .github/prompts/name.prompt.md, extract name from filename
        github_idx = path_parts.index('.github')
        if (github_idx + 1 < len(path_parts) and
                path_parts[github_idx + 1] == 'prompts'):
            basename = os.path.basename(file_path)
            if basename.endswith('.prompt.md'):
                return basename.replace('.prompt.md', '')

    # For .prompt.md files, extract name from filename
    if file_path.endswith('.prompt.md'):
        return os.path.basename(file_path).replace('.prompt.md', '')

    # Fallback: use filename without extension
    return os.path.splitext(os.path.basename(file_path))[0]
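A minimal usage sketch of this parser (the file path below is hypothetical, and `python-frontmatter` must be installed):

```python
from apm_cli.workflow.parser import parse_workflow_file

wf = parse_workflow_file(".github/prompts/hello-world.prompt.md")
print(wf.name)          # "hello-world", derived from the filename
print(wf.description)   # 'description' value from the frontmatter
print(wf.validate())    # [] when a description is present
```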
193 src/apm_cli/workflow/runner.py Normal file
@@ -0,0 +1,193 @@
"""Runner for workflow execution."""

import os
import re
from colorama import Fore, Style
from .parser import WorkflowDefinition
from .discovery import discover_workflows
from ..runtime.factory import RuntimeFactory

# Color constants (matching cli.py)
WARNING = f"{Fore.YELLOW}"
RESET = f"{Style.RESET_ALL}"


def substitute_parameters(content, params):
    """Simple string-based parameter substitution.

    Args:
        content (str): Content to substitute parameters in.
        params (dict): Parameters to substitute.

    Returns:
        str: Content with parameters substituted.
    """
    result = content
    for key, value in params.items():
        placeholder = f"${{input:{key}}}"
        result = result.replace(placeholder, str(value))
    return result


def collect_parameters(workflow_def, provided_params=None):
    """Collect parameters from command line or prompt for missing ones.

    Args:
        workflow_def (WorkflowDefinition): Workflow definition.
        provided_params (dict, optional): Parameters provided from command line.

    Returns:
        dict: Complete set of parameters.
    """
    provided_params = provided_params or {}

    # If there are no input parameters defined, return the provided ones
    if not workflow_def.input_parameters:
        return provided_params

    # Convert list parameters to dict if they're just names
    if isinstance(workflow_def.input_parameters, list):
        # List of parameter names
        param_names = workflow_def.input_parameters
    else:
        # Already a dict
        param_names = list(workflow_def.input_parameters.keys())

    missing_params = [p for p in param_names if p not in provided_params]

    if missing_params:
        print(f"Workflow '{workflow_def.name}' requires the following parameters:")
        for param in missing_params:
            value = input(f" {param}: ")
            provided_params[param] = value

    return provided_params


def find_workflow_by_name(name, base_dir=None):
    """Find a workflow by name or file path.

    Args:
        name (str): Name of the workflow or file path.
        base_dir (str, optional): Base directory to search in.

    Returns:
        WorkflowDefinition: Workflow definition if found, None otherwise.
    """
    if base_dir is None:
        base_dir = os.getcwd()

    # If name looks like a file path, try to parse it directly
    if name.endswith('.prompt.md') or name.endswith('.workflow.md'):
        # Handle relative paths
        if not os.path.isabs(name):
            name = os.path.join(base_dir, name)

        if os.path.exists(name):
            try:
                from .parser import parse_workflow_file
                return parse_workflow_file(name)
            except Exception as e:
                print(f"Error parsing workflow file {name}: {e}")
                return None

    # Otherwise, search by name
    workflows = discover_workflows(base_dir)
    for workflow in workflows:
        if workflow.name == name:
            return workflow
    return None


def run_workflow(workflow_name, params=None, base_dir=None):
    """Run a workflow with parameters.

    Args:
        workflow_name (str): Name of the workflow to run.
        params (dict, optional): Parameters to use.
        base_dir (str, optional): Base directory to search for workflows.

    Returns:
        tuple: (bool, str) Success status and result content.
    """
    params = params or {}

    # Extract runtime and model information
    runtime_name = params.get('_runtime', None)
    fallback_llm = params.get('_llm', None)

    # Find the workflow
    workflow = find_workflow_by_name(workflow_name, base_dir)
    if not workflow:
        return False, f"Workflow '{workflow_name}' not found."

    # Validate the workflow
    errors = workflow.validate()
    if errors:
        return False, f"Invalid workflow: {', '.join(errors)}"

    # Collect missing parameters
    all_params = collect_parameters(workflow, params)

    # Substitute parameters
    result_content = substitute_parameters(workflow.content, all_params)

    # Determine the LLM model to use
    # Priority: frontmatter llm > --llm flag > runtime default
    llm_model = workflow.llm_model or fallback_llm

    # Show warning if both frontmatter and --llm flag are specified
    if workflow.llm_model and fallback_llm:
        print(f"{WARNING}WARNING: Both frontmatter 'llm: {workflow.llm_model}' and --llm '{fallback_llm}' specified. Using frontmatter value: {workflow.llm_model}{RESET}")

    # Always execute with runtime (use best available if not specified)
    try:
        # Use specified runtime type or get best available
        if runtime_name:
            # Check if runtime_name is a valid runtime type
            if RuntimeFactory.runtime_exists(runtime_name):
                runtime = RuntimeFactory.create_runtime(runtime_name, llm_model)
            else:
                # Invalid runtime name - fail with clear error message
                available_runtimes = [adapter.get_runtime_name() for adapter in RuntimeFactory._RUNTIME_ADAPTERS if adapter.is_available()]
                return False, f"Invalid runtime '{runtime_name}'. Available runtimes: {', '.join(available_runtimes)}"
        else:
            runtime = RuntimeFactory.create_runtime(model_name=llm_model)

        # Execute the prompt with the runtime
        response = runtime.execute_prompt(result_content)
        return True, response

    except Exception as e:
        return False, f"Runtime execution failed: {str(e)}"


def preview_workflow(workflow_name, params=None, base_dir=None):
    """Preview a workflow with parameters substituted (without execution).

    Args:
        workflow_name (str): Name of the workflow to preview.
        params (dict, optional): Parameters to use.
        base_dir (str, optional): Base directory to search for workflows.

    Returns:
        tuple: (bool, str) Success status and processed content.
    """
    params = params or {}

    # Find the workflow
    workflow = find_workflow_by_name(workflow_name, base_dir)
    if not workflow:
        return False, f"Workflow '{workflow_name}' not found."

    # Validate the workflow
    errors = workflow.validate()
    if errors:
        return False, f"Invalid workflow: {', '.join(errors)}"

    # Collect missing parameters
    all_params = collect_parameters(workflow, params)

    # Substitute parameters and return the processed content
    result_content = substitute_parameters(workflow.content, all_params)
    return True, result_content
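A short sketch of the `${input:...}` substitution that `run_workflow` and `preview_workflow` rely on (the values here are made up):

```python
from apm_cli.workflow.runner import substitute_parameters

content = "Deploy ${input:service} to ${input:env}"
print(substitute_parameters(content, {"service": "api", "env": "staging"}))
# -> "Deploy api to staging"
```

Note the precedence spelled out in the comment above: an `llm:` value in the frontmatter wins over the `--llm` flag, which in turn wins over the runtime default.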
@@ -3,10 +3,20 @@
# requires-python = ">=3.11"
# dependencies = [
# "typer",
-# "rich",
+# "rich>=13.0.0",
# "platformdirs",
# "readchar",
# "httpx",
+# "click>=8.0.0",
+# "colorama>=0.4.6",
+# "pyyaml>=6.0.0",
+# "requests>=2.28.0",
+# "python-frontmatter>=1.0.0",
+# "tomli>=1.2.0; python_version<'3.11'",
+# "toml>=0.10.2",
+# "rich-click>=1.7.0",
+# "watchdog>=3.0.0",
+# "GitPython>=3.1.0",
# ]
# ///
"""
@@ -14,13 +24,11 @@ Specify CLI - Setup tool for Specify projects

Usage:
    uvx specify-cli.py init <project-name>
-    uvx specify-cli.py init .
    uvx specify-cli.py init --here

Or install globally:
    uv tool install --from specify-cli.py specify-cli
    specify init <project-name>
-    specify init .
    specify init --here
"""

@@ -30,7 +38,6 @@ import sys
import zipfile
import tempfile
import shutil
-import shlex
import json
from pathlib import Path
from typing import Optional, Tuple
@@ -47,6 +54,12 @@ from rich.table import Table
from rich.tree import Tree
from typer.core import TyperGroup
+
+# APM imports
+from apm_cli.cli import init as apm_init, install as apm_install, compile as apm_compile, prune as apm_prune, uninstall as apm_uninstall
+from apm_cli.commands.deps import deps as apm_deps
+import click
+from click.testing import CliRunner

# For cross-platform keyboard input
import readchar
import ssl
@@ -55,28 +68,12 @@ import truststore
ssl_context = truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client = httpx.Client(verify=ssl_context)

-def _github_token(cli_token: str | None = None) -> str | None:
-    """Return sanitized GitHub token (cli arg takes precedence) or None."""
-    return ((cli_token or os.getenv("GH_TOKEN") or os.getenv("GITHUB_TOKEN") or "").strip()) or None
-
-def _github_auth_headers(cli_token: str | None = None) -> dict:
-    """Return Authorization header dict only when a non-empty token exists."""
-    token = _github_token(cli_token)
-    return {"Authorization": f"Bearer {token}"} if token else {}
-
# Constants
AI_CHOICES = {
    "copilot": "GitHub Copilot",
    "claude": "Claude Code",
    "gemini": "Gemini CLI",
-    "cursor": "Cursor",
-    "qwen": "Qwen Code",
-    "opencode": "opencode",
-    "codex": "Codex CLI",
-    "windsurf": "Windsurf",
-    "kilocode": "Kilo Code",
-    "auggie": "Auggie CLI",
-    "roo": "Roo Code",
+    "cursor": "Cursor"
}
# Add script type choices
SCRIPT_TYPE_CHOICES = {"sh": "POSIX Shell (bash/zsh)", "ps": "PowerShell"}
@@ -94,7 +91,7 @@ BANNER = """
╚══════╝╚═╝ ╚══════╝ ╚═════╝╚═╝╚═╝ ╚═╝
"""

-TAGLINE = "GitHub Spec Kit - Spec-Driven Development Toolkit"
+TAGLINE = "Spec-Driven Development Toolkit"
class StepTracker:
    """Track and render hierarchical steps without emojis, similar to Claude Code tree output.
    Supports live auto-refresh via an attached refresh callback.
@@ -145,7 +142,7 @@ class StepTracker:
            pass

    def render(self):
-        tree = Tree(f"[cyan]{self.title}[/cyan]", guide_style="grey50")
+        tree = Tree(f"[bold cyan]{self.title}[/bold cyan]", guide_style="grey50")
        for step in self.steps:
            label = step["label"]
            detail_text = step["detail"].strip() if step["detail"] else ""
@@ -194,9 +191,9 @@ def get_key():
    key = readchar.readkey()

    # Arrow keys
-    if key == readchar.key.UP or key == readchar.key.CTRL_P:
+    if key == readchar.key.UP:
        return 'up'
-    if key == readchar.key.DOWN or key == readchar.key.CTRL_N:
+    if key == readchar.key.DOWN:
        return 'down'

    # Enter/Return
@@ -238,14 +235,14 @@ def select_with_arrows(options: dict, prompt_text: str = "Select an option", def
    def create_selection_panel():
        """Create the selection panel with current selection highlighted."""
        table = Table.grid(padding=(0, 2))
-        table.add_column(style="cyan", justify="left", width=3)
+        table.add_column(style="bright_cyan", justify="left", width=3)
        table.add_column(style="white", justify="left")

        for i, key in enumerate(option_keys):
            if i == selected_index:
-                table.add_row("▶", f"[cyan]{key}[/cyan] [dim]({options[key]})[/dim]")
+                table.add_row("▶", f"[bright_cyan]{key}: {options[key]}[/bright_cyan]")
            else:
-                table.add_row(" ", f"[cyan]{key}[/cyan] [dim]({options[key]})[/dim]")
+                table.add_row(" ", f"[white]{key}: {options[key]}[/white]")

        table.add_row("", "")
        table.add_row("", "[dim]Use ↑/↓ to navigate, Enter to select, Esc to cancel[/dim]")
@@ -313,6 +310,240 @@ app = typer.Typer(
    cls=BannerGroup,
)
+
+@click.group()
+def apm_click():
+    """APM - Agent Package Manager commands"""
+    pass
+
+# Add APM commands to the Click group
+apm_click.add_command(apm_init, name="init")
+apm_click.add_command(apm_install, name="install")
+apm_click.add_command(apm_uninstall, name="uninstall")
+apm_click.add_command(apm_compile, name="compile")
+apm_click.add_command(apm_prune, name="prune")
+apm_click.add_command(apm_deps, name="deps")
+
+
+# Create APM subcommands as Typer commands
+apm_app = typer.Typer(
+    name="apm",
+    help="APM - Agent Package Manager commands for context management.",
+    add_completion=False,
+)
+
+@apm_app.command("init", context_settings={"allow_extra_args": True, "allow_interspersed_args": False})
+def apm_init_wrapper(
+    ctx: typer.Context,
+    project_name: str = typer.Argument(None, help="Project name"),
+    force: bool = typer.Option(False, "-f", "--force", help="Overwrite existing files without confirmation"),
+    yes: bool = typer.Option(False, "-y", "--yes", help="Skip interactive questionnaire and use defaults"),
+):
+    """Initialize a new APM project"""
+    args = []
+    if project_name:
+        args.append(project_name)
+    if force:
+        args.append("--force")
+    if yes:
+        args.append("--yes")
+    if ctx.args:
+        args.extend(ctx.args)
+
+    _run_apm_command(["init"] + args)
+
+@apm_app.command("install", context_settings={"allow_extra_args": True, "allow_interspersed_args": False})
+def apm_install_wrapper(
+    ctx: typer.Context,
+    packages: list[str] = typer.Argument(None, help="APM packages to add and install (owner/repo format)"),
+    runtime: str = typer.Option(None, "--runtime", help="Target specific runtime only (codex, vscode)"),
+    exclude: str = typer.Option(None, "--exclude", help="Exclude specific runtime from installation"),
+    only: str = typer.Option(None, "--only", help="Install only specific dependency type (apm or mcp)"),
+    update: bool = typer.Option(False, "--update", help="Update dependencies to latest Git references"),
+    dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be installed without installing"),
+):
+    """Install APM and MCP dependencies from apm.yml.
+
+    Examples:
+        specify apm install                            # Install existing deps from apm.yml
+        specify apm install github/design-guidelines   # Add package and install
+        specify apm install org/pkg1 org/pkg2          # Add multiple packages and install
+    """
+    args = []
+
+    # Add package arguments first
+    if packages:
+        args.extend(packages)
+
+    if runtime:
+        args.extend(["--runtime", runtime])
+    if exclude:
+        args.extend(["--exclude", exclude])
+    if only:
+        args.extend(["--only", only])
+    if update:
+        args.append("--update")
+    if dry_run:
+        args.append("--dry-run")
+    if ctx.args:
+        args.extend(ctx.args)
+
+    _run_apm_command(["install"] + args)
+
+@apm_app.command("compile", context_settings={"allow_extra_args": True, "allow_interspersed_args": False})
+def apm_compile_wrapper(
+    ctx: typer.Context,
+    output: str = typer.Option(None, "-o", "--output", help="Output file path (for single-file mode)"),
+    dry_run: bool = typer.Option(False, "--dry-run", help="🔍 Preview compilation without writing files (shows placement decisions)"),
+    no_links: bool = typer.Option(False, "--no-links", help="Skip markdown link resolution"),
+    chatmode: str = typer.Option(None, "--chatmode", help="Chatmode to prepend to AGENTS.md files"),
+    watch: bool = typer.Option(False, "--watch", help="Auto-regenerate on changes"),
+    validate: bool = typer.Option(False, "--validate", help="Validate primitives without compiling"),
+    with_constitution: bool = typer.Option(True, "--with-constitution/--no-constitution", help="Include Spec Kit constitution block at top if memory/constitution.md present"),
+    single_agents: bool = typer.Option(False, "--single-agents", help="📄 Force single-file compilation (legacy mode)"),
+    verbose: bool = typer.Option(False, "-v", "--verbose", help="🔍 Show detailed source attribution and optimizer analysis"),
+    local_only: bool = typer.Option(False, "--local-only", help="🏠 Ignore dependencies, compile only local primitives"),
+    clean: bool = typer.Option(False, "--clean", help="🧹 Remove orphaned AGENTS.md files that are no longer generated"),
+):
+    """Generate AGENTS.md from APM context"""
+    # Build arguments for the Click command
+    args = []
+    if output:
+        args.extend(["-o", output])
+    if dry_run:
+        args.append("--dry-run")
+    if no_links:
+        args.append("--no-links")
+    if chatmode:
+        args.extend(["--chatmode", chatmode])
+    if watch:
+        args.append("--watch")
+    if validate:
+        args.append("--validate")
+    if not with_constitution:
+        args.append("--no-constitution")
+    if single_agents:
+        args.append("--single-agents")
+    if verbose:
+        args.append("--verbose")
+    if local_only:
+        args.append("--local-only")
+    if clean:
+        args.append("--clean")
+
+    # Add any extra arguments
+    if ctx.args:
+        args.extend(ctx.args)
+
+    _run_apm_command(["compile"] + args)
+
+@apm_app.command("prune", context_settings={"allow_extra_args": True, "allow_interspersed_args": False})
+def apm_prune_wrapper(
+    ctx: typer.Context,
+    dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be removed without removing"),
+):
+    """Remove APM packages not listed in apm.yml.
+
+    This command cleans up the apm_modules/ directory by removing packages that
+    were previously installed but are no longer declared as dependencies in apm.yml.
+
+    Examples:
+        specify apm prune              # Remove orphaned packages
+        specify apm prune --dry-run    # Show what would be removed
+    """
+    args = []
+    if dry_run:
+        args.append("--dry-run")
+
+    # Add any extra arguments
+    if ctx.args:
+        args.extend(ctx.args)
+
+    _run_apm_command(["prune"] + args)
+
+@apm_app.command("uninstall", context_settings={"allow_extra_args": True, "allow_interspersed_args": False})
+def apm_uninstall_wrapper(
+    ctx: typer.Context,
+    packages: list[str] = typer.Argument(..., help="APM packages to remove (owner/repo format)"),
+    dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be removed without removing"),
+):
+    """Remove APM packages from apm.yml and apm_modules.
+
+    This command removes packages from both the apm.yml dependencies list
+    and the apm_modules/ directory. It's the opposite of 'specify apm install <package>'.
+
+    Examples:
+        specify apm uninstall github/design-guidelines   # Remove one package
+        specify apm uninstall org/pkg1 org/pkg2          # Remove multiple packages
+        specify apm uninstall github/pkg --dry-run       # Show what would be removed
+    """
+    args = []
+
+    # Add package arguments first
+    if packages:
+        args.extend(packages)
+
+    if dry_run:
+        args.append("--dry-run")
+
+    # Add any extra arguments
+    if ctx.args:
+        args.extend(ctx.args)
+
+    _run_apm_command(["uninstall"] + args)
+
+# Create deps subcommands as Typer sub-application
+deps_app = typer.Typer(
+    name="deps",
+    help="🔗 Manage APM package dependencies",
+    add_completion=False,
+)
+
+@deps_app.command("clean")
+def apm_deps_clean_wrapper(ctx: typer.Context):
+    """Remove all APM dependencies"""
+    _run_apm_command(["deps", "clean"] + (ctx.args or []))
+
+@deps_app.command("info")
+def apm_deps_info_wrapper(ctx: typer.Context):
+    """Show detailed package information"""
+    _run_apm_command(["deps", "info"] + (ctx.args or []))
+
+@deps_app.command("list")
+def apm_deps_list_wrapper(ctx: typer.Context):
+    """List installed APM dependencies"""
+    _run_apm_command(["deps", "list"] + (ctx.args or []))
+
+@deps_app.command("tree")
+def apm_deps_tree_wrapper(ctx: typer.Context):
+    """Show dependency tree structure"""
+    _run_apm_command(["deps", "tree"] + (ctx.args or []))
+
+@deps_app.command("update")
+def apm_deps_update_wrapper(ctx: typer.Context):
+    """Update APM dependencies"""
+    _run_apm_command(["deps", "update"] + (ctx.args or []))
+
+# Add the deps sub-application to the APM app
+apm_app.add_typer(deps_app, name="deps")
+
+def _run_apm_command(args: list[str]):
+    """Helper to run APM Click commands"""
+    original_argv = sys.argv.copy()
+    try:
+        sys.argv = ["apm"] + args
+        try:
+            apm_click.main(args, standalone_mode=False)
+        except SystemExit as e:
+            if e.code != 0:
+                raise typer.Exit(e.code)
+    finally:
+        sys.argv = original_argv
+
+# Add the APM subcommand app to the main app
+app.add_typer(apm_app, name="apm")
+
+# Remove the old apm_command since we're using the Typer subcommand app now
+

def show_banner():
    """Display the ASCII art banner."""
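With this wiring, each APM Click command is exposed as a `specify apm ...` subcommand. A typical session might look like the following (the package name is illustrative):

```
specify apm init my-project                  # scaffold an APM project
specify apm install github/design-guidelines # add a package and install dependencies
specify apm compile --dry-run                # preview AGENTS.md generation
specify apm deps list                        # list installed APM packages
```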
@@ -360,13 +591,13 @@ def run_command(cmd: list[str], check_return: bool = True, capture: bool = False
    return None


-def check_tool_for_tracker(tool: str, tracker: StepTracker) -> bool:
+def check_tool_for_tracker(tool: str, install_hint: str, tracker: StepTracker) -> bool:
    """Check if a tool is installed and update tracker."""
    if shutil.which(tool):
        tracker.complete(tool, "available")
        return True
    else:
-        tracker.error(tool, "not found")
+        tracker.error(tool, f"not found - {install_hint}")
        return False


@@ -385,6 +616,8 @@ def check_tool(tool: str, install_hint: str) -> bool:
    if shutil.which(tool):
        return True
    else:
+        console.print(f"[yellow]⚠️ {tool} not found[/yellow]")
+        console.print(f" Install with: [cyan]{install_hint}[/cyan]")
        return False


@@ -433,7 +666,7 @@ def init_git_repo(project_path: Path, quiet: bool = False) -> bool:
        os.chdir(original_cwd)


-def download_template_from_github(ai_assistant: str, download_dir: Path, *, script_type: str = "sh", verbose: bool = True, show_progress: bool = True, client: httpx.Client = None, debug: bool = False, github_token: str = None) -> Tuple[Path, dict]:
+def download_template_from_github(ai_assistant: str, download_dir: Path, *, script_type: str = "sh", verbose: bool = True, show_progress: bool = True, client: httpx.Client = None, debug: bool = False) -> Tuple[Path, dict]:
    repo_owner = "github"
    repo_name = "spec-kit"
    if client is None:
@@ -444,12 +677,7 @@ def download_template_from_github(ai_assistant: str, download_dir: Path, *, scri
    api_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest"

    try:
-        response = client.get(
-            api_url,
-            timeout=30,
-            follow_redirects=True,
-            headers=_github_auth_headers(github_token),
-        )
+        response = client.get(api_url, timeout=30, follow_redirects=True)
        status = response.status_code
        if status != 200:
            msg = f"GitHub API returned {status} for {api_url}"
@@ -466,21 +694,20 @@ def download_template_from_github(ai_assistant: str, download_dir: Path, *, scri
        raise typer.Exit(1)

    # Find the template asset for the specified AI assistant
-    assets = release_data.get("assets", [])
    pattern = f"spec-kit-template-{ai_assistant}-{script_type}"
    matching_assets = [
-        asset for asset in assets
+        asset for asset in release_data.get("assets", [])
        if pattern in asset["name"] and asset["name"].endswith(".zip")
    ]

-    asset = matching_assets[0] if matching_assets else None
-    if asset is None:
-        console.print(f"[red]No matching release asset found[/red] for [bold]{ai_assistant}[/bold] (expected pattern: [bold]{pattern}[/bold])")
-        asset_names = [a.get('name', '?') for a in assets]
+    if not matching_assets:
+        console.print(f"[red]No matching release asset found[/red] for pattern: [bold]{pattern}[/bold]")
+        asset_names = [a.get('name','?') for a in release_data.get('assets', [])]
        console.print(Panel("\n".join(asset_names) or "(no assets)", title="Available Assets", border_style="yellow"))
        raise typer.Exit(1)

+    # Use the first matching asset
+    asset = matching_assets[0]
    download_url = asset["browser_download_url"]
    filename = asset["name"]
    file_size = asset["size"]
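For example, with `ai_assistant="copilot"` and `script_type="sh"`, a release asset only matches when its name contains `spec-kit-template-copilot-sh` and ends in `.zip`.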
@@ -489,19 +716,14 @@ def download_template_from_github(ai_assistant: str, download_dir: Path, *, scri
        console.print(f"[cyan]Found template:[/cyan] {filename}")
        console.print(f"[cyan]Size:[/cyan] {file_size:,} bytes")
        console.print(f"[cyan]Release:[/cyan] {release_data['tag_name']}")

+    # Download the file
    zip_path = download_dir / filename
    if verbose:
        console.print(f"[cyan]Downloading template...[/cyan]")

    try:
-        with client.stream(
-            "GET",
-            download_url,
-            timeout=60,
-            follow_redirects=True,
-            headers=_github_auth_headers(github_token),
-        ) as response:
+        with client.stream("GET", download_url, timeout=60, follow_redirects=True) as response:
            if response.status_code != 200:
                body_sample = response.text[:400]
                raise RuntimeError(f"Download failed with {response.status_code}\nHeaders: {response.headers}\nBody (truncated): {body_sample}")
@@ -545,7 +767,7 @@ def download_template_from_github(ai_assistant: str, download_dir: Path, *, scri
    return zip_path, metadata


-def download_and_extract_template(project_path: Path, ai_assistant: str, script_type: str, is_current_dir: bool = False, *, verbose: bool = True, tracker: StepTracker | None = None, client: httpx.Client = None, debug: bool = False, github_token: str = None) -> Path:
+def download_and_extract_template(project_path: Path, ai_assistant: str, script_type: str, is_current_dir: bool = False, *, verbose: bool = True, tracker: StepTracker | None = None, client: httpx.Client = None, debug: bool = False) -> Path:
    """Download the latest release and extract it to create a new project.
    Returns project_path. Uses tracker if provided (with keys: fetch, download, extract, cleanup)
    """
@@ -562,8 +784,7 @@ def download_and_extract_template(project_path: Path, ai_assistant: str, script_
        verbose=verbose and tracker is None,
        show_progress=(tracker is None),
        client=client,
-        debug=debug,
-        github_token=github_token
+        debug=debug
    )
    if tracker:
        tracker.complete("fetch", f"release {meta['release']} ({meta['size']:,} bytes)")
@@ -747,64 +968,98 @@ def ensure_executable_scripts(project_path: Path, tracker: StepTracker | None =
        for f in failures:
            console.print(f" - {f}")


+def _create_apm_structure(project_path: Path, project_name: str, ai_assistant: str = "copilot") -> None:
+    """Create APM structure in the project directory."""
+    # Copy APM template files
+    template_source = Path(__file__).parent.parent.parent / "templates" / "apm" / "hello-world"
+
+    if not template_source.exists():
+        raise FileNotFoundError(f"APM template not found at {template_source}")
+
+    # Copy APM files to project root
+    files_to_copy = [
+        "apm.yml",
+        "hello-world.prompt.md",
+        "feature-implementation.prompt.md",
+        "README.md"
+    ]
+
+    for file_name in files_to_copy:
+        src_file = template_source / file_name
+        if src_file.exists():
+            shutil.copy2(src_file, project_path / file_name)
+
+    # Copy .apm directory
+    apm_src = template_source / ".apm"
+    apm_dst = project_path / ".apm"
+    if apm_src.exists():
+        shutil.copytree(apm_src, apm_dst, dirs_exist_ok=True)
+
+    # Update apm.yml with proper template variable replacement
+    apm_yml = project_path / "apm.yml"
+    if apm_yml.exists():
+        content = apm_yml.read_text()
+
+        # Replace template variables with actual values
+        replacements = {
+            "{{project_name}}": project_name,
+            "{{version}}": "1.0.0",
+            "{{description}}": f"AI-native project powered by {ai_assistant}",
+            "{{author}}": "Developer",
+            "hello-world": project_name  # Also replace any hello-world references
+        }
+
+        for placeholder, value in replacements.items():
+            content = content.replace(placeholder, value)
+
+        apm_yml.write_text(content)
+
+
@app.command()
def init(
-    project_name: str = typer.Argument(None, help="Name for your new project directory (optional if using --here, or use '.' for current directory)"),
+    project_name: str = typer.Argument(None, help="Name for your new project directory (optional if using --here)"),
-    ai_assistant: str = typer.Option(None, "--ai", help="AI assistant to use: claude, gemini, copilot, cursor, qwen, opencode, codex, windsurf, kilocode, or auggie"),
+    ai_assistant: str = typer.Option(None, "--ai", help="AI assistant to use: claude, gemini, copilot, or cursor"),
    script_type: str = typer.Option(None, "--script", help="Script type to use: sh or ps"),
    ignore_agent_tools: bool = typer.Option(False, "--ignore-agent-tools", help="Skip checks for AI agent tools like Claude Code"),
    no_git: bool = typer.Option(False, "--no-git", help="Skip git repository initialization"),
    here: bool = typer.Option(False, "--here", help="Initialize project in the current directory instead of creating a new one"),
-    force: bool = typer.Option(False, "--force", help="Force merge/overwrite when using --here (skip confirmation)"),
    skip_tls: bool = typer.Option(False, "--skip-tls", help="Skip SSL/TLS verification (not recommended)"),
    debug: bool = typer.Option(False, "--debug", help="Show verbose diagnostic output for network and extraction failures"),
-    github_token: str = typer.Option(None, "--github-token", help="GitHub token to use for API requests (or set GH_TOKEN or GITHUB_TOKEN environment variable)"),
+    use_apm: bool = typer.Option(False, "--use-apm", help="Include APM (Agent Package Manager) structure for context management"),
):
    """
    Initialize a new Specify project from the latest template.

    This command will:
    1. Check that required tools are installed (git is optional)
-    2. Let you choose your AI assistant (Claude Code, Gemini CLI, GitHub Copilot, Cursor, Qwen Code, opencode, Codex CLI, Windsurf, Kilo Code, or Auggie CLI)
+    2. Let you choose your AI assistant (Claude Code, Gemini CLI, GitHub Copilot, or Cursor)
    3. Download the appropriate template from GitHub
    4. Extract the template to a new project directory or current directory
    5. Initialize a fresh git repository (if not --no-git and no existing repo)
    6. Optionally set up AI assistant commands
+    7. Optionally include APM support (with --use-apm flag)

    Examples:
        specify init my-project
        specify init my-project --ai claude
-        specify init my-project --ai gemini
+        specify init my-project --ai gemini --use-apm
        specify init my-project --ai copilot --no-git
-        specify init my-project --ai cursor
+        specify init my-project --ai cursor --use-apm
-        specify init my-project --ai qwen
-        specify init my-project --ai opencode
-        specify init my-project --ai codex
-        specify init my-project --ai windsurf
-        specify init my-project --ai auggie
        specify init --ignore-agent-tools my-project
-        specify init . --ai claude         # Initialize in current directory
+        specify init --here --ai claude
-        specify init .                     # Initialize in current directory (interactive AI selection)
+        specify init --here --use-apm
-        specify init --here --ai claude    # Alternative syntax for current directory
-        specify init --here --ai codex
-        specify init --here
-        specify init --here --force        # Skip confirmation when current directory not empty
    """
    # Show banner first
    show_banner()

-    # Handle '.' as shorthand for current directory (equivalent to --here)
-    if project_name == ".":
-        here = True
-        project_name = None  # Clear project_name to use existing validation logic
-
    # Validate arguments
    if here and project_name:
        console.print("[red]Error:[/red] Cannot specify both project name and --here flag")
        raise typer.Exit(1)

    if not here and not project_name:
-        console.print("[red]Error:[/red] Must specify either a project name, use '.' for current directory, or use --here flag")
+        console.print("[red]Error:[/red] Must specify either a project name or use --here flag")
        raise typer.Exit(1)

    # Determine project directory
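The template's actual apm.yml lives under `templates/apm/hello-world` and is not shown in this diff. Purely as an illustration of the replacement map in `_create_apm_structure` (field names are hypothetical), a stub such as

```yaml
name: {{project_name}}
version: {{version}}
description: {{description}}
author: {{author}}
```

would be rewritten by `_create_apm_structure(project_path, "my-project", "copilot")` to

```yaml
name: my-project
version: 1.0.0
description: AI-native project powered by copilot
author: Developer
```

with any literal `hello-world` strings also replaced by the project name.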
@@ -817,51 +1072,31 @@ def init(
|
|||||||
if existing_items:
|
if existing_items:
|
||||||
console.print(f"[yellow]Warning:[/yellow] Current directory is not empty ({len(existing_items)} items)")
|
console.print(f"[yellow]Warning:[/yellow] Current directory is not empty ({len(existing_items)} items)")
|
||||||
console.print("[yellow]Template files will be merged with existing content and may overwrite existing files[/yellow]")
|
console.print("[yellow]Template files will be merged with existing content and may overwrite existing files[/yellow]")
|
||||||
if force:
|
|
||||||
console.print("[cyan]--force supplied: skipping confirmation and proceeding with merge[/cyan]")
|
# Ask for confirmation
|
||||||
else:
|
response = typer.confirm("Do you want to continue?")
|
||||||
# Ask for confirmation
|
if not response:
|
||||||
response = typer.confirm("Do you want to continue?")
|
console.print("[yellow]Operation cancelled[/yellow]")
|
||||||
if not response:
|
raise typer.Exit(0)
|
||||||
console.print("[yellow]Operation cancelled[/yellow]")
|
|
||||||
raise typer.Exit(0)
|
|
||||||
else:
|
else:
|
||||||
project_path = Path(project_name).resolve()
|
project_path = Path(project_name).resolve()
|
||||||
# Check if project directory already exists
|
# Check if project directory already exists
|
||||||
if project_path.exists():
|
if project_path.exists():
|
||||||
error_panel = Panel(
|
console.print(f"[red]Error:[/red] Directory '{project_name}' already exists")
|
||||||
f"Directory '[cyan]{project_name}[/cyan]' already exists\n"
|
|
||||||
"Please choose a different project name or remove the existing directory.",
|
|
||||||
title="[red]Directory Conflict[/red]",
|
|
||||||
border_style="red",
|
|
||||||
padding=(1, 2)
|
|
||||||
)
|
|
||||||
console.print()
|
|
||||||
console.print(error_panel)
|
|
||||||
raise typer.Exit(1)
|
raise typer.Exit(1)
|
||||||
|
|
||||||
# Create formatted setup info with column alignment
|
console.print(Panel.fit(
|
||||||
current_dir = Path.cwd()
|
"[bold cyan]Specify Project Setup[/bold cyan]\n"
|
||||||
|
f"{'Initializing in current directory:' if here else 'Creating new project:'} [green]{project_path.name}[/green]"
|
||||||
setup_lines = [
|
+ (f"\n[dim]Path: {project_path}[/dim]" if here else ""),
|
||||||
"[cyan]Specify Project Setup[/cyan]",
|
border_style="cyan"
|
||||||
"",
|
))
|
||||||
f"{'Project':<15} [green]{project_path.name}[/green]",
|
|
||||||
f"{'Working Path':<15} [dim]{current_dir}[/dim]",
|
|
||||||
]
|
|
||||||
|
|
||||||
# Add target path only if different from working dir
|
|
||||||
if not here:
|
|
||||||
setup_lines.append(f"{'Target Path':<15} [dim]{project_path}[/dim]")
|
|
||||||
|
|
||||||
console.print(Panel("\n".join(setup_lines), border_style="cyan", padding=(1, 2)))
|
|
||||||
|
|
||||||
# Check git only if we might need it (not --no-git)
|
# Check git only if we might need it (not --no-git)
|
||||||
# Only set to True if the user wants it and the tool is available
|
git_available = True
|
||||||
should_init_git = False
|
|
||||||
if not no_git:
|
if not no_git:
|
||||||
should_init_git = check_tool("git", "https://git-scm.com/downloads")
|
git_available = check_tool("git", "https://git-scm.com/downloads")
|
||||||
if not should_init_git:
|
if not git_available:
|
||||||
console.print("[yellow]Git not found - will skip repository initialization[/yellow]")
|
console.print("[yellow]Git not found - will skip repository initialization[/yellow]")
|
||||||
|
|
||||||
# AI assistant selection
|
# AI assistant selection
|
||||||
@@ -881,45 +1116,18 @@ def init(
|
|||||||
# Check agent tools unless ignored
|
# Check agent tools unless ignored
|
||||||
if not ignore_agent_tools:
|
if not ignore_agent_tools:
|
||||||
agent_tool_missing = False
|
agent_tool_missing = False
|
||||||
install_url = ""
|
|
||||||
if selected_ai == "claude":
|
if selected_ai == "claude":
|
||||||
if not check_tool("claude", "https://docs.anthropic.com/en/docs/claude-code/setup"):
|
if not check_tool("claude", "Install from: https://docs.anthropic.com/en/docs/claude-code/setup"):
|
||||||
install_url = "https://docs.anthropic.com/en/docs/claude-code/setup"
|
console.print("[red]Error:[/red] Claude CLI is required for Claude Code projects")
|
||||||
agent_tool_missing = True
|
agent_tool_missing = True
|
||||||
elif selected_ai == "gemini":
|
elif selected_ai == "gemini":
|
||||||
if not check_tool("gemini", "https://github.com/google-gemini/gemini-cli"):
|
if not check_tool("gemini", "Install from: https://github.com/google-gemini/gemini-cli"):
|
||||||
install_url = "https://github.com/google-gemini/gemini-cli"
|
console.print("[red]Error:[/red] Gemini CLI is required for Gemini projects")
|
||||||
agent_tool_missing = True
|
agent_tool_missing = True
|
||||||
elif selected_ai == "qwen":
|
|
||||||
if not check_tool("qwen", "https://github.com/QwenLM/qwen-code"):
|
|
||||||
install_url = "https://github.com/QwenLM/qwen-code"
|
|
||||||
agent_tool_missing = True
|
|
||||||
elif selected_ai == "opencode":
|
|
||||||
if not check_tool("opencode", "https://opencode.ai"):
|
|
||||||
install_url = "https://opencode.ai"
|
|
||||||
agent_tool_missing = True
|
|
||||||
elif selected_ai == "codex":
|
|
||||||
if not check_tool("codex", "https://github.com/openai/codex"):
|
|
||||||
install_url = "https://github.com/openai/codex"
|
|
||||||
agent_tool_missing = True
|
|
||||||
elif selected_ai == "auggie":
|
|
||||||
if not check_tool("auggie", "https://docs.augmentcode.com/cli/setup-auggie/install-auggie-cli"):
|
|
||||||
install_url = "https://docs.augmentcode.com/cli/setup-auggie/install-auggie-cli"
|
|
||||||
agent_tool_missing = True
|
|
||||||
# GitHub Copilot and Cursor checks are not needed as they're typically available in supported IDEs
|
|
||||||
|
|
||||||
if agent_tool_missing:
|
if agent_tool_missing:
|
||||||
error_panel = Panel(
|
console.print("\n[red]Required AI tool is missing![/red]")
|
||||||
f"[cyan]{selected_ai}[/cyan] not found\n"
|
console.print("[yellow]Tip:[/yellow] Use --ignore-agent-tools to skip this check")
|
||||||
f"Install with: [cyan]{install_url}[/cyan]\n"
|
|
||||||
f"{AI_CHOICES[selected_ai]} is required to continue with this project type.\n\n"
|
|
||||||
"Tip: Use [cyan]--ignore-agent-tools[/cyan] to skip this check",
|
|
||||||
title="[red]Agent Detection Error[/red]",
|
|
||||||
border_style="red",
|
|
||||||
padding=(1, 2)
|
|
||||||
)
|
|
||||||
console.print()
|
|
||||||
console.print(error_panel)
|
|
||||||
raise typer.Exit(1)
|
raise typer.Exit(1)
|
||||||
|
|
||||||
# Determine script type (explicit, interactive, or OS default)
|
# Determine script type (explicit, interactive, or OS default)
|
||||||
@@ -958,6 +1166,7 @@ def init(
|
|||||||
("extract", "Extract template"),
|
("extract", "Extract template"),
|
||||||
("zip-list", "Archive contents"),
|
("zip-list", "Archive contents"),
|
||||||
("extracted-summary", "Extraction summary"),
|
("extracted-summary", "Extraction summary"),
|
||||||
|
("apm", "Create APM structure"),
|
||||||
("chmod", "Ensure scripts executable"),
|
("chmod", "Ensure scripts executable"),
|
||||||
("cleanup", "Cleanup"),
|
("cleanup", "Cleanup"),
|
||||||
("git", "Initialize git repository"),
|
("git", "Initialize git repository"),
|
||||||
@@ -974,7 +1183,18 @@ def init(
|
|||||||
local_ssl_context = ssl_context if verify else False
|
local_ssl_context = ssl_context if verify else False
|
||||||
local_client = httpx.Client(verify=local_ssl_context)
|
local_client = httpx.Client(verify=local_ssl_context)
|
||||||
|
|
||||||
download_and_extract_template(project_path, selected_ai, selected_script, here, verbose=False, tracker=tracker, client=local_client, debug=debug, github_token=github_token)
|
download_and_extract_template(project_path, selected_ai, selected_script, here, verbose=False, tracker=tracker, client=local_client, debug=debug)
|
||||||
|
|
||||||
|
# APM structure creation (conditional)
|
||||||
|
if use_apm:
|
||||||
|
tracker.start("apm", "setting up APM structure")
|
||||||
|
try:
|
||||||
|
_create_apm_structure(project_path, project_path.name, selected_ai)
|
||||||
|
tracker.complete("apm", "APM structure created")
|
||||||
|
except Exception as e:
|
||||||
|
tracker.error("apm", f"APM setup failed: {str(e)}")
|
||||||
|
else:
|
||||||
|
tracker.skip("apm", "APM not requested")
|
||||||
|
|
||||||
# Ensure scripts are executable (POSIX)
|
# Ensure scripts are executable (POSIX)
|
||||||
ensure_executable_scripts(project_path, tracker=tracker)
|
ensure_executable_scripts(project_path, tracker=tracker)
|
||||||
@@ -984,7 +1204,7 @@ def init(
|
|||||||
tracker.start("git")
|
tracker.start("git")
|
||||||
if is_git_repo(project_path):
|
if is_git_repo(project_path):
|
||||||
tracker.complete("git", "existing repo detected")
|
tracker.complete("git", "existing repo detected")
|
||||||
elif should_init_git:
|
elif git_available:
|
||||||
if init_git_repo(project_path, quiet=True):
|
if init_git_repo(project_path, quiet=True):
|
||||||
tracker.complete("git", "initialized")
|
tracker.complete("git", "initialized")
|
||||||
else:
|
else:
|
||||||
@@ -1018,86 +1238,48 @@ def init(
|
|||||||
console.print(tracker.render())
|
console.print(tracker.render())
|
||||||
console.print("\n[bold green]Project ready.[/bold green]")
|
console.print("\n[bold green]Project ready.[/bold green]")
|
||||||
|
|
||||||
# Agent folder security notice
|
|
||||||
agent_folder_map = {
|
|
||||||
"claude": ".claude/",
|
|
||||||
"gemini": ".gemini/",
|
|
||||||
"cursor": ".cursor/",
|
|
||||||
"qwen": ".qwen/",
|
|
||||||
"opencode": ".opencode/",
|
|
||||||
"codex": ".codex/",
|
|
||||||
"windsurf": ".windsurf/",
|
|
||||||
"kilocode": ".kilocode/",
|
|
||||||
"auggie": ".augment/",
|
|
||||||
"copilot": ".github/",
|
|
||||||
"roo": ".roo/"
|
|
||||||
}
|
|
||||||
|
|
||||||
if selected_ai in agent_folder_map:
|
|
||||||
agent_folder = agent_folder_map[selected_ai]
|
|
||||||
security_notice = Panel(
|
|
||||||
f"Some agents may store credentials, auth tokens, or other identifying and private artifacts in the agent folder within your project.\n"
|
|
||||||
f"Consider adding [cyan]{agent_folder}[/cyan] (or parts of it) to [cyan].gitignore[/cyan] to prevent accidental credential leakage.",
|
|
||||||
title="[yellow]Agent Folder Security[/yellow]",
|
|
||||||
border_style="yellow",
|
|
||||||
padding=(1, 2)
|
|
||||||
)
|
|
||||||
console.print()
|
|
||||||
console.print(security_notice)
|
|
||||||
|
|
||||||
    # Boxed "Next steps" section
    steps_lines = []
    if not here:
        steps_lines.append(f"1. Go to the project folder: [cyan]cd {project_name}[/cyan]")
        step_num = 2
    else:
        steps_lines.append("1. You're already in the project directory!")
        step_num = 2

    # Add Codex-specific setup step if needed
    if selected_ai == "codex":
        codex_path = project_path / ".codex"
        quoted_path = shlex.quote(str(codex_path))
        if os.name == "nt":  # Windows
            cmd = f"setx CODEX_HOME {quoted_path}"
        else:  # Unix-like systems
            cmd = f"export CODEX_HOME={quoted_path}"

        steps_lines.append(f"{step_num}. Set [cyan]CODEX_HOME[/cyan] environment variable before running Codex: [cyan]{cmd}[/cyan]")
        step_num += 1

    steps_lines.append(f"{step_num}. Start using slash commands with your AI agent:")
    steps_lines.append(" 2.1 [cyan]/constitution[/] - Establish project principles")
    steps_lines.append(" 2.2 [cyan]/specify[/] - Create baseline specification")
    steps_lines.append(" 2.3 [cyan]/plan[/] - Create implementation plan")
    steps_lines.append(" 2.4 [cyan]/tasks[/] - Generate actionable tasks")
    steps_lines.append(" 2.5 [cyan]/implement[/] - Execute implementation")

    steps_panel = Panel("\n".join(steps_lines), title="Next Steps", border_style="cyan", padding=(1,2))
    console.print()
    console.print(steps_panel)

    # Boxed "Next steps" section
    steps_lines = []
    if not here:
        steps_lines.append(f"1. [bold green]cd {project_name}[/bold green]")
        step_num = 2
    else:
        steps_lines.append("1. You're already in the project directory!")
        step_num = 2

    if selected_ai == "claude":
        steps_lines.append(f"{step_num}. Open in Visual Studio Code and start using / commands with Claude Code")
        steps_lines.append(" - Type / in any file to see available commands")
        steps_lines.append(" - Use /specify to create specifications")
        steps_lines.append(" - Use /plan to create implementation plans")
        steps_lines.append(" - Use /tasks to generate tasks")
    elif selected_ai == "gemini":
        steps_lines.append(f"{step_num}. Use / commands with Gemini CLI")
        steps_lines.append(" - Run gemini /specify to create specifications")
        steps_lines.append(" - Run gemini /plan to create implementation plans")
        steps_lines.append(" - Run gemini /tasks to generate tasks")
        steps_lines.append(" - See GEMINI.md for all available commands")
    elif selected_ai == "copilot":
        steps_lines.append(f"{step_num}. Open in Visual Studio Code and use [bold cyan]/specify[/], [bold cyan]/plan[/], [bold cyan]/tasks[/] commands with GitHub Copilot")

    # Removed script variant step (scripts are transparent to users)
    step_num += 1
    steps_lines.append(f"{step_num}. Update [bold magenta]CONSTITUTION.md[/bold magenta] with your project's non-negotiable principles")

    # Add APM-specific next steps if APM was enabled
    if use_apm:
        step_num += 1
        steps_lines.append(f"{step_num}. Use APM commands to manage your project context:")
        steps_lines.append(" - [bold cyan]specify apm compile[/bold cyan] - Generate AGENTS.md from APM instructions and packages")
        steps_lines.append(" - [bold cyan]specify apm install[/bold cyan] - Install APM packages")
        steps_lines.append(" - [bold cyan]specify apm deps list[/bold cyan] - List installed APM packages")

    steps_panel = Panel("\n".join(steps_lines), title="Next steps", border_style="cyan", padding=(1,2))
    console.print()  # blank line
    console.print(steps_panel)

    # Removed farewell line per user request
    enhancement_lines = [
        "Optional commands that you can use for your specs [bright_black](improve quality & confidence)[/bright_black]",
        "",
        f"○ [cyan]/clarify[/] [bright_black](optional)[/bright_black] - Ask structured questions to de-risk ambiguous areas before planning (run before [cyan]/plan[/] if used)",
        f"○ [cyan]/analyze[/] [bright_black](optional)[/bright_black] - Cross-artifact consistency & alignment report (after [cyan]/tasks[/], before [cyan]/implement[/])"
    ]
    enhancements_panel = Panel("\n".join(enhancement_lines), title="Enhancement Commands", border_style="cyan", padding=(1,2))
    console.print()
    console.print(enhancements_panel)

    if selected_ai == "codex":
        warning_text = """[bold yellow]Important Note:[/bold yellow]

Custom prompts do not yet support arguments in Codex. You may need to manually specify additional project instructions directly in prompt files located in [cyan].codex/prompts/[/cyan].

For more information, see: [cyan]https://github.com/openai/codex/issues/2890[/cyan]"""
        warning_panel = Panel(warning_text, title="Slash Commands in Codex", border_style="yellow", padding=(1,2))
        console.print()
        console.print(warning_panel)

@app.command()
def check():
@@ -1105,41 +1287,36 @@ def check():
    show_banner()
    console.print("[bold]Checking for installed tools...[/bold]\n")

    tracker = StepTracker("Check Available Tools")

    tracker.add("git", "Git version control")
    tracker.add("claude", "Claude Code CLI")
    tracker.add("gemini", "Gemini CLI")
    tracker.add("qwen", "Qwen Code CLI")
    tracker.add("code", "Visual Studio Code")
    tracker.add("code-insiders", "Visual Studio Code Insiders")
    tracker.add("cursor-agent", "Cursor IDE agent")
    tracker.add("windsurf", "Windsurf IDE")
    tracker.add("kilocode", "Kilo Code IDE")
    tracker.add("opencode", "opencode")
    tracker.add("codex", "Codex CLI")
    tracker.add("auggie", "Auggie CLI")

    git_ok = check_tool_for_tracker("git", tracker)
    claude_ok = check_tool_for_tracker("claude", tracker)
    gemini_ok = check_tool_for_tracker("gemini", tracker)
    qwen_ok = check_tool_for_tracker("qwen", tracker)
    code_ok = check_tool_for_tracker("code", tracker)
    code_insiders_ok = check_tool_for_tracker("code-insiders", tracker)
    cursor_ok = check_tool_for_tracker("cursor-agent", tracker)
    windsurf_ok = check_tool_for_tracker("windsurf", tracker)
    kilocode_ok = check_tool_for_tracker("kilocode", tracker)
    opencode_ok = check_tool_for_tracker("opencode", tracker)
    codex_ok = check_tool_for_tracker("codex", tracker)
    auggie_ok = check_tool_for_tracker("auggie", tracker)

    console.print(tracker.render())

    console.print("\n[bold green]Specify CLI is ready to use![/bold green]")

    if not git_ok:
        console.print("[dim]Tip: Install git for repository management[/dim]")
    if not (claude_ok or gemini_ok or cursor_ok or qwen_ok or windsurf_ok or kilocode_ok or opencode_ok or codex_ok or auggie_ok):
        console.print("[dim]Tip: Install an AI assistant for the best experience[/dim]")

@app.command()
def check():
    show_banner()
    console.print("[bold]Checking for installed tools...[/bold]\n")

    # Create tracker for checking tools
    tracker = StepTracker("Check Available Tools")

    # Add all tools we want to check
    tracker.add("git", "Git version control")
    tracker.add("claude", "Claude Code CLI")
    tracker.add("gemini", "Gemini CLI")
    tracker.add("code", "VS Code (for GitHub Copilot)")
    tracker.add("cursor-agent", "Cursor IDE agent (optional)")

    # Check each tool
    git_ok = check_tool_for_tracker("git", "https://git-scm.com/downloads", tracker)
    claude_ok = check_tool_for_tracker("claude", "https://docs.anthropic.com/en/docs/claude-code/setup", tracker)
    gemini_ok = check_tool_for_tracker("gemini", "https://github.com/google-gemini/gemini-cli", tracker)
    # Check for VS Code (code or code-insiders)
    code_ok = check_tool_for_tracker("code", "https://code.visualstudio.com/", tracker)
    if not code_ok:
        code_ok = check_tool_for_tracker("code-insiders", "https://code.visualstudio.com/insiders/", tracker)
    cursor_ok = check_tool_for_tracker("cursor-agent", "https://cursor.sh/", tracker)

    # Render the final tree
    console.print(tracker.render())

    # Summary
    console.print("\n[bold green]Specify CLI is ready to use![/bold green]")

    # Recommendations
    if not git_ok:
        console.print("[dim]Tip: Install git for repository management[/dim]")
    if not (claude_ok or gemini_ok):
        console.print("[dim]Tip: Install an AI assistant for the best experience[/dim]")

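Both versions of `check()` above delegate the actual lookup to `check_tool_for_tracker`, called with and without an install-hint URL in the two branches. Its body is not part of this diff; a plausible sketch is shown below, where the use of `shutil.which` and the tracker method names (`complete`, `error`) are assumptions rather than code from either branch.

```python
import shutil

def check_tool_for_tracker(tool: str, hint: str, tracker) -> bool:
    """Mark a tool as found or missing on the tracker based on a PATH lookup.

    Sketch only: the real helper's signature differs between the two branches
    shown above, and the tracker API used here is assumed.
    """
    path = shutil.which(tool)
    if path:
        tracker.complete(tool, path)  # assumed tracker method
        return True
    tracker.error(tool, f"not found - see {hint}")  # assumed tracker method
    return False
```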
9
templates/apm/hello-world/apm.yml
Normal file
@@ -0,0 +1,9 @@
name: {{project_name}}
version: {{version}}
description: {{description}}
author: {{author}}

dependencies:
  apm:
    # list of APM packages as GitHub repositories: <owner>/<repo>
    # - github/design-guidelines
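The new `templates/apm/hello-world/apm.yml` is a template whose `{{...}}` placeholders are filled in when a project is created. A minimal sketch of how the rendered manifest could be read and its APM dependencies listed is given below; the loader function and the use of PyYAML are assumptions, not code from this branch.

```python
import yaml  # PyYAML

def load_apm_manifest(path: str = "apm.yml") -> dict:
    """Read a rendered apm.yml and return its metadata plus APM dependencies."""
    with open(path, "r", encoding="utf-8") as fh:
        manifest = yaml.safe_load(fh) or {}
    deps = (manifest.get("dependencies") or {}).get("apm") or []
    return {
        "name": manifest.get("name"),
        "version": manifest.get("version"),
        "dependencies": deps,  # e.g. ["github/design-guidelines"]
    }
```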
@@ -1,104 +0,0 @@
---
description: Perform a non-destructive cross-artifact consistency and quality analysis across spec.md, plan.md, and tasks.md after task generation.
scripts:
  sh: scripts/bash/check-prerequisites.sh --json --require-tasks --include-tasks
  ps: scripts/powershell/check-prerequisites.ps1 -Json -RequireTasks -IncludeTasks
---

The user input to you can be provided directly by the agent or as a command argument - you **MUST** consider it before proceeding with the prompt (if not empty).

User input:

$ARGUMENTS

Goal: Identify inconsistencies, duplications, ambiguities, and underspecified items across the three core artifacts (`spec.md`, `plan.md`, `tasks.md`) before implementation. This command MUST run only after `/tasks` has successfully produced a complete `tasks.md`.

STRICTLY READ-ONLY: Do **not** modify any files. Output a structured analysis report. Offer an optional remediation plan (user must explicitly approve before any follow-up editing commands would be invoked manually).

Constitution Authority: The project constitution (`/memory/constitution.md`) is **non-negotiable** within this analysis scope. Constitution conflicts are automatically CRITICAL and require adjustment of the spec, plan, or tasks—not dilution, reinterpretation, or silent ignoring of the principle. If a principle itself needs to change, that must occur in a separate, explicit constitution update outside `/analyze`.

Execution steps:

1. Run `{SCRIPT}` once from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS. Derive absolute paths:
   - SPEC = FEATURE_DIR/spec.md
   - PLAN = FEATURE_DIR/plan.md
   - TASKS = FEATURE_DIR/tasks.md
   Abort with an error message if any required file is missing (instruct the user to run the missing prerequisite command).

2. Load artifacts:
   - Parse spec.md sections: Overview/Context, Functional Requirements, Non-Functional Requirements, User Stories, Edge Cases (if present).
   - Parse plan.md: Architecture/stack choices, Data Model references, Phases, Technical constraints.
   - Parse tasks.md: Task IDs, descriptions, phase grouping, parallel markers [P], referenced file paths.
   - Load constitution `/memory/constitution.md` for principle validation.

3. Build internal semantic models:
   - Requirements inventory: Each functional + non-functional requirement with a stable key (derive slug based on imperative phrase; e.g., "User can upload file" -> `user-can-upload-file`).
   - User story/action inventory.
   - Task coverage mapping: Map each task to one or more requirements or stories (inference by keyword / explicit reference patterns like IDs or key phrases).
   - Constitution rule set: Extract principle names and any MUST/SHOULD normative statements.

4. Detection passes:
   A. Duplication detection:
      - Identify near-duplicate requirements. Mark lower-quality phrasing for consolidation.
   B. Ambiguity detection:
      - Flag vague adjectives (fast, scalable, secure, intuitive, robust) lacking measurable criteria.
      - Flag unresolved placeholders (TODO, TKTK, ???, <placeholder>, etc.).
   C. Underspecification:
      - Requirements with verbs but missing object or measurable outcome.
      - User stories missing acceptance criteria alignment.
      - Tasks referencing files or components not defined in spec/plan.
   D. Constitution alignment:
      - Any requirement or plan element conflicting with a MUST principle.
      - Missing mandated sections or quality gates from constitution.
   E. Coverage gaps:
      - Requirements with zero associated tasks.
      - Tasks with no mapped requirement/story.
      - Non-functional requirements not reflected in tasks (e.g., performance, security).
   F. Inconsistency:
      - Terminology drift (same concept named differently across files).
      - Data entities referenced in plan but absent in spec (or vice versa).
      - Task ordering contradictions (e.g., integration tasks before foundational setup tasks without dependency note).
      - Conflicting requirements (e.g., one requires Next.js while another says to use Vue as the framework).

5. Severity assignment heuristic:
   - CRITICAL: Violates constitution MUST, missing core spec artifact, or requirement with zero coverage that blocks baseline functionality.
   - HIGH: Duplicate or conflicting requirement, ambiguous security/performance attribute, untestable acceptance criterion.
   - MEDIUM: Terminology drift, missing non-functional task coverage, underspecified edge case.
   - LOW: Style/wording improvements, minor redundancy not affecting execution order.

6. Produce a Markdown report (no file writes) with sections:

   ### Specification Analysis Report
   | ID | Category | Severity | Location(s) | Summary | Recommendation |
   |----|----------|----------|-------------|---------|----------------|
   | A1 | Duplication | HIGH | spec.md:L120-134 | Two similar requirements ... | Merge phrasing; keep clearer version |
   (Add one row per finding; generate stable IDs prefixed by category initial.)

   Additional subsections:
   - Coverage Summary Table:
     | Requirement Key | Has Task? | Task IDs | Notes |
   - Constitution Alignment Issues (if any)
   - Unmapped Tasks (if any)
   - Metrics:
     * Total Requirements
     * Total Tasks
     * Coverage % (requirements with >=1 task)
     * Ambiguity Count
     * Duplication Count
     * Critical Issues Count

7. At end of report, output a concise Next Actions block:
   - If CRITICAL issues exist: Recommend resolving before `/implement`.
   - If only LOW/MEDIUM: User may proceed, but provide improvement suggestions.
   - Provide explicit command suggestions: e.g., "Run /specify with refinement", "Run /plan to adjust architecture", "Manually edit tasks.md to add coverage for 'performance-metrics'".

8. Ask the user: "Would you like me to suggest concrete remediation edits for the top N issues?" (Do NOT apply them automatically.)

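Steps 3 and 6 above assume the agent can derive stable requirement keys and then compute task coverage from them. A small sketch of that bookkeeping is shown below; the slug rule follows the example in step 3, while the keyword-overlap matching and the coverage percentage calculation are illustrative assumptions about how the prose could be mechanised.

```python
import re

def requirement_slug(text: str) -> str:
    """Derive a stable key from an imperative requirement phrase,
    e.g. "User can upload file" -> "user-can-upload-file"."""
    return re.sub(r"[^a-z0-9]+", "-", text.lower()).strip("-")

def coverage_report(requirements: list[str], tasks: dict[str, str]) -> dict:
    """Map each requirement key to task IDs whose descriptions share its words."""
    mapping = {}
    for req in requirements:
        key = requirement_slug(req)
        words = set(key.split("-"))
        mapping[key] = [tid for tid, desc in tasks.items()
                        if words & set(requirement_slug(desc).split("-"))]
    covered = sum(1 for ids in mapping.values() if ids)
    return {
        "mapping": mapping,  # feeds the Coverage Summary Table
        "coverage_pct": round(100 * covered / max(len(mapping), 1), 1),
    }
```

A real implementation would weight distinctive words over filler words before declaring a match; the simple set intersection here is only meant to show where the Coverage % metric in step 6 comes from.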
Behavior rules:
- NEVER modify files.
- NEVER hallucinate missing sections—if absent, report them.
- KEEP findings deterministic: if rerun without changes, produce consistent IDs and counts.
- LIMIT total findings in the main table to 50; aggregate remainder in a summarized overflow note.
- If zero issues found, emit a success report with coverage statistics and proceed recommendation.

Context: {ARGS}

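Detection pass B in the command above flags vague adjectives and unresolved placeholders. One way that scan could be mechanised is sketched below; the word list and placeholder patterns are taken from the prompt text, everything else is an assumption.

```python
import re

VAGUE_TERMS = {"fast", "scalable", "secure", "intuitive", "robust"}
PLACEHOLDER_RE = re.compile(r"TODO|TKTK|\?\?\?|<placeholder>", re.IGNORECASE)

def ambiguity_findings(lines: list[str]) -> list[tuple[int, str]]:
    """Return (line_number, reason) pairs for ambiguity findings in a spec."""
    findings = []
    for n, line in enumerate(lines, start=1):
        words = set(re.findall(r"[a-z]+", line.lower()))
        if words & VAGUE_TERMS:
            findings.append((n, "vague adjective lacking measurable criteria"))
        if PLACEHOLDER_RE.search(line):
            findings.append((n, "unresolved placeholder"))
    return findings
```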
@@ -1,161 +0,0 @@
---
description: Identify underspecified areas in the current feature spec by asking up to 5 highly targeted clarification questions and encoding answers back into the spec.
scripts:
  sh: scripts/bash/check-prerequisites.sh --json --paths-only
  ps: scripts/powershell/check-prerequisites.ps1 -Json -PathsOnly
---

The user input to you can be provided directly by the agent or as a command argument - you **MUST** consider it before proceeding with the prompt (if not empty).

User input:

$ARGUMENTS

Goal: Detect and reduce ambiguity or missing decision points in the active feature specification and record the clarifications directly in the spec file.

Note: This clarification workflow is expected to run (and be completed) BEFORE invoking `/plan`. If the user explicitly states they are skipping clarification (e.g., exploratory spike), you may proceed, but must warn that downstream rework risk increases.

Execution steps:

1. Run `{SCRIPT}` from repo root **once** (combined `--json --paths-only` mode / `-Json -PathsOnly`). Parse minimal JSON payload fields:
   - `FEATURE_DIR`
   - `FEATURE_SPEC`
   - (Optionally capture `IMPL_PLAN`, `TASKS` for future chained flows.)
   - If JSON parsing fails, abort and instruct user to re-run `/specify` or verify feature branch environment.

2. Load the current spec file. Perform a structured ambiguity & coverage scan using this taxonomy. For each category, mark status: Clear / Partial / Missing. Produce an internal coverage map used for prioritization (do not output raw map unless no questions will be asked).

   Functional Scope & Behavior:
   - Core user goals & success criteria
   - Explicit out-of-scope declarations
   - User roles / personas differentiation

   Domain & Data Model:
   - Entities, attributes, relationships
   - Identity & uniqueness rules
   - Lifecycle/state transitions
   - Data volume / scale assumptions

   Interaction & UX Flow:
   - Critical user journeys / sequences
   - Error/empty/loading states
   - Accessibility or localization notes

   Non-Functional Quality Attributes:
   - Performance (latency, throughput targets)
   - Scalability (horizontal/vertical, limits)
   - Reliability & availability (uptime, recovery expectations)
   - Observability (logging, metrics, tracing signals)
   - Security & privacy (authN/Z, data protection, threat assumptions)
   - Compliance / regulatory constraints (if any)

   Integration & External Dependencies:
   - External services/APIs and failure modes
   - Data import/export formats
   - Protocol/versioning assumptions

   Edge Cases & Failure Handling:
   - Negative scenarios
   - Rate limiting / throttling
   - Conflict resolution (e.g., concurrent edits)

   Constraints & Tradeoffs:
   - Technical constraints (language, storage, hosting)
   - Explicit tradeoffs or rejected alternatives

   Terminology & Consistency:
   - Canonical glossary terms
   - Avoided synonyms / deprecated terms

   Completion Signals:
   - Acceptance criteria testability
   - Measurable Definition of Done style indicators

   Misc / Placeholders:
   - TODO markers / unresolved decisions
   - Ambiguous adjectives ("robust", "intuitive") lacking quantification

   For each category with Partial or Missing status, add a candidate question opportunity unless:
   - Clarification would not materially change implementation or validation strategy
   - Information is better deferred to planning phase (note internally)

3. Generate (internally) a prioritized queue of candidate clarification questions (maximum 5). Do NOT output them all at once. Apply these constraints:
   - Maximum of 5 total questions across the whole session.
   - Each question must be answerable with EITHER:
     * A short multiple‑choice selection (2–5 distinct, mutually exclusive options), OR
     * A one-word / short‑phrase answer (explicitly constrain: "Answer in <=5 words").
   - Only include questions whose answers materially impact architecture, data modeling, task decomposition, test design, UX behavior, operational readiness, or compliance validation.
   - Ensure category coverage balance: attempt to cover the highest impact unresolved categories first; avoid asking two low-impact questions when a single high-impact area (e.g., security posture) is unresolved.
   - Exclude questions already answered, trivial stylistic preferences, or plan-level execution details (unless blocking correctness).
   - Favor clarifications that reduce downstream rework risk or prevent misaligned acceptance tests.
   - If more than 5 categories remain unresolved, select the top 5 by (Impact * Uncertainty) heuristic.

4. Sequential questioning loop (interactive):
   - Present EXACTLY ONE question at a time.
   - For multiple‑choice questions render options as a Markdown table:

     | Option | Description |
     |--------|-------------|
     | A | <Option A description> |
     | B | <Option B description> |
     | C | <Option C description> | (add D/E as needed up to 5)
     | Short | Provide a different short answer (<=5 words) | (Include only if free-form alternative is appropriate)

   - For short‑answer style (no meaningful discrete options), output a single line after the question: `Format: Short answer (<=5 words)`.
   - After the user answers:
     * Validate the answer maps to one option or fits the <=5 word constraint.
     * If ambiguous, ask for a quick disambiguation (count still belongs to same question; do not advance).
     * Once satisfactory, record it in working memory (do not yet write to disk) and move to the next queued question.
   - Stop asking further questions when:
     * All critical ambiguities resolved early (remaining queued items become unnecessary), OR
     * User signals completion ("done", "good", "no more"), OR
     * You reach 5 asked questions.
   - Never reveal future queued questions in advance.
   - If no valid questions exist at start, immediately report no critical ambiguities.

5. Integration after EACH accepted answer (incremental update approach):
   - Maintain in-memory representation of the spec (loaded once at start) plus the raw file contents.
   - For the first integrated answer in this session:
     * Ensure a `## Clarifications` section exists (create it just after the highest-level contextual/overview section per the spec template if missing).
     * Under it, create (if not present) a `### Session YYYY-MM-DD` subheading for today.
   - Append a bullet line immediately after acceptance: `- Q: <question> → A: <final answer>`.
   - Then immediately apply the clarification to the most appropriate section(s):
     * Functional ambiguity → Update or add a bullet in Functional Requirements.
     * User interaction / actor distinction → Update User Stories or Actors subsection (if present) with clarified role, constraint, or scenario.
     * Data shape / entities → Update Data Model (add fields, types, relationships) preserving ordering; note added constraints succinctly.
     * Non-functional constraint → Add/modify measurable criteria in Non-Functional / Quality Attributes section (convert vague adjective to metric or explicit target).
     * Edge case / negative flow → Add a new bullet under Edge Cases / Error Handling (or create such subsection if template provides placeholder for it).
     * Terminology conflict → Normalize term across spec; retain original only if necessary by adding `(formerly referred to as "X")` once.
   - If the clarification invalidates an earlier ambiguous statement, replace that statement instead of duplicating; leave no obsolete contradictory text.
   - Save the spec file AFTER each integration to minimize risk of context loss (atomic overwrite).
   - Preserve formatting: do not reorder unrelated sections; keep heading hierarchy intact.
   - Keep each inserted clarification minimal and testable (avoid narrative drift).

6. Validation (performed after EACH write plus final pass):
   - Clarifications session contains exactly one bullet per accepted answer (no duplicates).
   - Total asked (accepted) questions ≤ 5.
   - Updated sections contain no lingering vague placeholders the new answer was meant to resolve.
   - No contradictory earlier statement remains (scan for now-invalid alternative choices removed).
   - Markdown structure valid; only allowed new headings: `## Clarifications`, `### Session YYYY-MM-DD`.
   - Terminology consistency: same canonical term used across all updated sections.

7. Write the updated spec back to `FEATURE_SPEC`.

8. Report completion (after questioning loop ends or early termination):
   - Number of questions asked & answered.
   - Path to updated spec.
   - Sections touched (list names).
   - Coverage summary table listing each taxonomy category with Status: Resolved (was Partial/Missing and addressed), Deferred (exceeds question quota or better suited for planning), Clear (already sufficient), Outstanding (still Partial/Missing but low impact).
   - If any Outstanding or Deferred remain, recommend whether to proceed to `/plan` or run `/clarify` again later post-plan.
   - Suggested next command.

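Step 3 above caps a session at five questions and, when more categories are unresolved, picks the top five by an (Impact * Uncertainty) heuristic while balancing category coverage. A compact sketch of that selection is given below; the 1-5 scoring scale is an assumption, since the prompt leaves the scoring itself to the agent.

```python
from dataclasses import dataclass

@dataclass
class Candidate:
    category: str
    question: str
    impact: int       # 1-5, assumed scale
    uncertainty: int  # 1-5, assumed scale

def select_questions(candidates: list[Candidate], limit: int = 5) -> list[Candidate]:
    """Keep at most `limit` questions, highest Impact * Uncertainty first,
    and never ask two questions for the same taxonomy category."""
    ranked = sorted(candidates, key=lambda c: c.impact * c.uncertainty, reverse=True)
    chosen, seen = [], set()
    for cand in ranked:
        if cand.category in seen:
            continue
        chosen.append(cand)
        seen.add(cand.category)
        if len(chosen) == limit:
            break
    return chosen
```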
Behavior rules:
- If no meaningful ambiguities found (or all potential questions would be low-impact), respond: "No critical ambiguities detected worth formal clarification." and suggest proceeding.
- If spec file missing, instruct user to run `/specify` first (do not create a new spec here).
- Never exceed 5 total asked questions (clarification retries for a single question do not count as new questions).
- Avoid speculative tech stack questions unless the absence blocks functional clarity.
- Respect user early termination signals ("stop", "done", "proceed").
- If no questions asked due to full coverage, output a compact coverage summary (all categories Clear) then suggest advancing.
- If quota reached with unresolved high-impact categories remaining, explicitly flag them under Deferred with rationale.

Context for prioritization: {ARGS}

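Step 5 of the clarify command requires inserting a `## Clarifications` section with a dated `### Session` subheading and one `- Q: ... → A: ...` bullet per accepted answer. A minimal sketch of that incremental update follows; appending missing headings at the end of the file is a simplification of the prompt's "just after the overview section" rule, and the function itself is an assumption.

```python
from datetime import date

def record_clarification(spec_text: str, question: str, answer: str) -> str:
    """Append a Q/A bullet under today's session, creating headings as needed."""
    session = f"### Session {date.today().isoformat()}"
    bullet = f"- Q: {question} → A: {answer}"
    if "## Clarifications" not in spec_text:
        spec_text = spec_text.rstrip("\n") + "\n\n## Clarifications\n"
    if session not in spec_text:
        spec_text = spec_text.rstrip("\n") + f"\n\n{session}\n"
    return spec_text.rstrip("\n") + f"\n{bullet}\n"
```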
@@ -1,73 +0,0 @@
---
description: Create or update the project constitution from interactive or provided principle inputs, ensuring all dependent templates stay in sync.
---

The user input to you can be provided directly by the agent or as a command argument - you **MUST** consider it before proceeding with the prompt (if not empty).

User input:

$ARGUMENTS

You are updating the project constitution at `/memory/constitution.md`. This file is a TEMPLATE containing placeholder tokens in square brackets (e.g. `[PROJECT_NAME]`, `[PRINCIPLE_1_NAME]`). Your job is to (a) collect/derive concrete values, (b) fill the template precisely, and (c) propagate any amendments across dependent artifacts.

Follow this execution flow:

1. Load the existing constitution template at `/memory/constitution.md`.
   - Identify every placeholder token of the form `[ALL_CAPS_IDENTIFIER]`.
   **IMPORTANT**: The user might require fewer or more principles than the ones used in the template. If a number is specified, respect that - follow the general template. You will update the doc accordingly.

2. Collect/derive values for placeholders:
   - If user input (conversation) supplies a value, use it.
   - Otherwise infer from existing repo context (README, docs, prior constitution versions if embedded).
   - For governance dates: `RATIFICATION_DATE` is the original adoption date (if unknown ask or mark TODO), `LAST_AMENDED_DATE` is today if changes are made, otherwise keep previous.
   - `CONSTITUTION_VERSION` must increment according to semantic versioning rules:
     * MAJOR: Backward incompatible governance/principle removals or redefinitions.
     * MINOR: New principle/section added or materially expanded guidance.
     * PATCH: Clarifications, wording, typo fixes, non-semantic refinements.
   - If version bump type is ambiguous, propose reasoning before finalizing.

3. Draft the updated constitution content:
   - Replace every placeholder with concrete text (no bracketed tokens left except intentionally retained template slots that the project has chosen not to define yet—explicitly justify any left).
   - Preserve heading hierarchy; comments can be removed once replaced unless they still add clarifying guidance.
   - Ensure each Principle section: succinct name line, paragraph (or bullet list) capturing non‑negotiable rules, explicit rationale if not obvious.
   - Ensure Governance section lists amendment procedure, versioning policy, and compliance review expectations.

4. Consistency propagation checklist (convert prior checklist into active validations):
   - Read `/templates/plan-template.md` and ensure any "Constitution Check" or rules align with updated principles.
   - Read `/templates/spec-template.md` for scope/requirements alignment—update if constitution adds/removes mandatory sections or constraints.
   - Read `/templates/tasks-template.md` and ensure task categorization reflects new or removed principle-driven task types (e.g., observability, versioning, testing discipline).
   - Read each command file in `/templates/commands/*.md` (including this one) to verify no outdated references (agent-specific names like CLAUDE only) remain when generic guidance is required.
   - Read any runtime guidance docs (e.g., `README.md`, `docs/quickstart.md`, or agent-specific guidance files if present). Update references to principles changed.

5. Produce a Sync Impact Report (prepend as an HTML comment at top of the constitution file after update):
   - Version change: old → new
   - List of modified principles (old title → new title if renamed)
   - Added sections
   - Removed sections
   - Templates requiring updates (✅ updated / ⚠ pending) with file paths
   - Follow-up TODOs if any placeholders intentionally deferred.

6. Validation before final output:
   - No remaining unexplained bracket tokens.
   - Version line matches report.
   - Dates ISO format YYYY-MM-DD.
   - Principles are declarative, testable, and free of vague language ("should" → replace with MUST/SHOULD rationale where appropriate).

7. Write the completed constitution back to `/memory/constitution.md` (overwrite).

8. Output a final summary to the user with:
   - New version and bump rationale.
   - Any files flagged for manual follow-up.
   - Suggested commit message (e.g., `docs: amend constitution to vX.Y.Z (principle additions + governance update)`).

Formatting & Style Requirements:
- Use Markdown headings exactly as in the template (do not demote/promote levels).
- Wrap long rationale lines to keep readability (<100 chars ideally) but do not hard enforce with awkward breaks.
- Keep a single blank line between sections.
- Avoid trailing whitespace.

If the user supplies partial updates (e.g., only one principle revision), still perform validation and version decision steps.

If critical info is missing (e.g., ratification date truly unknown), insert `TODO(<FIELD_NAME>): explanation` and include it in the Sync Impact Report under deferred items.

Do not create a new template; always operate on the existing `/memory/constitution.md` file.

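Steps 1 and 2 above hinge on two mechanical pieces: finding `[ALL_CAPS_IDENTIFIER]` placeholder tokens and bumping `CONSTITUTION_VERSION` according to semantic-versioning rules. Both are small enough to sketch; the code below is illustrative of those rules, not an implementation shipped with this branch.

```python
import re

PLACEHOLDER_RE = re.compile(r"\[[A-Z0-9_]+\]")  # tokens like [PROJECT_NAME]

def find_placeholders(template_text: str) -> set[str]:
    """Collect every unresolved placeholder token in the constitution template."""
    return set(PLACEHOLDER_RE.findall(template_text))

def bump_constitution_version(current: str, change: str) -> str:
    """Increment MAJOR.MINOR.PATCH per step 2: "major" for removed or redefined
    principles, "minor" for new or expanded sections, "patch" for wording fixes."""
    major, minor, patch = (int(part) for part in current.split("."))
    if change == "major":
        return f"{major + 1}.0.0"
    if change == "minor":
        return f"{major}.{minor + 1}.0"
    return f"{major}.{minor}.{patch + 1}"

# Example: bump_constitution_version("2.1.3", "minor") -> "2.2.0"
```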
@@ -1,59 +0,0 @@
---
description: Execute the implementation plan by processing and executing all tasks defined in tasks.md
scripts:
  sh: scripts/bash/check-prerequisites.sh --json --require-tasks --include-tasks
  ps: scripts/powershell/check-prerequisites.ps1 -Json -RequireTasks -IncludeTasks
---

The user input can be provided directly by the agent or as a command argument - you **MUST** consider it before proceeding with the prompt (if not empty).

User input:

$ARGUMENTS

1. Run `{SCRIPT}` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute.

2. Load and analyze the implementation context:
   - **REQUIRED**: Read tasks.md for the complete task list and execution plan
   - **REQUIRED**: Read plan.md for tech stack, architecture, and file structure
   - **IF EXISTS**: Read data-model.md for entities and relationships
   - **IF EXISTS**: Read contracts/ for API specifications and test requirements
   - **IF EXISTS**: Read research.md for technical decisions and constraints
   - **IF EXISTS**: Read quickstart.md for integration scenarios

3. Parse tasks.md structure and extract:
   - **Task phases**: Setup, Tests, Core, Integration, Polish
   - **Task dependencies**: Sequential vs parallel execution rules
   - **Task details**: ID, description, file paths, parallel markers [P]
   - **Execution flow**: Order and dependency requirements

4. Execute implementation following the task plan:
   - **Phase-by-phase execution**: Complete each phase before moving to the next
   - **Respect dependencies**: Run sequential tasks in order, parallel tasks [P] can run together
   - **Follow TDD approach**: Execute test tasks before their corresponding implementation tasks
   - **File-based coordination**: Tasks affecting the same files must run sequentially
   - **Validation checkpoints**: Verify each phase completion before proceeding

5. Implementation execution rules:
   - **Setup first**: Initialize project structure, dependencies, configuration
   - **Tests before code**: If tests are required, write them for contracts, entities, and integration scenarios before implementing
   - **Core development**: Implement models, services, CLI commands, endpoints
   - **Integration work**: Database connections, middleware, logging, external services
   - **Polish and validation**: Unit tests, performance optimization, documentation

6. Progress tracking and error handling:
   - Report progress after each completed task
   - Halt execution if any non-parallel task fails
   - For parallel tasks [P], continue with successful tasks, report failed ones
   - Provide clear error messages with context for debugging
   - Suggest next steps if implementation cannot proceed
   - **IMPORTANT** For completed tasks, make sure to mark the task off as [X] in the tasks file.

7. Completion validation:
   - Verify all required tasks are completed
   - Check that implemented features match the original specification
   - Validate that tests pass and coverage meets requirements
   - Confirm the implementation follows the technical plan
   - Report final status with summary of completed work

Note: This command assumes a complete task breakdown exists in tasks.md. If tasks are incomplete or missing, suggest running `/tasks` first to regenerate the task list.

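Steps 3 and 6 above assume tasks.md lines can be parsed for IDs, `[P]` parallel markers, and completion checkboxes that get flipped to `[X]` as work finishes. The sketch below assumes a common `- [ ] T001 [P] description` line format; the exact format is defined by the tasks template, not by this diff, so treat the regex as illustrative.

```python
import re

TASK_RE = re.compile(r"^- \[(?P<done>[ xX])\] (?P<id>T\d+)\s*(?P<par>\[P\])?\s*(?P<desc>.*)$")

def parse_tasks(lines: list[str]) -> list[dict]:
    """Extract task ID, parallel marker and completion state from tasks.md lines."""
    tasks = []
    for line in lines:
        m = TASK_RE.match(line.strip())
        if m:
            tasks.append({
                "id": m["id"],
                "parallel": bool(m["par"]),
                "done": m["done"].lower() == "x",
                "description": m["desc"].strip(),
            })
    return tasks

def mark_done(line: str) -> str:
    """Flip a task's checkbox to [X] once it has been completed."""
    return re.sub(r"^- \[ \]", "- [X]", line, count=1)
```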
@@ -5,16 +5,9 @@ scripts:
  ps: scripts/powershell/setup-plan.ps1 -Json
---

The user input to you can be provided directly by the agent or as a command argument - you **MUST** consider it before proceeding with the prompt (if not empty).

User input:

$ARGUMENTS

Given the implementation details provided as an argument, do this:

1. Run `{SCRIPT}` from the repo root and parse JSON for FEATURE_SPEC, IMPL_PLAN, SPECS_DIR, BRANCH. All future file paths must be absolute.
   - BEFORE proceeding, inspect FEATURE_SPEC for a `## Clarifications` section with at least one `Session` subheading. If missing or clearly ambiguous areas remain (vague adjectives, unresolved critical choices), PAUSE and instruct the user to run `/clarify` first to reduce rework. Only continue if: (a) Clarifications exist OR (b) an explicit user override is provided (e.g., "proceed without clarification"). Do not attempt to fabricate clarifications yourself.
2. Read and analyze the feature specification to understand:
   - The feature requirements and user stories
   - Functional and non-functional requirements
@@ -26,7 +19,7 @@ Given the implementation details provided as an argument, do this:
4. Execute the implementation plan template:
   - Load `/templates/plan-template.md` (already copied to IMPL_PLAN path)
   - Set Input path to FEATURE_SPEC
   - Run the Execution Flow (main) function steps 1-9
   - Run the Execution Flow (main) function steps 1-10
   - The template is self-contained and executable
   - Follow error handling and gate checks as specified
   - Let the template guide artifact generation in $SPECS_DIR:
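The bullet added to step 1 above gates `/plan` on a `## Clarifications` section containing at least one `Session` subheading. A tiny sketch of that check is shown below; the function name and regex-based approach are assumptions about how an implementation could read the rule.

```python
import re
from pathlib import Path

def has_clarifications(feature_spec: Path) -> bool:
    """True when spec.md has a ## Clarifications section with a ### Session heading."""
    text = feature_spec.read_text(encoding="utf-8")
    section = re.search(r"^## Clarifications\s*$(?P<body>.*?)(?=^## |\Z)",
                        text, flags=re.MULTILINE | re.DOTALL)
    return bool(section and re.search(r"^### Session ", section["body"], flags=re.MULTILINE))
```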
Some files were not shown because too many files have changed in this diff.