Mirror of https://github.com/github/spec-kit.git (synced 2026-03-17 02:43:08 +00:00)

Compare commits: v0.1.10 ... 5c0bedb410 (32 commits)
| SHA1 |
|---|
| 5c0bedb410 |
| d92798d5b0 |
| ee922cbde9 |
| 1df24f1953 |
| 3033834d64 |
| 4b00078907 |
| 2d72f85790 |
| 855ac838b8 |
| a8ec87e3c2 |
| 9d6c05ad5b |
| 3ef12cae3e |
| 8618d0a53e |
| 71e6b4da4a |
| ad74334a85 |
| 8c3982d65b |
| 13dec1de05 |
| d0a112c60f |
| c84756b7f3 |
| 524affca79 |
| 32c6e7f40c |
| 9cf33e81cc |
| 254e9bbdf0 |
| 6757c90dbd |
| f6264d4ef4 |
| dd8dbf6344 |
| bf8fb125ad |
| 2b92ab444d |
| abe1b7085c |
| bfaca2c449 |
| 78ed453e38 |
| 658ab2a38c |
| 2c41d3627e |
@@ -50,8 +50,6 @@
"kilocode.Kilo-Code",
// Roo Code
"RooVeterinaryInc.roo-cline",
// Amazon Developer Q
"AmazonWebServices.amazon-q-vscode",
// Claude Code
"anthropic.claude-code"
],
@@ -51,32 +51,33 @@ echo -e "\n🤖 Installing OpenCode CLI..."
run_command "npm install -g opencode-ai@latest"
echo "✅ Done"

echo -e "\n🤖 Installing Amazon Q CLI..."
# 👉🏾 https://docs.aws.amazon.com/amazonq/latest/qdeveloper-ug/command-line-verify-download.html
echo -e "\n🤖 Installing Kiro CLI..."
# https://kiro.dev/docs/cli/
KIRO_INSTALLER_URL="https://cli.kiro.dev/install"
KIRO_INSTALLER_SHA256="7487a65cf310b7fb59b357c4b5e6e3f3259d383f4394ecedb39acf70f307cffb"
KIRO_INSTALLER_PATH="$(mktemp)"

run_command "curl --proto '=https' --tlsv1.2 -sSf 'https://desktop-release.q.us-east-1.amazonaws.com/latest/q-x86_64-linux.zip' -o 'q.zip'"
run_command "curl --proto '=https' --tlsv1.2 -sSf 'https://desktop-release.q.us-east-1.amazonaws.com/latest/q-x86_64-linux.zip.sig' -o 'q.zip.sig'"
cat > amazonq-public-key.asc << 'EOF'
-----BEGIN PGP PUBLIC KEY BLOCK-----
cleanup_kiro_installer() {
  rm -f "$KIRO_INSTALLER_PATH"
}
trap cleanup_kiro_installer EXIT

mDMEZig60RYJKwYBBAHaRw8BAQdAy/+G05U5/EOA72WlcD4WkYn5SInri8pc4Z6D
BKNNGOm0JEFtYXpvbiBRIENMSSBUZWFtIDxxLWNsaUBhbWF6b24uY29tPoiZBBMW
CgBBFiEEmvYEF+gnQskUPgPsUNx6jcJMVmcFAmYoOtECGwMFCQPCZwAFCwkIBwIC
IgIGFQoJCAsCBBYCAwECHgcCF4AACgkQUNx6jcJMVmef5QD/QWWEGG/cOnbDnp68
SJXuFkwiNwlH2rPw9ZRIQMnfAS0A/0V6ZsGB4kOylBfc7CNfzRFGtovdBBgHqA6P
zQ/PNscGuDgEZig60RIKKwYBBAGXVQEFAQEHQC4qleONMBCq3+wJwbZSr0vbuRba
D1xr4wUPn4Avn4AnAwEIB4h+BBgWCgAmFiEEmvYEF+gnQskUPgPsUNx6jcJMVmcF
AmYoOtECGwwFCQPCZwAACgkQUNx6jcJMVmchMgEA6l3RveCM0YHAGQaSFMkguoAo
vK6FgOkDawgP0NPIP2oA/jIAO4gsAntuQgMOsPunEdDeji2t+AhV02+DQIsXZpoB
=f8yY
-----END PGP PUBLIC KEY BLOCK-----
EOF
run_command "gpg --batch --import amazonq-public-key.asc"
run_command "gpg --verify q.zip.sig q.zip"
run_command "unzip -q q.zip"
run_command "chmod +x ./q/install.sh"
run_command "./q/install.sh --no-confirm"
run_command "rm -rf ./q q.zip q.zip.sig amazonq-public-key.asc"
run_command "curl -fsSL \"$KIRO_INSTALLER_URL\" -o \"$KIRO_INSTALLER_PATH\""
run_command "echo \"$KIRO_INSTALLER_SHA256 $KIRO_INSTALLER_PATH\" | sha256sum -c -"

run_command "bash \"$KIRO_INSTALLER_PATH\""

kiro_binary=""
if command -v kiro-cli >/dev/null 2>&1; then
  kiro_binary="kiro-cli"
elif command -v kiro >/dev/null 2>&1; then
  kiro_binary="kiro"
else
  echo -e "\033[0;31m[ERROR] Kiro CLI installation did not create 'kiro-cli' or 'kiro' in PATH.\033[0m" >&2
  exit 1
fi

run_command "$kiro_binary --help > /dev/null"
echo "✅ Done"

echo -e "\n🤖 Installing CodeBuddy CLI..."
.github/ISSUE_TEMPLATE/agent_request.yml (vendored, 2 changed lines)
@@ -8,7 +8,7 @@ body:
value: |
Thanks for requesting a new agent! Before submitting, please check if the agent is already supported.

**Currently supported agents**: Claude Code, Gemini CLI, GitHub Copilot, Cursor, Qwen Code, opencode, Codex CLI, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy, Qoder CLI, Amazon Q Developer CLI, Amp, SHAI, IBM Bob, Antigravity
**Currently supported agents**: Claude Code, Gemini CLI, GitHub Copilot, Cursor, Qwen Code, opencode, Codex CLI, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy, Qoder CLI, Kiro CLI, Amp, SHAI, IBM Bob, Antigravity

- type: input
  id: agent-name
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 2 changed lines)
@@ -75,7 +75,7 @@ body:
- Roo Code
- CodeBuddy
- Qoder CLI
- Amazon Q Developer CLI
- Kiro CLI
- Amp
- SHAI
- IBM Bob
.github/ISSUE_TEMPLATE/feature_request.yml (vendored, 2 changed lines)
@@ -69,7 +69,7 @@ body:
- Roo Code
- CodeBuddy
- Qoder CLI
- Amazon Q Developer CLI
- Kiro CLI
- Amp
- SHAI
- IBM Bob
.github/workflows/RELEASE-PROCESS.md (vendored, new file, 191 lines)
@@ -0,0 +1,191 @@
# Release Process

This document describes the automated release process for Spec Kit.

## Overview

The release process is split into two workflows to ensure version consistency:

1. **Release Trigger Workflow** (`release-trigger.yml`) - Manages versioning and triggers release
2. **Release Workflow** (`release.yml`) - Builds and publishes artifacts

This separation ensures that git tags always point to commits with the correct version in `pyproject.toml`.

## Before Creating a Release

**Important**: Write clear, descriptive commit messages!

### How CHANGELOG.md Works

The CHANGELOG is **automatically generated** from your git commit messages:

1. **During Development**: Write clear, descriptive commit messages:
   ```bash
   git commit -m "feat: Add new authentication feature"
   git commit -m "fix: Resolve timeout issue in API client (#123)"
   git commit -m "docs: Update installation instructions"
   ```

2. **When Releasing**: The release trigger workflow automatically:
   - Finds all commits since the last release tag
   - Formats them as changelog entries
   - Inserts them into CHANGELOG.md
   - Commits the updated changelog before creating the new tag

### Commit Message Best Practices

Good commit messages make good changelogs:

- **Be descriptive**: "Add user authentication" not "Update files"
- **Reference issues/PRs**: Include `(#123)` for automated linking
- **Use conventional commits** (optional): `feat:`, `fix:`, `docs:`, `chore:`
- **Keep it concise**: One line is ideal, details go in commit body

**Example commits that become good changelog entries:**

```
fix: prepend YAML frontmatter to Cursor .mdc files (#1699)
feat: add generic agent support with customizable command directories (#1639)
docs: document dual-catalog system for extensions (#1689)
```

## Creating a Release

### Option 1: Auto-Increment (Recommended for patches)

1. Go to **Actions** → **Release Trigger**
2. Click **Run workflow**
3. Leave the version field **empty**
4. Click **Run workflow**

The workflow will:

- Auto-increment the patch version (e.g., `0.1.10` → `0.1.11`)
- Update `pyproject.toml`
- Update `CHANGELOG.md` by adding a new section for the release based on commits since the last tag
- Commit changes to a `chore/release-vX.Y.Z` branch
- Create and push the git tag from that branch
- Open a PR to merge the version bump into `main`
- Trigger the release workflow automatically via the tag push

### Option 2: Manual Version (For major/minor bumps)

1. Go to **Actions** → **Release Trigger**
2. Click **Run workflow**
3. Enter the desired version (e.g., `0.2.0` or `v0.2.0`)
4. Click **Run workflow**

The workflow will:

- Use your specified version
- Update `pyproject.toml`
- Update `CHANGELOG.md` by adding a new section for the release based on commits since the last tag
- Commit changes to a `chore/release-vX.Y.Z` branch
- Create and push the git tag from that branch
- Open a PR to merge the version bump into `main`
- Trigger the release workflow automatically via the tag push

## What Happens Next

Once the release trigger workflow completes:

1. A `chore/release-vX.Y.Z` branch is pushed with the version bump commit
2. The git tag is pushed, pointing to that commit
3. The **Release Workflow** is automatically triggered by the tag push
4. Release artifacts are built for all supported agents
5. A GitHub Release is created with all assets
6. A PR is opened to merge the version bump branch into `main`

> **Note**: Merge the auto-opened PR after the release is published to keep `main` in sync.

## Workflow Details

### Release Trigger Workflow

**File**: `.github/workflows/release-trigger.yml`

**Trigger**: Manual (`workflow_dispatch`)

**Permissions Required**: `contents: write`

**Steps**:

1. Checkout repository
2. Determine version (manual or auto-increment)
3. Check if tag already exists (prevents duplicates)
4. Create `chore/release-vX.Y.Z` branch
5. Update `pyproject.toml`
6. Update `CHANGELOG.md` from git commits
7. Commit changes
8. Push branch and tag
9. Open PR to merge version bump into `main`

### Release Workflow

**File**: `.github/workflows/release.yml`

**Trigger**: Tag push (`v*`)

**Permissions Required**: `contents: write`

**Steps**:

1. Checkout repository at tag
2. Extract version from tag name
3. Check if release already exists
4. Build release package variants (all agents × shell/powershell)
5. Generate release notes from commits
6. Create GitHub Release with all assets

## Version Constraints

- Tags must follow format: `v{MAJOR}.{MINOR}.{PATCH}`
- Example valid versions: `v0.1.11`, `v0.2.0`, `v1.0.0`
- Auto-increment only bumps patch version
- Cannot create duplicate tags (workflow will fail)

## Benefits of This Approach

✅ **Version Consistency**: Git tags point to commits with matching `pyproject.toml` version

✅ **Single Source of Truth**: Version set once, used everywhere

✅ **Prevents Drift**: No more manual version synchronization needed

✅ **Clean Separation**: Versioning logic separate from artifact building

✅ **Flexibility**: Supports both auto-increment and manual versioning

## Troubleshooting

### No Commits Since Last Release

If you run the release trigger workflow when there are no new commits since the last tag:

- The workflow will still succeed
- The CHANGELOG will show "- Initial release" if it's the first release
- Or it will be empty if there are no commits
- Consider adding meaningful commits before releasing

**Best Practice**: Use descriptive commit messages - they become your changelog!

### Tag Already Exists

If you see "Error: Tag vX.Y.Z already exists!", you need to:

- Choose a different version number, or
- Delete the existing tag if it was created in error

### Release Workflow Didn't Trigger

Check that:

- The release trigger workflow completed successfully
- The tag was pushed (check repository tags)
- The release workflow is enabled in Actions settings

### Version Mismatch

If `pyproject.toml` doesn't match the latest tag:

- Run the release trigger workflow to sync versions
- Or manually update `pyproject.toml` and push changes before running the release trigger

## Legacy Behavior (Pre-v0.1.10)

Before this change, the release workflow:

- Created tags automatically on main branch pushes
- Updated `pyproject.toml` AFTER creating the tag
- Resulted in tags pointing to commits with outdated versions

This has been fixed in v0.1.10+.
.github/workflows/release-trigger.yml (vendored, new file, 161 lines)
@@ -0,0 +1,161 @@
name: Release Trigger

on:
  workflow_dispatch:
    inputs:
      version:
        description: 'Version to release (e.g., 0.1.11). Leave empty to auto-increment patch version.'
        required: false
        type: string

jobs:
  bump-version:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v6
        with:
          fetch-depth: 0
          token: ${{ secrets.RELEASE_PAT }}

      - name: Configure Git
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"

      - name: Determine version
        id: version
        env:
          INPUT_VERSION: ${{ github.event.inputs.version }}
        run: |
          if [[ -n "$INPUT_VERSION" ]]; then
            # Manual version specified - strip optional v prefix
            VERSION="${INPUT_VERSION#v}"
            # Validate strict semver format to prevent injection
            if [[ ! "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
              echo "Error: Invalid version format '$VERSION'. Must be X.Y.Z (e.g. 1.2.3 or v1.2.3)"
              exit 1
            fi
            echo "version=$VERSION" >> $GITHUB_OUTPUT
            echo "tag=v$VERSION" >> $GITHUB_OUTPUT
            echo "Using manual version: $VERSION"
          else
            # Auto-increment patch version
            LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
            echo "Latest tag: $LATEST_TAG"

            # Extract version number and increment
            VERSION=$(echo $LATEST_TAG | sed 's/v//')
            IFS='.' read -ra VERSION_PARTS <<< "$VERSION"
            MAJOR=${VERSION_PARTS[0]:-0}
            MINOR=${VERSION_PARTS[1]:-0}
            PATCH=${VERSION_PARTS[2]:-0}

            # Increment patch version
            PATCH=$((PATCH + 1))
            NEW_VERSION="$MAJOR.$MINOR.$PATCH"

            echo "version=$NEW_VERSION" >> $GITHUB_OUTPUT
            echo "tag=v$NEW_VERSION" >> $GITHUB_OUTPUT
            echo "Auto-incremented version: $NEW_VERSION"
          fi

      - name: Check if tag already exists
        run: |
          if git rev-parse "${{ steps.version.outputs.tag }}" >/dev/null 2>&1; then
            echo "Error: Tag ${{ steps.version.outputs.tag }} already exists!"
            exit 1
          fi

      - name: Create release branch
        run: |
          BRANCH="chore/release-${{ steps.version.outputs.tag }}"
          git checkout -b "$BRANCH"
          echo "branch=$BRANCH" >> $GITHUB_ENV

      - name: Update pyproject.toml
        run: |
          sed -i "s/version = \".*\"/version = \"${{ steps.version.outputs.version }}\"/" pyproject.toml
          echo "Updated pyproject.toml to version ${{ steps.version.outputs.version }}"

      - name: Update CHANGELOG.md
        run: |
          if [ -f "CHANGELOG.md" ]; then
            DATE=$(date +%Y-%m-%d)

            # Get the previous tag to compare commits
            PREVIOUS_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "")

            echo "Generating changelog from commits..."
            if [[ -n "$PREVIOUS_TAG" ]]; then
              echo "Changes since $PREVIOUS_TAG"
              COMMITS=$(git log --oneline "$PREVIOUS_TAG"..HEAD --no-merges --pretty=format:"- %s" 2>/dev/null || echo "- Initial release")
            else
              echo "No previous tag found - this is the first release"
              COMMITS="- Initial release"
            fi

            # Create new changelog entry
            {
              head -n 8 CHANGELOG.md
              echo ""
              echo "## [${{ steps.version.outputs.version }}] - $DATE"
              echo ""
              echo "### Changed"
              echo ""
              echo "$COMMITS"
              echo ""
              tail -n +9 CHANGELOG.md
            } > CHANGELOG.md.tmp
            mv CHANGELOG.md.tmp CHANGELOG.md

            echo "✅ Updated CHANGELOG.md with commits since $PREVIOUS_TAG"
          else
            echo "No CHANGELOG.md found"
          fi

      - name: Commit version bump
        run: |
          if [ -f "CHANGELOG.md" ]; then
            git add pyproject.toml CHANGELOG.md
          else
            git add pyproject.toml
          fi

          if git diff --cached --quiet; then
            echo "No changes to commit"
          else
            git commit -m "chore: bump version to ${{ steps.version.outputs.version }}"
            echo "Changes committed"
          fi

      - name: Create and push tag
        run: |
          git tag -a "${{ steps.version.outputs.tag }}" -m "Release ${{ steps.version.outputs.tag }}"
          git push origin "${{ env.branch }}"
          git push origin "${{ steps.version.outputs.tag }}"
          echo "Branch ${{ env.branch }} and tag ${{ steps.version.outputs.tag }} pushed"

      - name: Open pull request
        env:
          GITHUB_TOKEN: ${{ secrets.RELEASE_PAT }}
        run: |
          gh pr create \
            --base main \
            --head "${{ env.branch }}" \
            --title "chore: bump version to ${{ steps.version.outputs.version }}" \
            --body "Automated version bump to ${{ steps.version.outputs.version }}.

          This PR was created by the Release Trigger workflow. The git tag \`${{ steps.version.outputs.tag }}\` has already been pushed and the release artifacts are being built.

          Merge this PR to record the version bump and changelog update on \`main\`."

      - name: Summary
        run: |
          echo "✅ Version bumped to ${{ steps.version.outputs.version }}"
          echo "✅ Tag ${{ steps.version.outputs.tag }} created and pushed"
          echo "✅ PR opened to merge version bump into main"
          echo "🚀 Release workflow is building artifacts from the tag"
.github/workflows/release.yml (vendored, 52 changed lines)
@@ -2,68 +2,60 @@ name: Create Release

on:
  push:
    branches: [ main ]
    paths:
      - 'memory/**'
      - 'scripts/**'
      - 'src/**'
      - 'templates/**'
      - '.github/workflows/**'
  workflow_dispatch:
    tags:
      - 'v*'

jobs:
  release:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v6
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}
      - name: Get latest tag
        id: get_tag

      - name: Extract version from tag
        id: version
        run: |
          chmod +x .github/workflows/scripts/get-next-version.sh
          .github/workflows/scripts/get-next-version.sh
          VERSION=${GITHUB_REF#refs/tags/}
          echo "tag=$VERSION" >> $GITHUB_OUTPUT
          echo "Building release for $VERSION"

      - name: Check if release already exists
        id: check_release
        run: |
          chmod +x .github/workflows/scripts/check-release-exists.sh
          .github/workflows/scripts/check-release-exists.sh ${{ steps.get_tag.outputs.new_version }}
          .github/workflows/scripts/check-release-exists.sh ${{ steps.version.outputs.tag }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Create release package variants
        if: steps.check_release.outputs.exists == 'false'
        run: |
          chmod +x .github/workflows/scripts/create-release-packages.sh
          .github/workflows/scripts/create-release-packages.sh ${{ steps.get_tag.outputs.new_version }}
          .github/workflows/scripts/create-release-packages.sh ${{ steps.version.outputs.tag }}

      - name: Generate release notes
        if: steps.check_release.outputs.exists == 'false'
        id: release_notes
        run: |
          chmod +x .github/workflows/scripts/generate-release-notes.sh
          .github/workflows/scripts/generate-release-notes.sh ${{ steps.get_tag.outputs.new_version }} ${{ steps.get_tag.outputs.latest_tag }}
          # Get the previous tag for changelog generation
          PREVIOUS_TAG=$(git describe --tags --abbrev=0 ${{ steps.version.outputs.tag }}^ 2>/dev/null || echo "")
          # Default to v0.0.0 if no previous tag is found (e.g., first release)
          if [ -z "$PREVIOUS_TAG" ]; then
            PREVIOUS_TAG="v0.0.0"
          fi
          .github/workflows/scripts/generate-release-notes.sh ${{ steps.version.outputs.tag }} "$PREVIOUS_TAG"

      - name: Create GitHub Release
        if: steps.check_release.outputs.exists == 'false'
        run: |
          chmod +x .github/workflows/scripts/create-github-release.sh
          .github/workflows/scripts/create-github-release.sh ${{ steps.get_tag.outputs.new_version }}
          .github/workflows/scripts/create-github-release.sh ${{ steps.version.outputs.tag }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Update version in pyproject.toml (for release artifacts only)
        if: steps.check_release.outputs.exists == 'false'
        run: |
          chmod +x .github/workflows/scripts/update-version.sh
          .github/workflows/scripts/update-version.sh ${{ steps.get_tag.outputs.new_version }}
      - name: Commit version bump to main
        if: steps.check_release.outputs.exists == 'false'
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
          git add pyproject.toml
          git diff --cached --quiet || git commit -m "chore: bump version to ${{ steps.get_tag.outputs.new_version }} [skip ci]"
          git push
@@ -46,12 +46,16 @@ gh release create "$VERSION" \
  .genreleases/spec-kit-template-amp-ps-"$VERSION".zip \
  .genreleases/spec-kit-template-shai-sh-"$VERSION".zip \
  .genreleases/spec-kit-template-shai-ps-"$VERSION".zip \
  .genreleases/spec-kit-template-q-sh-"$VERSION".zip \
  .genreleases/spec-kit-template-q-ps-"$VERSION".zip \
  .genreleases/spec-kit-template-tabnine-sh-"$VERSION".zip \
  .genreleases/spec-kit-template-tabnine-ps-"$VERSION".zip \
  .genreleases/spec-kit-template-kiro-cli-sh-"$VERSION".zip \
  .genreleases/spec-kit-template-kiro-cli-ps-"$VERSION".zip \
  .genreleases/spec-kit-template-agy-sh-"$VERSION".zip \
  .genreleases/spec-kit-template-agy-ps-"$VERSION".zip \
  .genreleases/spec-kit-template-bob-sh-"$VERSION".zip \
  .genreleases/spec-kit-template-bob-ps-"$VERSION".zip \
  .genreleases/spec-kit-template-vibe-sh-"$VERSION".zip \
  .genreleases/spec-kit-template-vibe-ps-"$VERSION".zip \
  .genreleases/spec-kit-template-generic-sh-"$VERSION".zip \
  .genreleases/spec-kit-template-generic-ps-"$VERSION".zip \
  --title "Spec Kit Templates - $VERSION_NO_V" \
@@ -14,7 +14,7 @@

.PARAMETER Agents
Comma or space separated subset of agents to build (default: all)
Valid agents: claude, gemini, copilot, cursor-agent, qwen, opencode, windsurf, codex, kilocode, auggie, roo, codebuddy, amp, q, bob, qodercli, shai, agy, generic
Valid agents: claude, gemini, copilot, cursor-agent, qwen, opencode, windsurf, codex, kilocode, auggie, roo, codebuddy, amp, kiro-cli, bob, qodercli, shai, tabnine, agy, vibe, generic

.PARAMETER Scripts
Comma or space separated subset of script types to build (default: both)

@@ -335,9 +335,9 @@ function Build-Variant {
  $cmdDir = Join-Path $baseDir ".agents/commands"
  Generate-Commands -Agent 'amp' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'q' {
  $cmdDir = Join-Path $baseDir ".amazonq/prompts"
  Generate-Commands -Agent 'q' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
'kiro-cli' {
  $cmdDir = Join-Path $baseDir ".kiro/prompts"
  Generate-Commands -Agent 'kiro-cli' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'bob' {
  $cmdDir = Join-Path $baseDir ".bob/commands"

@@ -347,10 +347,31 @@ function Build-Variant {
  $cmdDir = Join-Path $baseDir ".qoder/commands"
  Generate-Commands -Agent 'qodercli' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'shai' {
  $cmdDir = Join-Path $baseDir ".shai/commands"
  Generate-Commands -Agent 'shai' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'tabnine' {
  $cmdDir = Join-Path $baseDir ".tabnine/agent/commands"
  Generate-Commands -Agent 'tabnine' -Extension 'toml' -ArgFormat '{{args}}' -OutputDir $cmdDir -ScriptVariant $Script
  $tabnineTemplate = Join-Path 'agent_templates' 'tabnine/TABNINE.md'
  if (Test-Path $tabnineTemplate) { Copy-Item $tabnineTemplate (Join-Path $baseDir 'TABNINE.md') }
}
'agy' {
  $cmdDir = Join-Path $baseDir ".agent/workflows"
  Generate-Commands -Agent 'agy' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'generic' {
  $cmdDir = Join-Path $baseDir ".speckit/commands"
  Generate-Commands -Agent 'generic' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'vibe' {
  $cmdDir = Join-Path $baseDir ".vibe/prompts"
  Generate-Commands -Agent 'vibe' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
default {
  throw "Unsupported agent '$Agent'."
}
}

# Create zip archive

@@ -360,7 +381,7 @@ function Build-Variant {
}

# Define all agents and scripts
$AllAgents = @('claude', 'gemini', 'copilot', 'cursor-agent', 'qwen', 'opencode', 'windsurf', 'codex', 'kilocode', 'auggie', 'roo', 'codebuddy', 'amp', 'q', 'bob', 'qodercli', 'shai', 'agy', 'generic')
$AllAgents = @('claude', 'gemini', 'copilot', 'cursor-agent', 'qwen', 'opencode', 'windsurf', 'codex', 'kilocode', 'auggie', 'roo', 'codebuddy', 'amp', 'kiro-cli', 'bob', 'qodercli', 'shai', 'tabnine', 'agy', 'vibe', 'generic')
$AllScripts = @('sh', 'ps')

function Normalize-List {

@@ -425,4 +446,4 @@ foreach ($agent in $AgentList) {
  Write-Host "`nArchives in ${GenReleasesDir}:"
  Get-ChildItem -Path $GenReleasesDir -Filter "spec-kit-template-*-${Version}.zip" | ForEach-Object {
    Write-Host " $($_.Name)"
  }
}
@@ -6,7 +6,7 @@ set -euo pipefail
# Usage: .github/workflows/scripts/create-release-packages.sh <version>
# Version argument should include leading 'v'.
# Optionally set AGENTS and/or SCRIPTS env vars to limit what gets built.
# AGENTS : space or comma separated subset of: claude gemini copilot cursor-agent qwen opencode windsurf codex amp shai bob generic (default: all)
# AGENTS : space or comma separated subset of: claude gemini copilot cursor-agent qwen opencode windsurf codex kilocode auggie roo codebuddy amp shai tabnine kiro-cli agy bob vibe qodercli generic (default: all)
# SCRIPTS : space or comma separated subset of: sh ps (default: both)
# Examples:
#   AGENTS=claude SCRIPTS=sh $0 v0.2.0

@@ -155,7 +155,7 @@ build_variant() {

# NOTE: We substitute {ARGS} internally. Outward tokens differ intentionally:
#  * Markdown/prompt (claude, copilot, cursor-agent, opencode): $ARGUMENTS
#  * TOML (gemini, qwen): {{args}}
#  * TOML (gemini, qwen, tabnine): {{args}}
# This keeps formats readable without extra abstraction.

case $agent in

@@ -212,15 +212,22 @@ build_variant() {
shai)
  mkdir -p "$base_dir/.shai/commands"
  generate_commands shai md "\$ARGUMENTS" "$base_dir/.shai/commands" "$script" ;;
q)
  mkdir -p "$base_dir/.amazonq/prompts"
  generate_commands q md "\$ARGUMENTS" "$base_dir/.amazonq/prompts" "$script" ;;
tabnine)
  mkdir -p "$base_dir/.tabnine/agent/commands"
  generate_commands tabnine toml "{{args}}" "$base_dir/.tabnine/agent/commands" "$script"
  [[ -f agent_templates/tabnine/TABNINE.md ]] && cp agent_templates/tabnine/TABNINE.md "$base_dir/TABNINE.md" ;;
kiro-cli)
  mkdir -p "$base_dir/.kiro/prompts"
  generate_commands kiro-cli md "\$ARGUMENTS" "$base_dir/.kiro/prompts" "$script" ;;
agy)
  mkdir -p "$base_dir/.agent/workflows"
  generate_commands agy md "\$ARGUMENTS" "$base_dir/.agent/workflows" "$script" ;;
bob)
  mkdir -p "$base_dir/.bob/commands"
  generate_commands bob md "\$ARGUMENTS" "$base_dir/.bob/commands" "$script" ;;
vibe)
  mkdir -p "$base_dir/.vibe/prompts"
  generate_commands vibe md "\$ARGUMENTS" "$base_dir/.vibe/prompts" "$script" ;;
generic)
  mkdir -p "$base_dir/.speckit/commands"
  generate_commands generic md "\$ARGUMENTS" "$base_dir/.speckit/commands" "$script" ;;

@@ -230,7 +237,7 @@ build_variant() {
}

# Determine agent list
ALL_AGENTS=(claude gemini copilot cursor-agent qwen opencode windsurf codex kilocode auggie roo codebuddy amp shai q agy bob qodercli generic)
ALL_AGENTS=(claude gemini copilot cursor-agent qwen opencode windsurf codex kilocode auggie roo codebuddy amp shai tabnine kiro-cli agy bob vibe qodercli generic)
ALL_SCRIPTS=(sh ps)

norm_list() {

@@ -277,4 +284,3 @@ done

echo "Archives in $GENRELEASES_DIR:"
ls -1 "$GENRELEASES_DIR"/spec-kit-template-*-"${NEW_VERSION}".zip
.github/workflows/scripts/simulate-release.sh (vendored, new executable file, 161 lines)
@@ -0,0 +1,161 @@
#!/usr/bin/env bash
set -euo pipefail

# simulate-release.sh
# Simulate the release process locally without pushing to GitHub
# Usage: simulate-release.sh [version]
# If version is omitted, auto-increments patch version

# Colors for output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

echo -e "${BLUE}🧪 Simulating Release Process Locally${NC}"
echo "======================================"
echo ""

# Step 1: Determine version
if [[ -n "${1:-}" ]]; then
  VERSION="${1#v}"
  TAG="v$VERSION"
  echo -e "${GREEN}📝 Using manual version: $VERSION${NC}"
else
  LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
  echo -e "${BLUE}Latest tag: $LATEST_TAG${NC}"

  VERSION=$(echo $LATEST_TAG | sed 's/v//')
  IFS='.' read -ra VERSION_PARTS <<< "$VERSION"
  MAJOR=${VERSION_PARTS[0]:-0}
  MINOR=${VERSION_PARTS[1]:-0}
  PATCH=${VERSION_PARTS[2]:-0}

  PATCH=$((PATCH + 1))
  VERSION="$MAJOR.$MINOR.$PATCH"
  TAG="v$VERSION"
  echo -e "${GREEN}📝 Auto-incremented to: $VERSION${NC}"
fi

echo ""

# Step 2: Check if tag exists
if git rev-parse "$TAG" >/dev/null 2>&1; then
  echo -e "${RED}❌ Error: Tag $TAG already exists!${NC}"
  echo "   Please use a different version or delete the tag first."
  exit 1
fi
echo -e "${GREEN}✓ Tag $TAG is available${NC}"

# Step 3: Backup current state
echo ""
echo -e "${YELLOW}💾 Creating backup of current state...${NC}"
BACKUP_DIR=$(mktemp -d)
cp pyproject.toml "$BACKUP_DIR/pyproject.toml.bak"
cp CHANGELOG.md "$BACKUP_DIR/CHANGELOG.md.bak"
echo -e "${GREEN}✓ Backup created at: $BACKUP_DIR${NC}"

# Step 4: Update pyproject.toml
echo ""
echo -e "${YELLOW}📝 Updating pyproject.toml...${NC}"
sed -i.tmp "s/version = \".*\"/version = \"$VERSION\"/" pyproject.toml
rm -f pyproject.toml.tmp
echo -e "${GREEN}✓ Updated pyproject.toml to version $VERSION${NC}"

# Step 5: Update CHANGELOG.md
echo ""
echo -e "${YELLOW}📝 Updating CHANGELOG.md...${NC}"
DATE=$(date +%Y-%m-%d)

# Get the previous tag to compare commits
PREVIOUS_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "")

if [[ -n "$PREVIOUS_TAG" ]]; then
  echo "  Generating changelog from commits since $PREVIOUS_TAG"
  # Get commits since last tag, format as bullet points
  COMMITS=$(git log --oneline "$PREVIOUS_TAG"..HEAD --no-merges --pretty=format:"- %s" 2>/dev/null || echo "- Initial release")
else
  echo "  No previous tag found - this is the first release"
  COMMITS="- Initial release"
fi

# Create temp file with new entry
{
  head -n 8 CHANGELOG.md
  echo ""
  echo "## [$VERSION] - $DATE"
  echo ""
  echo "### Changed"
  echo ""
  echo "$COMMITS"
  echo ""
  tail -n +9 CHANGELOG.md
} > CHANGELOG.md.tmp
mv CHANGELOG.md.tmp CHANGELOG.md
echo -e "${GREEN}✓ Updated CHANGELOG.md with commits since $PREVIOUS_TAG${NC}"

# Step 6: Show what would be committed
echo ""
echo -e "${YELLOW}📋 Changes that would be committed:${NC}"
git diff pyproject.toml CHANGELOG.md

# Step 7: Create temporary tag (no push)
echo ""
echo -e "${YELLOW}🏷️ Creating temporary local tag...${NC}"
git tag -a "$TAG" -m "Simulated release $TAG" 2>/dev/null || true
echo -e "${GREEN}✓ Tag $TAG created locally${NC}"

# Step 8: Simulate release artifact creation
echo ""
echo -e "${YELLOW}📦 Simulating release package creation...${NC}"
echo "  (High-level simulation only; packaging script is not executed)"
echo ""

# Check if script exists and is executable
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [[ -x "$SCRIPT_DIR/create-release-packages.sh" ]]; then
  echo -e "${BLUE}In a real release, the following command would be run to create packages:${NC}"
  echo "  $SCRIPT_DIR/create-release-packages.sh \"$TAG\""
  echo ""
  echo "This simulation does not enumerate individual package files to avoid"
  echo "drifting from the actual behavior of create-release-packages.sh."
else
  echo -e "${RED}⚠️ create-release-packages.sh not found or not executable${NC}"
fi

# Step 9: Simulate release notes generation
echo ""
echo -e "${YELLOW}📄 Simulating release notes generation...${NC}"
echo ""
PREVIOUS_TAG=$(git describe --tags --abbrev=0 $TAG^ 2>/dev/null || echo "")
if [[ -n "$PREVIOUS_TAG" ]]; then
  echo -e "${BLUE}Changes since $PREVIOUS_TAG:${NC}"
  git log --oneline "$PREVIOUS_TAG".."$TAG" | head -n 10
  echo ""
else
  echo -e "${BLUE}No previous tag found - this would be the first release${NC}"
fi

# Step 10: Summary
echo ""
echo -e "${GREEN}🎉 Simulation Complete!${NC}"
echo "======================================"
echo ""
echo -e "${BLUE}Summary:${NC}"
echo "  Version: $VERSION"
echo "  Tag: $TAG"
echo "  Backup: $BACKUP_DIR"
echo ""
echo -e "${YELLOW}⚠️ SIMULATION ONLY - NO CHANGES PUSHED${NC}"
echo ""
echo -e "${BLUE}Next steps:${NC}"
echo "  1. Review the changes above"
echo "  2. To keep changes: git add pyproject.toml CHANGELOG.md && git commit"
echo "  3. To discard changes: git checkout pyproject.toml CHANGELOG.md && git tag -d $TAG"
echo "  4. To restore from backup: cp $BACKUP_DIR/* ."
echo ""
echo -e "${BLUE}To run the actual release:${NC}"
echo "  Go to: https://github.com/github/spec-kit/actions/workflows/release-trigger.yml"
echo "  Click 'Run workflow' and enter version: $VERSION"
echo ""
AGENTS.md (16 changed lines)
@@ -44,9 +44,10 @@ Specify supports multiple AI agents by generating agent-specific command files a
| **Roo Code** | `.roo/rules/` | Markdown | N/A (IDE-based) | Roo Code IDE |
| **CodeBuddy CLI** | `.codebuddy/commands/` | Markdown | `codebuddy` | CodeBuddy CLI |
| **Qoder CLI** | `.qoder/commands/` | Markdown | `qodercli` | Qoder CLI |
| **Amazon Q Developer CLI** | `.amazonq/prompts/` | Markdown | `q` | Amazon Q Developer CLI |
| **Kiro CLI** | `.kiro/prompts/` | Markdown | `kiro-cli` | Kiro CLI |
| **Amp** | `.agents/commands/` | Markdown | `amp` | Amp CLI |
| **SHAI** | `.shai/commands/` | Markdown | `shai` | SHAI CLI |
| **Tabnine CLI** | `.tabnine/agent/commands/` | TOML | `tabnine` | Tabnine CLI |
| **IBM Bob** | `.bob/commands/` | Markdown | N/A (IDE-based) | IBM Bob IDE |
| **Generic** | User-specified via `--ai-commands-dir` | Markdown | N/A | Bring your own agent |

@@ -86,7 +87,7 @@ This eliminates the need for special-case mappings throughout the codebase.
- `folder`: Directory where agent-specific files are stored (relative to project root)
- `commands_subdir`: Subdirectory name within the agent folder where command/prompt files are stored (default: `"commands"`)
  - Most agents use `"commands"` (e.g., `.claude/commands/`)
  - Some agents use alternative names: `"agents"` (copilot), `"workflows"` (windsurf, kilocode, agy), `"prompts"` (codex, q), `"command"` (opencode - singular)
  - Some agents use alternative names: `"agents"` (copilot), `"workflows"` (windsurf, kilocode, agy), `"prompts"` (codex, kiro-cli), `"command"` (opencode - singular)
  - This field enables `--ai-skills` to locate command templates correctly for skill generation
- `install_url`: Installation documentation URL (set to `None` for IDE-based agents)
- `requires_cli`: Whether the agent requires a CLI tool check during initialization (a sketch of a full entry follows below)
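To make the field list above concrete, here is a hypothetical sketch of what a single agent entry for Kiro CLI could look like. Only the four fields and the `.kiro/prompts/` and install-URL values are taken from this comparison; the registry variable name and exact dictionary shape inside `specify_cli` are assumptions.

```python
# Hypothetical sketch only; the real AGENT_CONFIG structure in specify_cli may differ.
AGENT_CONFIG = {
    "kiro-cli": {
        "folder": ".kiro",             # directory for agent-specific files (relative to project root)
        "commands_subdir": "prompts",  # kiro-cli uses "prompts" instead of the default "commands"
        "install_url": "https://kiro.dev/docs/cli/",  # would be None for IDE-based agents
        "requires_cli": True,          # init should check that the kiro-cli tool is installed
    },
}
```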
@@ -96,7 +97,7 @@ This eliminates the need for special-case mappings throughout the codebase.
Update the `--ai` parameter help text in the `init()` command to include the new agent:

```python
ai_assistant: str = typer.Option(None, "--ai", help="AI assistant to use: claude, gemini, copilot, cursor-agent, qwen, opencode, codex, windsurf, kilocode, auggie, codebuddy, new-agent-cli, or q"),
ai_assistant: str = typer.Option(None, "--ai", help="AI assistant to use: claude, gemini, copilot, cursor-agent, qwen, opencode, codex, windsurf, kilocode, auggie, codebuddy, new-agent-cli, or kiro-cli"),
```

Also update any function docstrings, examples, and error messages that list available agents.

@@ -117,7 +118,7 @@ Modify `.github/workflows/scripts/create-release-packages.sh`:
##### Add to ALL_AGENTS array

```bash
ALL_AGENTS=(claude gemini copilot cursor-agent qwen opencode windsurf q)
ALL_AGENTS=(claude gemini copilot cursor-agent qwen opencode windsurf kiro-cli)
```

##### Add case statement for directory structure

@@ -317,11 +318,12 @@ Require a command-line tool to be installed:
- **Cursor**: `cursor-agent` CLI
- **Qwen Code**: `qwen` CLI
- **opencode**: `opencode` CLI
- **Amazon Q Developer CLI**: `q` CLI
- **Kiro CLI**: `kiro-cli` CLI
- **CodeBuddy CLI**: `codebuddy` CLI
- **Qoder CLI**: `qodercli` CLI
- **Amp**: `amp` CLI
- **SHAI**: `shai` CLI
- **Tabnine CLI**: `tabnine` CLI

### IDE-Based Agents

@@ -335,7 +337,7 @@ Work within integrated development environments:

### Markdown Format

Used by: Claude, Cursor, opencode, Windsurf, Amazon Q Developer, Amp, SHAI, IBM Bob
Used by: Claude, Cursor, opencode, Windsurf, Kiro CLI, Amp, SHAI, IBM Bob

**Standard format:**

@@ -360,7 +362,7 @@ Command content with {SCRIPT} and $ARGUMENTS placeholders.

### TOML Format

Used by: Gemini, Qwen
Used by: Gemini, Qwen, Tabnine

```toml
description = "Command description"
CHANGELOG.md (133 changed lines)
@@ -7,6 +7,139 @@ Recent changes to the Specify CLI and templates are documented here.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [0.2.0] - 2026-03-09

### Changed

- fix: sync agent list comments with actual supported agents (#1785)
- feat(extensions): support multiple active catalogs simultaneously (#1720)
- Pavel/add tabnine cli support (#1503)
- Add Understanding extension to community catalog (#1778)
- Add ralph extension to community catalog (#1780)
- Update README with project initialization instructions (#1772)
- feat: add review extension to community catalog (#1775)
- Add fleet extension to community catalog (#1771)
- Integration of Mistral vibe support into speckit (#1725)
- fix: Remove duplicate options in specify.md (#1765)
- fix: use global branch numbering instead of per-short-name detection (#1757)
- Add Community Walkthroughs section to README (#1766)
- feat(extensions): add Jira Integration to community catalog (#1764)
- Add Azure DevOps Integration extension to community catalog (#1734)
- Fix docs: update Antigravity link and add initialization example (#1748)
- fix: wire after_tasks and after_implement hook events into command templates (#1702)
- make c ignores consistent with c++ (#1747)
- chore: bump version to 0.1.13 (#1746)
- feat: add kiro-cli and AGENT_CONFIG consistency coverage (#1690)
- feat: add verify extension to community catalog (#1726)
- Add Retrospective Extension to community catalog README table (#1741)
- fix(scripts): add empty description validation and branch checkout error handling (#1559)
- fix: correct Copilot extension command registration (#1724)
- fix(implement): remove Makefile from C ignore patterns (#1558)
- Add sync extension to community catalog (#1728)
- fix(checklist): clarify file handling behavior for append vs create (#1556)
- fix(clarify): correct conflicting question limit from 10 to 5 (#1557)
- chore: bump version to 0.1.12 (#1737)
- fix: use RELEASE_PAT so tag push triggers release workflow (#1736)
- fix: release-trigger uses release branch + PR instead of direct push to main (#1733)
- fix: Split release process to sync pyproject.toml version with git tags (#1732)

## [0.1.14] - 2026-03-09

### Added

- feat: add Tabnine CLI agent support
- **Multi-Catalog Support (#1707)**: Extension catalog system now supports multiple active catalogs simultaneously via a catalog stack
  - New `specify extension catalog list` command lists all active catalogs with name, URL, priority, and `install_allowed` status
  - New `specify extension catalog add` and `specify extension catalog remove` commands for project-scoped catalog management
  - Default built-in stack includes `catalog.json` (default, installable) and `catalog.community.json` (community, discovery only) — community extensions are now surfaced in search results out of the box
  - `specify extension search` aggregates results across all active catalogs, annotating each result with source catalog
  - `specify extension add` enforces `install_allowed` policy — extensions from discovery-only catalogs cannot be installed directly
  - Project-level `.specify/extension-catalogs.yml` and user-level `~/.specify/extension-catalogs.yml` config files supported, with project-level taking precedence
  - `SPECKIT_CATALOG_URL` environment variable still works for backward compatibility (replaces full stack with single catalog)
  - All catalog URLs require HTTPS (HTTP allowed for localhost development)
  - New `CatalogEntry` dataclass in `extensions.py` for catalog stack representation
  - Per-URL hash-based caching for non-default catalogs; legacy cache preserved for default catalog
  - Higher-priority catalogs win on merge conflicts (same extension id in multiple catalogs)
  - 13 new tests covering catalog stack resolution, merge conflicts, URL validation, and `install_allowed` enforcement
  - Updated RFC, Extension User Guide, and Extension API Reference documentation

## [0.1.13] - 2026-03-03

### Changed

- feat: add kiro-cli and AGENT_CONFIG consistency coverage (#1690)
- feat: add verify extension to community catalog (#1726)
- Add Retrospective Extension to community catalog README table (#1741)
- fix(scripts): add empty description validation and branch checkout error handling (#1559)
- fix: correct Copilot extension command registration (#1724)
- fix(implement): remove Makefile from C ignore patterns (#1558)
- Add sync extension to community catalog (#1728)
- fix(checklist): clarify file handling behavior for append vs create (#1556)
- fix(clarify): correct conflicting question limit from 10 to 5 (#1557)
- chore: bump version to 0.1.12 (#1737)
- fix: use RELEASE_PAT so tag push triggers release workflow (#1736)
- fix: release-trigger uses release branch + PR instead of direct push to main (#1733)
- fix: Split release process to sync pyproject.toml version with git tags (#1732)

## [0.1.13] - 2026-03-03

### Fixed

- **Copilot Extension Commands Not Visible**: Fixed extension commands not appearing in GitHub Copilot when installed via `specify extension add --dev`
  - Changed Copilot file extension from `.md` to `.agent.md` in `CommandRegistrar.AGENT_CONFIGS` so Copilot recognizes agent files
  - Added generation of companion `.prompt.md` files in `.github/prompts/` during extension command registration, matching the release packaging behavior
  - Added cleanup of `.prompt.md` companion files when removing extensions via `specify extension remove`
- Fixed a syntax regression in `src/specify_cli/__init__.py` in `_build_ai_assistant_help()` that broke `ruff` and `pytest` collection in CI.

## [0.1.12] - 2026-03-02

### Changed

- fix: use RELEASE_PAT so tag push triggers release workflow (#1736)
- fix: release-trigger uses release branch + PR instead of direct push to main (#1733)
- fix: Split release process to sync pyproject.toml version with git tags (#1732)

## [0.1.10] - 2026-03-02

### Fixed

- **Version Sync Issue (#1721)**: Fixed version mismatch between `pyproject.toml` and git release tags
  - Split release process into two workflows: `release-trigger.yml` for version management and `release.yml` for artifact building
  - Version bump now happens BEFORE tag creation, ensuring tags point to commits with correct version
  - Supports both manual version specification and auto-increment (patch version)
  - Git tags now accurately reflect the version in `pyproject.toml` at that commit
  - Prevents confusion when installing from source

## [0.1.9] - 2026-02-28

### Changed

- Updated dependency: bumped astral-sh/setup-uv from 6 to 7

## [0.1.8] - 2026-02-28

### Changed

- Updated dependency: bumped actions/setup-python from 5 to 6

## [0.1.7] - 2026-02-27

### Changed

- Updated outdated GitHub Actions versions
- Documented dual-catalog system for extensions

### Fixed

- Fixed version command in documentation

### Added

- Added Cleanup Extension to README
- Added retrospective extension to community catalog

## [0.1.6] - 2026-02-23

### Fixed
README.md (38 changed lines)
@@ -22,6 +22,7 @@
- [🤔 What is Spec-Driven Development?](#-what-is-spec-driven-development)
- [⚡ Get Started](#-get-started)
- [📽️ Video Overview](#️-video-overview)
- [🚶 Community Walkthroughs](#-community-walkthroughs)
- [🤖 Supported AI Agents](#-supported-ai-agents)
- [🔧 Specify CLI Reference](#-specify-cli-reference)
- [📚 Core Philosophy](#-core-philosophy)

@@ -79,7 +80,13 @@ uv tool install specify-cli --force --from git+https://github.com/github/spec-ki
Run directly without installing:

```bash
# Create new project
uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME>

# Or initialize in existing project
uvx --from git+https://github.com/github/spec-kit.git specify init . --ai claude
# or
uvx --from git+https://github.com/github/spec-kit.git specify init --here --ai claude
```

**Benefits of persistent installation:**

@@ -139,12 +146,22 @@ Want to see Spec Kit in action? Watch our [video overview](https://www.youtube.c

[](https://www.youtube.com/watch?v=a9eR1xsfvHg&pp=0gcJCckJAYcqIYzv)

## 🚶 Community Walkthroughs

See Spec-Driven Development in action across different scenarios with these community-contributed walkthroughs:

- **[Greenfield .NET CLI tool](https://github.com/mnriem/spec-kit-dotnet-cli-demo)** — Builds a Timezone Utility as a .NET single-binary CLI tool from a blank directory, covering the full spec-kit workflow: constitution, specify, plan, tasks, and multi-pass implement using GitHub Copilot agents.

- **[Greenfield Spring Boot + React platform](https://github.com/mnriem/spec-kit-spring-react-demo)** — Builds an LLM performance analytics platform (REST API, graphs, iteration tracking) from scratch using Spring Boot, embedded React, PostgreSQL, and Docker Compose, with a clarify step and a cross-artifact consistency analysis pass included.

- **[Brownfield ASP.NET CMS extension](https://github.com/mnriem/spec-kit-aspnet-brownfield-demo)** — Extends an existing open-source .NET CMS (CarrotCakeCMS-Core) with two new features — cross-platform Docker Compose infrastructure and a token-authenticated headless REST API — demonstrating how spec-kit fits into existing codebases without prior specs or a constitution.

## 🤖 Supported AI Agents

| Agent | Support | Notes |
| ----- | ------- | ----- |
| [Qoder CLI](https://qoder.com/cli) | ✅ | |
| [Amazon Q Developer CLI](https://aws.amazon.com/developer/learning/q-developer-cli/) | ⚠️ | Amazon Q Developer CLI [does not support](https://github.com/aws/amazon-q-developer-cli/issues/3064) custom arguments for slash commands. |
| [Kiro CLI](https://kiro.dev/docs/cli/) | ✅ | Use `--ai kiro-cli` (alias: `--ai kiro`) |
| [Amp](https://ampcode.com/) | ✅ | |
| [Auggie CLI](https://docs.augmentcode.com/cli/overview) | ✅ | |
| [Claude Code](https://www.anthropic.com/claude-code) | ✅ | |

@@ -160,8 +177,10 @@ Want to see Spec Kit in action? Watch our [video overview](https://www.youtube.c
| [Qwen Code](https://github.com/QwenLM/qwen-code) | ✅ | |
| [Roo Code](https://roocode.com/) | ✅ | |
| [SHAI (OVHcloud)](https://github.com/ovh/shai) | ✅ | |
| [Tabnine CLI](https://docs.tabnine.com/main/getting-started/tabnine-cli) | ✅ | |
| [Mistral Vibe](https://github.com/mistralai/mistral-vibe) | ✅ | |
| [Windsurf](https://windsurf.com/) | ✅ | |
| [Antigravity (agy)](https://agy.ai/) | ✅ | |
| [Antigravity (agy)](https://antigravity.google/) | ✅ | |
| Generic | ✅ | Bring your own agent — use `--ai generic --ai-commands-dir <path>` for unsupported agents |

## 🔧 Specify CLI Reference

@@ -173,14 +192,14 @@ The `specify` command supports the following options:
| Command | Description |
| ------- | ----------- |
| `init` | Initialize a new Specify project from the latest template |
| `check` | Check for installed tools (`git`, `claude`, `gemini`, `code`/`code-insiders`, `cursor-agent`, `windsurf`, `qwen`, `opencode`, `codex`, `shai`, `qodercli`) |
| `check` | Check for installed tools (`git`, `claude`, `gemini`, `code`/`code-insiders`, `cursor-agent`, `windsurf`, `qwen`, `opencode`, `codex`, `kiro-cli`, `shai`, `qodercli`, `vibe`) |

### `specify init` Arguments & Options

| Argument/Option | Type | Description |
| --------------- | ---- | ----------- |
| `<project-name>` | Argument | Name for your new project directory (optional if using `--here`, or use `.` for current directory) |
| `--ai` | Option | AI assistant to use: `claude`, `gemini`, `copilot`, `cursor-agent`, `qwen`, `opencode`, `codex`, `windsurf`, `kilocode`, `auggie`, `roo`, `codebuddy`, `amp`, `shai`, `q`, `agy`, `bob`, `qodercli`, or `generic` (requires `--ai-commands-dir`) |
| `--ai` | Option | AI assistant to use: `claude`, `gemini`, `copilot`, `cursor-agent`, `qwen`, `opencode`, `codex`, `windsurf`, `kilocode`, `auggie`, `roo`, `codebuddy`, `amp`, `shai`, `kiro-cli` (`kiro` alias), `agy`, `bob`, `qodercli`, `vibe`, or `generic` (requires `--ai-commands-dir`) |
| `--ai-commands-dir` | Option | Directory for agent command files (required with `--ai generic`, e.g. `.myagent/commands/`) |
| `--script` | Option | Script variant to use: `sh` (bash/zsh) or `ps` (PowerShell) |
| `--ignore-agent-tools` | Flag | Skip checks for AI agent tools like Claude Code |

@@ -210,15 +229,24 @@ specify init my-project --ai qodercli
# Initialize with Windsurf support
specify init my-project --ai windsurf

# Initialize with Kiro CLI support
specify init my-project --ai kiro-cli

# Initialize with Amp support
specify init my-project --ai amp

# Initialize with SHAI support
specify init my-project --ai shai

# Initialize with Mistral Vibe support
specify init my-project --ai vibe

# Initialize with IBM Bob support
specify init my-project --ai bob

# Initialize with Antigravity support
specify init my-project --ai agy

# Initialize with an unsupported agent (generic / bring your own agent)
specify init my-project --ai generic --ai-commands-dir .myagent/commands/

@@ -393,7 +421,7 @@ specify init . --force --ai claude
specify init --here --force --ai claude
```

The CLI will check if you have Claude Code, Gemini CLI, Cursor CLI, Qwen CLI, opencode, Codex CLI, Qoder CLI, or Amazon Q Developer CLI installed. If you do not, or you prefer to get the templates without checking for the right tools, use `--ignore-agent-tools` with your command:
The CLI will check if you have Claude Code, Gemini CLI, Cursor CLI, Qwen CLI, opencode, Codex CLI, Qoder CLI, Tabnine CLI, Kiro CLI, or Mistral Vibe installed. If you do not, or you prefer to get the templates without checking for the right tools, use `--ignore-agent-tools` with your command:

```bash
specify init <project_name> --ai claude --ignore-agent-tools
@@ -243,6 +243,34 @@ manager.check_compatibility(
|
||||
) # Raises: CompatibilityError if incompatible
|
||||
```
|
||||
|
||||
### CatalogEntry
|
||||
|
||||
**Module**: `specify_cli.extensions`
|
||||
|
||||
Represents a single catalog in the active catalog stack.
|
||||
|
||||
```python
|
||||
from specify_cli.extensions import CatalogEntry
|
||||
|
||||
entry = CatalogEntry(
|
||||
url="https://example.com/catalog.json",
|
||||
name="default",
|
||||
priority=1,
|
||||
install_allowed=True,
|
||||
description="Built-in catalog of installable extensions",
|
||||
)
|
||||
```
|
||||
|
||||
**Fields**:
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `url` | `str` | Catalog URL (must use HTTPS, or HTTP for localhost) |
|
||||
| `name` | `str` | Human-readable catalog name |
|
||||
| `priority` | `int` | Sort order (lower = higher priority, wins on conflicts) |
|
||||
| `install_allowed` | `bool` | Whether extensions from this catalog can be installed |
|
||||
| `description` | `str` | Optional human-readable description of the catalog (default: empty) |
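The HTTPS rule on the `url` field can be expressed as a small check. This is only a sketch of the documented rule, not the library's actual validator; treating `127.0.0.1` as localhost is an assumption:

```python
from urllib.parse import urlparse


def is_allowed_catalog_url(url: str) -> bool:
    """Sketch of the documented rule: HTTPS anywhere, plain HTTP only for localhost."""
    parts = urlparse(url)
    if parts.scheme == "https":
        return True
    # Assumption: the loopback address counts as localhost.
    return parts.scheme == "http" and parts.hostname in ("localhost", "127.0.0.1")
```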
|
||||
|
||||
### ExtensionCatalog
|
||||
|
||||
**Module**: `specify_cli.extensions`
|
||||
@@ -253,30 +281,67 @@ from specify_cli.extensions import ExtensionCatalog
|
||||
catalog = ExtensionCatalog(project_root)
|
||||
```
|
||||
|
||||
**Class attributes**:
|
||||
|
||||
```python
|
||||
ExtensionCatalog.DEFAULT_CATALOG_URL # default catalog URL
|
||||
ExtensionCatalog.COMMUNITY_CATALOG_URL # community catalog URL
|
||||
```
|
||||
|
||||
**Methods**:
|
||||
|
||||
```python
|
||||
# Fetch catalog
|
||||
# Get the ordered list of active catalogs
|
||||
entries = catalog.get_active_catalogs() # List[CatalogEntry]
|
||||
|
||||
# Fetch catalog (primary catalog, backward compat)
|
||||
catalog_data = catalog.fetch_catalog(force_refresh: bool = False) # Dict
|
||||
|
||||
# Search extensions
|
||||
# Search extensions across all active catalogs
|
||||
# Each result includes _catalog_name and _install_allowed
|
||||
results = catalog.search(
|
||||
query: Optional[str] = None,
|
||||
tag: Optional[str] = None,
|
||||
author: Optional[str] = None,
|
||||
verified_only: bool = False
|
||||
) # Returns: List[Dict]
|
||||
) # Returns: List[Dict] — each dict includes _catalog_name, _install_allowed
|
||||
|
||||
# Get extension info
|
||||
# Get extension info (searches all active catalogs)
|
||||
# Returns None if not found; includes _catalog_name and _install_allowed
|
||||
ext_info = catalog.get_extension_info(extension_id: str) # Optional[Dict]
|
||||
|
||||
# Check cache validity
|
||||
# Check cache validity (primary catalog)
|
||||
is_valid = catalog.is_cache_valid() # bool
|
||||
|
||||
# Clear cache
|
||||
# Clear all catalog caches
|
||||
catalog.clear_cache()
|
||||
```
|
||||
|
||||
**Result annotation fields**:
|
||||
|
||||
Each extension dict returned by `search()` and `get_extension_info()` includes:
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `_catalog_name` | `str` | Name of the source catalog |
|
||||
| `_install_allowed` | `bool` | Whether installation is allowed from this catalog |
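For example, a minimal sketch (assuming the `ExtensionCatalog` API above and a spec-kit project in the current directory) that splits search results on these annotation fields:

```python
from pathlib import Path

from specify_cli.extensions import ExtensionCatalog

catalog = ExtensionCatalog(Path.cwd())

# The tag value is illustrative; search() spans all active catalogs.
results = catalog.search(tag="quality")

installable = [ext for ext in results if ext["_install_allowed"]]
discovery_only = [ext for ext in results if not ext["_install_allowed"]]

for ext in installable:
    print(f"{ext.get('id')}: installable from catalog '{ext['_catalog_name']}'")
for ext in discovery_only:
    print(f"{ext.get('id')}: discovery only (catalog '{ext['_catalog_name']}')")
```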
|
||||
|
||||
**Catalog config file** (`.specify/extension-catalogs.yml`):
|
||||
|
||||
```yaml
|
||||
catalogs:
|
||||
- name: "default"
|
||||
url: "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.json"
|
||||
priority: 1
|
||||
install_allowed: true
|
||||
description: "Built-in catalog of installable extensions"
|
||||
- name: "community"
|
||||
url: "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json"
|
||||
priority: 2
|
||||
install_allowed: false
|
||||
description: "Community-contributed extensions (discovery only)"
|
||||
```
|
||||
|
||||
### HookExecutor
|
||||
|
||||
**Module**: `specify_cli.extensions`
|
||||
@@ -543,6 +608,39 @@ EXECUTE_COMMAND: {command}
|
||||
|
||||
**Output**: List of installed extensions with metadata
|
||||
|
||||
### extension catalog list
|
||||
|
||||
**Usage**: `specify extension catalog list`
|
||||
|
||||
Lists all active catalogs in the current catalog stack, showing name, description, URL, priority, and `install_allowed` status.
|
||||
|
||||
### extension catalog add
|
||||
|
||||
**Usage**: `specify extension catalog add URL [OPTIONS]`
|
||||
|
||||
**Options**:
|
||||
|
||||
- `--name NAME` - Catalog name (required)
|
||||
- `--priority INT` - Priority (lower = higher priority, default: 10)
|
||||
- `--install-allowed / --no-install-allowed` - Allow installs from this catalog (default: false)
|
||||
- `--description TEXT` - Optional description of the catalog
|
||||
|
||||
**Arguments**:
|
||||
|
||||
- `URL` - Catalog URL (must use HTTPS)
|
||||
|
||||
Adds a catalog entry to `.specify/extension-catalogs.yml`.
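As a rough illustration (not the actual implementation, which also validates the URL first), the command performs a config update along these lines; the `internal` name and URL are the illustrative values used elsewhere in this reference, and the literal values mirror the option defaults listed above:

```python
from pathlib import Path

import yaml

config_path = Path(".specify/extension-catalogs.yml")
config = (yaml.safe_load(config_path.read_text()) or {}) if config_path.exists() else {}
catalogs = config.get("catalogs") or []

catalogs.append({
    "name": "internal",                                           # --name (required)
    "url": "https://internal.company.com/spec-kit/catalog.json",  # URL argument
    "priority": 10,                                                # --priority default
    "install_allowed": False,                                      # --no-install-allowed default
    "description": "",                                             # --description default
})

config["catalogs"] = catalogs
config_path.write_text(yaml.safe_dump(config, sort_keys=False))
```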
|
||||
|
||||
### extension catalog remove
|
||||
|
||||
**Usage**: `specify extension catalog remove NAME`
|
||||
|
||||
**Arguments**:
|
||||
|
||||
- `NAME` - Catalog name to remove
|
||||
|
||||
Removes a catalog entry from `.specify/extension-catalogs.yml`.
|
||||
|
||||
### extension add
|
||||
|
||||
**Usage**: `specify extension add EXTENSION [OPTIONS]`
|
||||
@@ -551,13 +649,13 @@ EXECUTE_COMMAND: {command}
|
||||
|
||||
- `--from URL` - Install from custom URL
|
||||
- `--dev PATH` - Install from local directory
|
||||
- `--version VERSION` - Install specific version
|
||||
- `--no-register` - Skip command registration
|
||||
|
||||
**Arguments**:
|
||||
|
||||
- `EXTENSION` - Extension name or URL
|
||||
|
||||
**Note**: Extensions from catalogs with `install_allowed: false` cannot be installed via this command.
|
||||
|
||||
### extension remove
|
||||
|
||||
**Usage**: `specify extension remove EXTENSION [OPTIONS]`
|
||||
@@ -575,6 +673,8 @@ EXECUTE_COMMAND: {command}
|
||||
|
||||
**Usage**: `specify extension search [QUERY] [OPTIONS]`
|
||||
|
||||
Searches all active catalogs simultaneously. Results include source catalog name and `install_allowed` status.
|
||||
|
||||
**Options**:
|
||||
|
||||
- `--tag TAG` - Filter by tag
|
||||
@@ -589,6 +689,8 @@ EXECUTE_COMMAND: {command}
|
||||
|
||||
**Usage**: `specify extension info EXTENSION`
|
||||
|
||||
Shows source catalog and `install_allowed` status.
|
||||
|
||||
**Arguments**:
|
||||
|
||||
- `EXTENSION` - Extension ID
|
||||
|
||||
@@ -76,7 +76,7 @@ vim .specify/extensions/jira/jira-config.yml
|
||||
|
||||
## Finding Extensions
|
||||
|
||||
**Note**: By default, `specify extension search` uses your organization's catalog (`catalog.json`). If the catalog is empty, you won't see any results. See [Extension Catalogs](#extension-catalogs) to learn how to populate your catalog from the community reference catalog.
|
||||
`specify extension search` searches **all active catalogs** simultaneously, including the community catalog by default. Results are annotated with their source catalog and install status.
|
||||
|
||||
### Browse All Extensions
|
||||
|
||||
@@ -84,7 +84,7 @@ vim .specify/extensions/jira/jira-config.yml
|
||||
specify extension search
|
||||
```
|
||||
|
||||
Shows all extensions in your organization's catalog.
|
||||
Shows all extensions across all active catalogs (default and community by default).
|
||||
|
||||
### Search by Keyword
|
||||
|
||||
@@ -402,13 +402,13 @@ In addition to extension-specific environment variables (`SPECKIT_{EXT_ID}_*`),
|
||||
|
||||
| Variable | Description | Default |
|
||||
|----------|-------------|---------|
|
||||
| `SPECKIT_CATALOG_URL` | Override the extension catalog URL | GitHub-hosted catalog |
|
||||
| `SPECKIT_CATALOG_URL` | Override the full catalog stack with a single URL (backward compat) | Built-in default stack |
|
||||
| `GH_TOKEN` / `GITHUB_TOKEN` | GitHub API token for downloads | None |
|
||||
|
||||
#### Example: Using a custom catalog for testing
|
||||
|
||||
```bash
|
||||
# Point to a local or alternative catalog
|
||||
# Point to a local or alternative catalog (replaces the full stack)
|
||||
export SPECKIT_CATALOG_URL="http://localhost:8000/catalog.json"
|
||||
|
||||
# Or use a staging catalog
|
||||
@@ -419,13 +419,76 @@ export SPECKIT_CATALOG_URL="https://example.com/staging/catalog.json"
|
||||
|
||||
## Extension Catalogs
|
||||
|
||||
For information about how Spec Kit's dual-catalog system works (`catalog.json` vs `catalog.community.json`), see the main [Extensions README](README.md#extension-catalogs).
|
||||
Spec Kit uses a **catalog stack** — an ordered list of catalogs searched simultaneously. By default, two catalogs are active:
|
||||
|
||||
| Priority | Catalog | Install Allowed | Purpose |
|
||||
|----------|---------|-----------------|---------|
|
||||
| 1 | `catalog.json` (default) | ✅ Yes | Curated extensions available for installation |
|
||||
| 2 | `catalog.community.json` (community) | ❌ No (discovery only) | Browse community extensions |
|
||||
|
||||
### Listing Active Catalogs
|
||||
|
||||
```bash
|
||||
specify extension catalog list
|
||||
```
|
||||
|
||||
### Adding a Catalog (Project-scoped)
|
||||
|
||||
```bash
|
||||
# Add an internal catalog that allows installs
|
||||
specify extension catalog add \
|
||||
--name "internal" \
|
||||
--priority 2 \
|
||||
--install-allowed \
|
||||
https://internal.company.com/spec-kit/catalog.json
|
||||
|
||||
# Add a discovery-only catalog
|
||||
specify extension catalog add \
|
||||
--name "partner" \
|
||||
--priority 5 \
|
||||
https://partner.example.com/spec-kit/catalog.json
|
||||
```
|
||||
|
||||
This creates or updates `.specify/extension-catalogs.yml`.
|
||||
|
||||
### Removing a Catalog
|
||||
|
||||
```bash
|
||||
specify extension catalog remove internal
|
||||
```
|
||||
|
||||
### Manual Config File
|
||||
|
||||
You can also edit `.specify/extension-catalogs.yml` directly:
|
||||
|
||||
```yaml
|
||||
catalogs:
|
||||
- name: "default"
|
||||
url: "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.json"
|
||||
priority: 1
|
||||
install_allowed: true
|
||||
description: "Built-in catalog of installable extensions"
|
||||
|
||||
- name: "internal"
|
||||
url: "https://internal.company.com/spec-kit/catalog.json"
|
||||
priority: 2
|
||||
install_allowed: true
|
||||
description: "Internal company extensions"
|
||||
|
||||
- name: "community"
|
||||
url: "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json"
|
||||
priority: 3
|
||||
install_allowed: false
|
||||
description: "Community-contributed extensions (discovery only)"
|
||||
```
|
||||
|
||||
A user-level equivalent lives at `~/.specify/extension-catalogs.yml`. Project-level config takes full precedence when it contains one or more catalog entries. An empty `catalogs: []` list falls back to built-in defaults.
|
||||
|
||||
## Organization Catalog Customization
|
||||
|
||||
### Why Customize Your Catalog
|
||||
|
||||
Organizations customize their `catalog.json` to:
|
||||
Organizations customize their catalogs to:
|
||||
|
||||
- **Control available extensions** - Curate which extensions your team can install
|
||||
- **Host private extensions** - Internal tools that shouldn't be public
|
||||
@@ -503,24 +566,40 @@ Options for hosting your catalog:
|
||||
|
||||
#### 3. Configure Your Environment
|
||||
|
||||
##### Option A: Environment variable (recommended for CI/CD)
|
||||
##### Option A: Catalog stack config file (recommended)
|
||||
|
||||
Add to `.specify/extension-catalogs.yml` in your project:
|
||||
|
||||
```yaml
|
||||
catalogs:
|
||||
- name: "my-org"
|
||||
url: "https://your-org.com/spec-kit/catalog.json"
|
||||
priority: 1
|
||||
install_allowed: true
|
||||
```
|
||||
|
||||
Or use the CLI:
|
||||
|
||||
```bash
|
||||
specify extension catalog add \
|
||||
--name "my-org" \
|
||||
--install-allowed \
|
||||
https://your-org.com/spec-kit/catalog.json
|
||||
```
|
||||
|
||||
##### Option B: Environment variable (recommended for CI/CD, single-catalog)
|
||||
|
||||
```bash
|
||||
# In ~/.bashrc, ~/.zshrc, or CI pipeline
|
||||
export SPECKIT_CATALOG_URL="https://your-org.com/spec-kit/catalog.json"
|
||||
```
|
||||
|
||||
##### Option B: Per-project configuration
|
||||
|
||||
Create `.env` or set in your shell before running spec-kit commands:
|
||||
|
||||
```bash
|
||||
SPECKIT_CATALOG_URL="https://your-org.com/spec-kit/catalog.json" specify extension search
|
||||
```
|
||||
|
||||
#### 4. Verify Configuration
|
||||
|
||||
```bash
|
||||
# List active catalogs
|
||||
specify extension catalog list
|
||||
|
||||
# Search should now show your catalog's extensions
|
||||
specify extension search
|
||||
|
||||
|
||||
@@ -72,8 +72,18 @@ The following community-contributed extensions are available in [`catalog.commun
|
||||
|
||||
| Extension | Purpose | URL |
|
||||
|-----------|---------|-----|
|
||||
| V-Model Extension Pack | Enforces V-Model paired generation of development specs and test specs with full traceability | [spec-kit-v-model](https://github.com/leocamello/spec-kit-v-model) |
|
||||
| Azure DevOps Integration | Sync user stories and tasks to Azure DevOps work items using OAuth authentication | [spec-kit-azure-devops](https://github.com/pragya247/spec-kit-azure-devops) |
|
||||
| Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) |
|
||||
| Fleet Orchestrator | Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases | [spec-kit-fleet](https://github.com/sharathsatish/spec-kit-fleet) |
|
||||
| Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
|
||||
| Ralph Loop | Autonomous implementation loop using AI agent CLI | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |
|
||||
| Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
|
||||
| Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
|
||||
| Spec Sync | Detect and resolve drift between specs and implementation. AI-assisted resolution with human approval | [spec-kit-sync](https://github.com/bgervin/spec-kit-sync) |
|
||||
| Understanding | Automated requirements quality analysis — 31 deterministic metrics against IEEE/ISO standards with experimental energy-based ambiguity detection | [understanding](https://github.com/Testimonial/understanding) |
|
||||
| V-Model Extension Pack | Enforces V-Model paired generation of development specs and test specs with full traceability | [spec-kit-v-model](https://github.com/leocamello/spec-kit-v-model) |
|
||||
| Verify Extension | Post-implementation quality gate that validates implemented code against specification artifacts | [spec-kit-verify](https://github.com/ismaelJimenez/spec-kit-verify) |
|
||||
|
||||
|
||||
## Adding Your Extension
|
||||
|
||||
|
||||
@@ -868,7 +868,7 @@ Spec Kit uses two catalog files with different purposes:
|
||||
|
||||
- **Purpose**: Organization's curated catalog of approved extensions
|
||||
- **Default State**: Empty by design - users populate with extensions they trust
|
||||
- **Usage**: Default catalog used by `specify extension` CLI commands
|
||||
- **Usage**: Primary catalog (priority 1, `install_allowed: true`) in the default stack
|
||||
- **Control**: Organizations maintain their own fork/version for their teams
|
||||
|
||||
#### Community Reference Catalog (`catalog.community.json`)
|
||||
@@ -879,16 +879,16 @@ Spec Kit uses two catalog files with different purposes:
|
||||
- **Verification**: Community extensions may have `verified: false` initially
|
||||
- **Status**: Active - open for community contributions
|
||||
- **Submission**: Via Pull Request following the Extension Publishing Guide
|
||||
- **Usage**: Browse to discover extensions, then copy to your `catalog.json`
|
||||
- **Usage**: Secondary catalog (priority 2, `install_allowed: false`) in the default stack — discovery only
|
||||
|
||||
**How It Works:**
|
||||
**How It Works (default stack):**
|
||||
|
||||
1. **Discover**: Browse `catalog.community.json` to find available extensions
|
||||
2. **Review**: Evaluate extensions for security, quality, and organizational fit
|
||||
3. **Curate**: Copy approved extension entries from community catalog to your `catalog.json`
|
||||
4. **Install**: Use `specify extension add <name>` (pulls from your curated catalog)
|
||||
1. **Discover**: `specify extension search` searches both catalogs — community extensions appear automatically
|
||||
2. **Review**: Evaluate community extensions for security, quality, and organizational fit
|
||||
3. **Curate**: Copy approved entries from community catalog to your `catalog.json`, or add to `.specify/extension-catalogs.yml` with `install_allowed: true`
|
||||
4. **Install**: Use `specify extension add <name>` — only allowed from `install_allowed: true` catalogs
|
||||
|
||||
This approach gives organizations full control over which extensions are available to their teams while maintaining a shared community resource for discovery.
|
||||
This approach gives organizations full control over which extensions can be installed while still providing community discoverability out of the box.
|
||||
|
||||
### Catalog Format
|
||||
|
||||
@@ -961,30 +961,92 @@ specify extension info jira
|
||||
|
||||
### Custom Catalogs
|
||||
|
||||
**⚠️ FUTURE FEATURE - NOT YET IMPLEMENTED**
|
||||
Spec Kit supports a **catalog stack** — an ordered list of catalogs that the CLI merges and searches across. This lets organizations combine an approved default catalog, internal catalogs, and community discovery, all at once.
|
||||
|
||||
The following catalog management commands are proposed design concepts but are not yet available in the current implementation:
|
||||
#### Catalog Stack Resolution
|
||||
|
||||
```bash
|
||||
# Add custom catalog (FUTURE - NOT AVAILABLE)
|
||||
specify extension add-catalog https://internal.company.com/spec-kit/catalog.json
|
||||
The active catalog stack is resolved in this order (first match wins):
|
||||
|
||||
# Set as default (FUTURE - NOT AVAILABLE)
|
||||
specify extension set-catalog --default https://internal.company.com/spec-kit/catalog.json
|
||||
1. **`SPECKIT_CATALOG_URL` environment variable** — single catalog replacing all defaults (backward compat)
|
||||
2. **Project-level `.specify/extension-catalogs.yml`** — full control for the project
|
||||
3. **User-level `~/.specify/extension-catalogs.yml`** — personal defaults
|
||||
4. **Built-in default stack** — `catalog.json` (install_allowed: true) + `catalog.community.json` (install_allowed: false)
|
||||
|
||||
# List catalogs (FUTURE - NOT AVAILABLE)
|
||||
specify extension catalogs
|
||||
#### Default Built-in Stack
|
||||
|
||||
When no config file exists, the CLI uses:
|
||||
|
||||
| Priority | Catalog | install_allowed | Purpose |
|
||||
|----------|---------|-----------------|---------|
|
||||
| 1 | `catalog.json` (default) | `true` | Curated extensions available for installation |
|
||||
| 2 | `catalog.community.json` (community) | `false` | Discovery only — browse but not install |
|
||||
|
||||
This means `specify extension search` surfaces community extensions out of the box, while `specify extension add` is still restricted to entries from catalogs with `install_allowed: true`.
|
||||
|
||||
#### `.specify/extension-catalogs.yml` Config File
|
||||
|
||||
```yaml
|
||||
catalogs:
|
||||
- name: "default"
|
||||
url: "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.json"
|
||||
priority: 1 # Highest — only approved entries can be installed
|
||||
install_allowed: true
|
||||
description: "Built-in catalog of installable extensions"
|
||||
|
||||
- name: "internal"
|
||||
url: "https://internal.company.com/spec-kit/catalog.json"
|
||||
priority: 2
|
||||
install_allowed: true
|
||||
description: "Internal company extensions"
|
||||
|
||||
- name: "community"
|
||||
url: "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json"
|
||||
priority: 3 # Lowest — discovery only, not installable
|
||||
install_allowed: false
|
||||
description: "Community-contributed extensions (discovery only)"
|
||||
```
|
||||
|
||||
**Proposed catalog priority** (future design):
|
||||
A user-level equivalent lives at `~/.specify/extension-catalogs.yml`. When a project-level config is present with one or more catalog entries, it takes full control and the built-in defaults are not applied. An empty `catalogs: []` list is treated the same as no config file, falling back to defaults.
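A minimal sketch of the resolution order described above (environment variable, then project config, then user config, then built-in defaults); this is not the library's internal code, and the paths and variable name are the documented ones:

```python
import os
from pathlib import Path

import yaml


def resolve_catalog_stack(project_root: Path) -> list[dict]:
    """Sketch of the documented resolution order; the first match wins."""
    # 1. SPECKIT_CATALOG_URL replaces the whole stack with one installable catalog.
    env_url = os.environ.get("SPECKIT_CATALOG_URL")
    if env_url:
        return [{"name": "env", "url": env_url, "priority": 1, "install_allowed": True}]

    # 2./3. Project-level config, then user-level config. An empty
    # `catalogs: []` list falls through, same as a missing file.
    for config_path in (
        project_root / ".specify" / "extension-catalogs.yml",
        Path.home() / ".specify" / "extension-catalogs.yml",
    ):
        if config_path.exists():
            config = yaml.safe_load(config_path.read_text()) or {}
            if config.get("catalogs"):
                return config["catalogs"]

    # 4. Built-in default stack: installable default plus discovery-only community.
    return [
        {"name": "default", "priority": 1, "install_allowed": True},
        {"name": "community", "priority": 2, "install_allowed": False},
    ]
```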
|
||||
|
||||
1. Project-specific catalog (`.specify/extension-catalogs.yml`) - *not implemented*
|
||||
2. User-level catalog (`~/.specify/extension-catalogs.yml`) - *not implemented*
|
||||
3. Default GitHub catalog
|
||||
#### Catalog CLI Commands
|
||||
|
||||
#### Current Implementation: SPECKIT_CATALOG_URL
|
||||
```bash
|
||||
# List active catalogs with name, URL, priority, and install_allowed
|
||||
specify extension catalog list
|
||||
|
||||
**The currently available method** for using custom catalogs is the `SPECKIT_CATALOG_URL` environment variable:
|
||||
# Add a catalog (project-scoped)
|
||||
specify extension catalog add --name "internal" --install-allowed \
|
||||
https://internal.company.com/spec-kit/catalog.json
|
||||
|
||||
# Add a discovery-only catalog
|
||||
specify extension catalog add --name "community" \
|
||||
https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json
|
||||
|
||||
# Remove a catalog
|
||||
specify extension catalog remove internal
|
||||
|
||||
# Show which catalog an extension came from
|
||||
specify extension info jira
|
||||
# → Source catalog: default
|
||||
```
|
||||
|
||||
#### Merge Conflict Resolution
|
||||
|
||||
When the same extension `id` appears in multiple catalogs, the higher-priority (lower priority number) catalog wins. Extensions from lower-priority catalogs with the same `id` are ignored.
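A rough sketch of this rule (not the library's internal code), assuming each catalog exposes its extensions keyed by `id` as in the catalog format shown earlier:

```python
def merge_catalogs(catalogs: list[dict]) -> dict[str, dict]:
    """Merge extension entries; the lowest `priority` number wins on id conflicts."""
    merged: dict[str, dict] = {}
    # Visit catalogs from lowest to highest priority (largest number first),
    # so higher-priority catalogs overwrite conflicting ids last.
    for catalog in sorted(catalogs, key=lambda c: c["priority"], reverse=True):
        for ext_id, entry in catalog.get("extensions", {}).items():
            merged[ext_id] = {
                **entry,
                "_catalog_name": catalog["name"],
                "_install_allowed": catalog["install_allowed"],
            }
    return merged
```

With the default stack, an `id` that appears in both the `default` and `community` catalogs is therefore reported with `_catalog_name: "default"`.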
|
||||
|
||||
#### `install_allowed: false` Behavior
|
||||
|
||||
Extensions from discovery-only catalogs are shown in `specify extension search` results but cannot be installed directly:
|
||||
|
||||
```
|
||||
⚠ 'linear' is available in the 'community' catalog but installation is not allowed from that catalog.
|
||||
|
||||
To enable installation, add 'linear' to an approved catalog (install_allowed: true) in .specify/extension-catalogs.yml.
|
||||
```
|
||||
|
||||
#### `SPECKIT_CATALOG_URL` (Backward Compatibility)
|
||||
|
||||
The `SPECKIT_CATALOG_URL` environment variable still works — it is treated as a single `install_allowed: true` catalog, **replacing both defaults** for full backward compatibility:
|
||||
|
||||
```bash
|
||||
# Point to your organization's catalog
|
||||
|
||||
@@ -1,8 +1,47 @@
|
||||
{
|
||||
"schema_version": "1.0",
|
||||
"updated_at": "2026-02-24T00:00:00Z",
|
||||
"updated_at": "2026-03-09T00:00:00Z",
|
||||
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json",
|
||||
"extensions": {
|
||||
"azure-devops": {
|
||||
"name": "Azure DevOps Integration",
|
||||
"id": "azure-devops",
|
||||
"description": "Sync user stories and tasks to Azure DevOps work items using OAuth authentication.",
|
||||
"author": "pragya247",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/pragya247/spec-kit-azure-devops/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/pragya247/spec-kit-azure-devops",
|
||||
"homepage": "https://github.com/pragya247/spec-kit-azure-devops",
|
||||
"documentation": "https://github.com/pragya247/spec-kit-azure-devops/blob/main/README.md",
|
||||
"changelog": "https://github.com/pragya247/spec-kit-azure-devops/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0",
|
||||
"tools": [
|
||||
{
|
||||
"name": "az",
|
||||
"version": ">=2.0.0",
|
||||
"required": true
|
||||
}
|
||||
]
|
||||
},
|
||||
"provides": {
|
||||
"commands": 1,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": [
|
||||
"azure",
|
||||
"devops",
|
||||
"project-management",
|
||||
"work-items",
|
||||
"issue-tracking"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-03T00:00:00Z",
|
||||
"updated_at": "2026-03-03T00:00:00Z"
|
||||
},
|
||||
"cleanup": {
|
||||
"name": "Cleanup Extension",
|
||||
"id": "cleanup",
|
||||
@@ -22,13 +61,112 @@
|
||||
"commands": 1,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": ["quality", "tech-debt", "review", "cleanup", "scout-rule"],
|
||||
"tags": [
|
||||
"quality",
|
||||
"tech-debt",
|
||||
"review",
|
||||
"cleanup",
|
||||
"scout-rule"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-02-22T00:00:00Z",
|
||||
"updated_at": "2026-02-22T00:00:00Z"
|
||||
},
|
||||
"fleet": {
|
||||
"name": "Fleet Orchestrator",
|
||||
"id": "fleet",
|
||||
"description": "Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases.",
|
||||
"author": "sharathsatish",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/sharathsatish/spec-kit-fleet/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/sharathsatish/spec-kit-fleet",
|
||||
"homepage": "https://github.com/sharathsatish/spec-kit-fleet",
|
||||
"documentation": "https://github.com/sharathsatish/spec-kit-fleet/blob/main/README.md",
|
||||
"changelog": "https://github.com/sharathsatish/spec-kit-fleet/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": ["orchestration", "workflow", "human-in-the-loop", "parallel"],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-06T00:00:00Z",
|
||||
"updated_at": "2026-03-06T00:00:00Z"
|
||||
},
|
||||
"jira": {
|
||||
"name": "Jira Integration",
|
||||
"id": "jira",
|
||||
"description": "Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support.",
|
||||
"author": "mbachorik",
|
||||
"version": "2.1.0",
|
||||
"download_url": "https://github.com/mbachorik/spec-kit-jira/archive/refs/tags/v2.1.0.zip",
|
||||
"repository": "https://github.com/mbachorik/spec-kit-jira",
|
||||
"homepage": "https://github.com/mbachorik/spec-kit-jira",
|
||||
"documentation": "https://github.com/mbachorik/spec-kit-jira/blob/main/README.md",
|
||||
"changelog": "https://github.com/mbachorik/spec-kit-jira/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 3,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": [
|
||||
"issue-tracking",
|
||||
"jira",
|
||||
"atlassian",
|
||||
"project-management"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-05T00:00:00Z",
|
||||
"updated_at": "2026-03-05T00:00:00Z"
|
||||
},
|
||||
"ralph": {
|
||||
"name": "Ralph Loop",
|
||||
"id": "ralph",
|
||||
"description": "Autonomous implementation loop using AI agent CLI.",
|
||||
"author": "Rubiss",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/Rubiss/spec-kit-ralph/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/Rubiss/spec-kit-ralph",
|
||||
"homepage": "https://github.com/Rubiss/spec-kit-ralph",
|
||||
"documentation": "https://github.com/Rubiss/spec-kit-ralph/blob/main/README.md",
|
||||
"changelog": "https://github.com/Rubiss/spec-kit-ralph/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0",
|
||||
"tools": [
|
||||
{
|
||||
"name": "copilot",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"name": "git",
|
||||
"required": true
|
||||
}
|
||||
]
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": ["implementation", "automation", "loop", "copilot"],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-09T00:00:00Z",
|
||||
"updated_at": "2026-03-09T00:00:00Z"
|
||||
},
|
||||
"retrospective": {
|
||||
"name": "Retrospective Extension",
|
||||
"id": "retrospective",
|
||||
@@ -48,13 +186,118 @@
|
||||
"commands": 1,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": ["retrospective", "spec-drift", "quality", "analysis", "governance"],
|
||||
"tags": [
|
||||
"retrospective",
|
||||
"spec-drift",
|
||||
"quality",
|
||||
"analysis",
|
||||
"governance"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-02-24T00:00:00Z",
|
||||
"updated_at": "2026-02-24T00:00:00Z"
|
||||
},
|
||||
"review": {
|
||||
"name": "Review Extension",
|
||||
"id": "review",
|
||||
"description": "Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification.",
|
||||
"author": "ismaelJimenez",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/ismaelJimenez/spec-kit-review/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/ismaelJimenez/spec-kit-review",
|
||||
"homepage": "https://github.com/ismaelJimenez/spec-kit-review",
|
||||
"documentation": "https://github.com/ismaelJimenez/spec-kit-review/blob/main/README.md",
|
||||
"changelog": "https://github.com/ismaelJimenez/spec-kit-review/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 7,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": ["code-review", "quality", "review", "testing", "error-handling", "type-design", "simplification"],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-06T00:00:00Z",
|
||||
"updated_at": "2026-03-06T00:00:00Z"
|
||||
},
|
||||
"sync": {
|
||||
"name": "Spec Sync",
|
||||
"id": "sync",
|
||||
"description": "Detect and resolve drift between specs and implementation. AI-assisted resolution with human approval.",
|
||||
"author": "bgervin",
|
||||
"version": "0.1.0",
|
||||
"download_url": "https://github.com/bgervin/spec-kit-sync/archive/refs/tags/v0.1.0.zip",
|
||||
"repository": "https://github.com/bgervin/spec-kit-sync",
|
||||
"homepage": "https://github.com/bgervin/spec-kit-sync",
|
||||
"documentation": "https://github.com/bgervin/spec-kit-sync/blob/main/README.md",
|
||||
"changelog": "https://github.com/bgervin/spec-kit-sync/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 5,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": [
|
||||
"sync",
|
||||
"drift",
|
||||
"validation",
|
||||
"bidirectional",
|
||||
"backfill"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-02T00:00:00Z",
|
||||
"updated_at": "2026-03-02T00:00:00Z"
|
||||
},
|
||||
"understanding": {
|
||||
"name": "Understanding",
|
||||
"id": "understanding",
|
||||
"description": "Automated requirements quality analysis — validates specs against IEEE/ISO standards using 31 deterministic metrics. Catches ambiguity, missing testability, and structural issues before they reach implementation. Includes experimental energy-based ambiguity detection using local LM token perplexity.",
|
||||
"author": "Ladislav Bihari",
|
||||
"version": "3.4.0",
|
||||
"download_url": "https://github.com/Testimonial/understanding/archive/refs/tags/v3.4.0.zip",
|
||||
"repository": "https://github.com/Testimonial/understanding",
|
||||
"homepage": "https://github.com/Testimonial/understanding",
|
||||
"documentation": "https://github.com/Testimonial/understanding/blob/main/extension/README.md",
|
||||
"changelog": "https://github.com/Testimonial/understanding/blob/main/extension/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0",
|
||||
"tools": [
|
||||
{
|
||||
"name": "understanding",
|
||||
"version": ">=3.4.0",
|
||||
"required": true
|
||||
}
|
||||
]
|
||||
},
|
||||
"provides": {
|
||||
"commands": 3,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": [
|
||||
"quality",
|
||||
"metrics",
|
||||
"requirements",
|
||||
"validation",
|
||||
"readability",
|
||||
"IEEE-830",
|
||||
"ISO-29148"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-07T00:00:00Z",
|
||||
"updated_at": "2026-03-07T00:00:00Z"
|
||||
},
|
||||
"v-model": {
|
||||
"name": "V-Model Extension Pack",
|
||||
"id": "v-model",
|
||||
@@ -74,12 +317,50 @@
|
||||
"commands": 9,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": ["v-model", "traceability", "testing", "compliance", "safety-critical"],
|
||||
"tags": [
|
||||
"v-model",
|
||||
"traceability",
|
||||
"testing",
|
||||
"compliance",
|
||||
"safety-critical"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-02-20T00:00:00Z",
|
||||
"updated_at": "2026-02-22T00:00:00Z"
|
||||
},
|
||||
"verify": {
|
||||
"name": "Verify Extension",
|
||||
"id": "verify",
|
||||
"description": "Post-implementation quality gate that validates implemented code against specification artifacts.",
|
||||
"author": "ismaelJimenez",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/ismaelJimenez/spec-kit-verify/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/ismaelJimenez/spec-kit-verify",
|
||||
"homepage": "https://github.com/ismaelJimenez/spec-kit-verify",
|
||||
"documentation": "https://github.com/ismaelJimenez/spec-kit-verify/blob/main/README.md",
|
||||
"changelog": "https://github.com/ismaelJimenez/spec-kit-verify/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 1,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": [
|
||||
"verification",
|
||||
"quality-gate",
|
||||
"implementation",
|
||||
"spec-adherence",
|
||||
"compliance"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-03T00:00:00Z",
|
||||
"updated_at": "2026-03-03T00:00:00Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[project]
|
||||
name = "specify-cli"
|
||||
version = "0.1.6"
|
||||
version = "0.2.0"
|
||||
description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)."
|
||||
requires-python = ">=3.11"
|
||||
dependencies = [
|
||||
@@ -51,4 +51,3 @@ precision = 2
|
||||
show_missing = true
|
||||
skip_covered = false
|
||||
|
||||
|
||||
|
||||
@@ -67,6 +67,13 @@ if [ -z "$FEATURE_DESCRIPTION" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Trim whitespace and validate description is not empty (e.g., user passed only whitespace)
|
||||
FEATURE_DESCRIPTION=$(echo "$FEATURE_DESCRIPTION" | xargs)
|
||||
if [ -z "$FEATURE_DESCRIPTION" ]; then
|
||||
echo "Error: Feature description cannot be empty or contain only whitespace" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Function to find the repository root by searching for existing project markers
|
||||
find_repo_root() {
|
||||
local dir="$1"
|
||||
@@ -272,7 +279,16 @@ if [ ${#BRANCH_NAME} -gt $MAX_BRANCH_LENGTH ]; then
|
||||
fi
|
||||
|
||||
if [ "$HAS_GIT" = true ]; then
|
||||
git checkout -b "$BRANCH_NAME"
|
||||
if ! git checkout -b "$BRANCH_NAME" 2>/dev/null; then
|
||||
# Check if branch already exists
|
||||
if git branch --list "$BRANCH_NAME" | grep -q .; then
|
||||
>&2 echo "Error: Branch '$BRANCH_NAME' already exists. Please use a different feature name or specify a different number with --number."
|
||||
exit 1
|
||||
else
|
||||
>&2 echo "Error: Failed to create git branch '$BRANCH_NAME'. Please check your git configuration and try again."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
else
|
||||
>&2 echo "[specify] Warning: Git repository not detected; skipped branch creation for $BRANCH_NAME"
|
||||
fi
|
||||
|
||||
@@ -30,12 +30,12 @@
|
||||
#
|
||||
# 5. Multi-Agent Support
|
||||
# - Handles agent-specific file paths and naming conventions
|
||||
# - Supports: Claude, Gemini, Copilot, Cursor, Qwen, opencode, Codex, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy CLI, Qoder CLI, Amp, SHAI, Amazon Q Developer CLI, or Antigravity
|
||||
# - Supports: Claude, Gemini, Copilot, Cursor, Qwen, opencode, Codex, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy CLI, Qoder CLI, Amp, SHAI, Tabnine CLI, Kiro CLI, Mistral Vibe, Antigravity, or Generic
|
||||
# - Can update single agents or all existing agent files
|
||||
# - Creates default Claude file if no agent files exist
|
||||
#
|
||||
# Usage: ./update-agent-context.sh [agent_type]
|
||||
# Agent types: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|q|agy|bob|qodercli
|
||||
# Agent types: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|generic
|
||||
# Leave empty to update all existing agent files
|
||||
|
||||
set -e
|
||||
@@ -73,9 +73,11 @@ CODEBUDDY_FILE="$REPO_ROOT/CODEBUDDY.md"
|
||||
QODER_FILE="$REPO_ROOT/QODER.md"
|
||||
AMP_FILE="$REPO_ROOT/AGENTS.md"
|
||||
SHAI_FILE="$REPO_ROOT/SHAI.md"
|
||||
Q_FILE="$REPO_ROOT/AGENTS.md"
|
||||
TABNINE_FILE="$REPO_ROOT/TABNINE.md"
|
||||
KIRO_FILE="$REPO_ROOT/AGENTS.md"
|
||||
AGY_FILE="$REPO_ROOT/.agent/rules/specify-rules.md"
|
||||
BOB_FILE="$REPO_ROOT/AGENTS.md"
|
||||
VIBE_FILE="$REPO_ROOT/.vibe/agents/specify-agents.md"
|
||||
|
||||
# Template file
|
||||
TEMPLATE_FILE="$REPO_ROOT/.specify/templates/agent-file-template.md"
|
||||
@@ -648,8 +650,11 @@ update_specific_agent() {
|
||||
shai)
|
||||
update_agent_file "$SHAI_FILE" "SHAI"
|
||||
;;
|
||||
q)
|
||||
update_agent_file "$Q_FILE" "Amazon Q Developer CLI"
|
||||
tabnine)
|
||||
update_agent_file "$TABNINE_FILE" "Tabnine CLI"
|
||||
;;
|
||||
kiro-cli)
|
||||
update_agent_file "$KIRO_FILE" "Kiro CLI"
|
||||
;;
|
||||
agy)
|
||||
update_agent_file "$AGY_FILE" "Antigravity"
|
||||
@@ -657,12 +662,15 @@ update_specific_agent() {
|
||||
bob)
|
||||
update_agent_file "$BOB_FILE" "IBM Bob"
|
||||
;;
|
||||
vibe)
|
||||
update_agent_file "$VIBE_FILE" "Mistral Vibe"
|
||||
;;
|
||||
generic)
|
||||
log_info "Generic agent: no predefined context file. Use the agent-specific update script for your agent."
|
||||
;;
|
||||
*)
|
||||
log_error "Unknown agent type '$agent_type'"
|
||||
log_error "Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|q|agy|bob|qodercli|generic"
|
||||
log_error "Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|generic"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
@@ -732,13 +740,18 @@ update_all_existing_agents() {
|
||||
found_agent=true
|
||||
fi
|
||||
|
||||
if [[ -f "$TABNINE_FILE" ]]; then
|
||||
update_agent_file "$TABNINE_FILE" "Tabnine CLI"
|
||||
found_agent=true
|
||||
fi
|
||||
|
||||
if [[ -f "$QODER_FILE" ]]; then
|
||||
update_agent_file "$QODER_FILE" "Qoder CLI"
|
||||
found_agent=true
|
||||
fi
|
||||
|
||||
if [[ -f "$Q_FILE" ]]; then
|
||||
update_agent_file "$Q_FILE" "Amazon Q Developer CLI"
|
||||
if [[ -f "$KIRO_FILE" ]]; then
|
||||
update_agent_file "$KIRO_FILE" "Kiro CLI"
|
||||
found_agent=true
|
||||
fi
|
||||
|
||||
@@ -750,6 +763,11 @@ update_all_existing_agents() {
|
||||
update_agent_file "$BOB_FILE" "IBM Bob"
|
||||
found_agent=true
|
||||
fi
|
||||
|
||||
if [[ -f "$VIBE_FILE" ]]; then
|
||||
update_agent_file "$VIBE_FILE" "Mistral Vibe"
|
||||
found_agent=true
|
||||
fi
|
||||
|
||||
# If no agent files exist, create a default Claude file
|
||||
if [[ "$found_agent" == false ]]; then
|
||||
@@ -774,8 +792,7 @@ print_summary() {
|
||||
fi
|
||||
|
||||
echo
|
||||
|
||||
log_info "Usage: $0 [claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|q|agy|bob|qodercli]"
|
||||
log_info "Usage: $0 [claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|generic]"
|
||||
}
|
||||
|
||||
#==============================================================================
|
||||
@@ -827,4 +844,3 @@ main() {
|
||||
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
|
||||
main "$@"
|
||||
fi
|
||||
|
||||
|
||||
@@ -35,6 +35,12 @@ if (-not $FeatureDescription -or $FeatureDescription.Count -eq 0) {
|
||||
|
||||
$featureDesc = ($FeatureDescription -join ' ').Trim()
|
||||
|
||||
# Validate description is not empty after trimming (e.g., user passed only whitespace)
|
||||
if ([string]::IsNullOrWhiteSpace($featureDesc)) {
|
||||
Write-Error "Error: Feature description cannot be empty or contain only whitespace"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Resolve repository root. Prefer git information when available, but fall back
|
||||
# to searching for repository markers so the workflow still functions in repositories that
|
||||
# were initialized with --no-git.
|
||||
@@ -242,10 +248,26 @@ if ($branchName.Length -gt $maxBranchLength) {
|
||||
}
|
||||
|
||||
if ($hasGit) {
|
||||
$branchCreated = $false
|
||||
try {
|
||||
git checkout -b $branchName | Out-Null
|
||||
git checkout -b $branchName 2>$null | Out-Null
|
||||
if ($LASTEXITCODE -eq 0) {
|
||||
$branchCreated = $true
|
||||
}
|
||||
} catch {
|
||||
Write-Warning "Failed to create git branch: $branchName"
|
||||
# Exception during git command
|
||||
}
|
||||
|
||||
if (-not $branchCreated) {
|
||||
# Check if branch already exists
|
||||
$existingBranch = git branch --list $branchName 2>$null
|
||||
if ($existingBranch) {
|
||||
Write-Error "Error: Branch '$branchName' already exists. Please use a different feature name or specify a different number with -Number."
|
||||
exit 1
|
||||
} else {
|
||||
Write-Error "Error: Failed to create git branch '$branchName'. Please check your git configuration and try again."
|
||||
exit 1
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Write-Warning "[specify] Warning: Git repository not detected; skipped branch creation for $branchName"
|
||||
|
||||
@@ -9,7 +9,7 @@ Mirrors the behavior of scripts/bash/update-agent-context.sh:
|
||||
2. Plan Data Extraction
|
||||
3. Agent File Management (create from template or update existing)
|
||||
4. Content Generation (technology stack, recent changes, timestamp)
|
||||
5. Multi-Agent Support (claude, gemini, copilot, cursor-agent, qwen, opencode, codex, windsurf, kilocode, auggie, roo, codebuddy, amp, shai, q, agy, bob, qodercli)
|
||||
5. Multi-Agent Support (claude, gemini, copilot, cursor-agent, qwen, opencode, codex, windsurf, kilocode, auggie, roo, codebuddy, amp, shai, tabnine, kiro-cli, agy, bob, vibe, qodercli, generic)
|
||||
|
||||
.PARAMETER AgentType
|
||||
Optional agent key to update a single agent. If omitted, updates all existing agent files (creating a default Claude file if none exist).
|
||||
@@ -25,7 +25,7 @@ Relies on common helper functions in common.ps1
|
||||
#>
|
||||
param(
|
||||
[Parameter(Position=0)]
|
||||
[ValidateSet('claude','gemini','copilot','cursor-agent','qwen','opencode','codex','windsurf','kilocode','auggie','roo','codebuddy','amp','shai','q','agy','bob','qodercli','generic')]
|
||||
[ValidateSet('claude','gemini','copilot','cursor-agent','qwen','opencode','codex','windsurf','kilocode','auggie','roo','codebuddy','amp','shai','tabnine','kiro-cli','agy','bob','qodercli','vibe','generic')]
|
||||
[string]$AgentType
|
||||
)
|
||||
|
||||
@@ -58,9 +58,11 @@ $CODEBUDDY_FILE = Join-Path $REPO_ROOT 'CODEBUDDY.md'
|
||||
$QODER_FILE = Join-Path $REPO_ROOT 'QODER.md'
|
||||
$AMP_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
|
||||
$SHAI_FILE = Join-Path $REPO_ROOT 'SHAI.md'
|
||||
$Q_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
|
||||
$TABNINE_FILE = Join-Path $REPO_ROOT 'TABNINE.md'
|
||||
$KIRO_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
|
||||
$AGY_FILE = Join-Path $REPO_ROOT '.agent/rules/specify-rules.md'
|
||||
$BOB_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
|
||||
$VIBE_FILE = Join-Path $REPO_ROOT '.vibe/agents/specify-agents.md'
|
||||
|
||||
$TEMPLATE_FILE = Join-Path $REPO_ROOT '.specify/templates/agent-file-template.md'
|
||||
|
||||
@@ -399,11 +401,13 @@ function Update-SpecificAgent {
|
||||
'qodercli' { Update-AgentFile -TargetFile $QODER_FILE -AgentName 'Qoder CLI' }
|
||||
'amp' { Update-AgentFile -TargetFile $AMP_FILE -AgentName 'Amp' }
|
||||
'shai' { Update-AgentFile -TargetFile $SHAI_FILE -AgentName 'SHAI' }
|
||||
'q' { Update-AgentFile -TargetFile $Q_FILE -AgentName 'Amazon Q Developer CLI' }
|
||||
'tabnine' { Update-AgentFile -TargetFile $TABNINE_FILE -AgentName 'Tabnine CLI' }
|
||||
'kiro-cli' { Update-AgentFile -TargetFile $KIRO_FILE -AgentName 'Kiro CLI' }
|
||||
'agy' { Update-AgentFile -TargetFile $AGY_FILE -AgentName 'Antigravity' }
|
||||
'bob' { Update-AgentFile -TargetFile $BOB_FILE -AgentName 'IBM Bob' }
|
||||
'vibe' { Update-AgentFile -TargetFile $VIBE_FILE -AgentName 'Mistral Vibe' }
|
||||
'generic' { Write-Info 'Generic agent: no predefined context file. Use the agent-specific update script for your agent.' }
|
||||
default { Write-Err "Unknown agent type '$Type'"; Write-Err 'Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|q|agy|bob|qodercli|generic'; return $false }
|
||||
default { Write-Err "Unknown agent type '$Type'"; Write-Err 'Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|generic'; return $false }
|
||||
}
|
||||
}
|
||||
|
||||
@@ -423,9 +427,11 @@ function Update-AllExistingAgents {
|
||||
if (Test-Path $CODEBUDDY_FILE) { if (-not (Update-AgentFile -TargetFile $CODEBUDDY_FILE -AgentName 'CodeBuddy CLI')) { $ok = $false }; $found = $true }
|
||||
if (Test-Path $QODER_FILE) { if (-not (Update-AgentFile -TargetFile $QODER_FILE -AgentName 'Qoder CLI')) { $ok = $false }; $found = $true }
|
||||
if (Test-Path $SHAI_FILE) { if (-not (Update-AgentFile -TargetFile $SHAI_FILE -AgentName 'SHAI')) { $ok = $false }; $found = $true }
|
||||
if (Test-Path $Q_FILE) { if (-not (Update-AgentFile -TargetFile $Q_FILE -AgentName 'Amazon Q Developer CLI')) { $ok = $false }; $found = $true }
|
||||
if (Test-Path $TABNINE_FILE) { if (-not (Update-AgentFile -TargetFile $TABNINE_FILE -AgentName 'Tabnine CLI')) { $ok = $false }; $found = $true }
|
||||
if (Test-Path $KIRO_FILE) { if (-not (Update-AgentFile -TargetFile $KIRO_FILE -AgentName 'Kiro CLI')) { $ok = $false }; $found = $true }
|
||||
if (Test-Path $AGY_FILE) { if (-not (Update-AgentFile -TargetFile $AGY_FILE -AgentName 'Antigravity')) { $ok = $false }; $found = $true }
|
||||
if (Test-Path $BOB_FILE) { if (-not (Update-AgentFile -TargetFile $BOB_FILE -AgentName 'IBM Bob')) { $ok = $false }; $found = $true }
|
||||
if (Test-Path $VIBE_FILE) { if (-not (Update-AgentFile -TargetFile $VIBE_FILE -AgentName 'Mistral Vibe')) { $ok = $false }; $found = $true }
|
||||
if (-not $found) {
|
||||
Write-Info 'No existing agent files found, creating default Claude file...'
|
||||
if (-not (Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code')) { $ok = $false }
|
||||
@@ -440,7 +446,7 @@ function Print-Summary {
|
||||
if ($NEW_FRAMEWORK) { Write-Host " - Added framework: $NEW_FRAMEWORK" }
|
||||
if ($NEW_DB -and $NEW_DB -ne 'N/A') { Write-Host " - Added database: $NEW_DB" }
|
||||
Write-Host ''
|
||||
Write-Info 'Usage: ./update-agent-context.ps1 [-AgentType claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|q|agy|bob|qodercli|generic]'
|
||||
Write-Info 'Usage: ./update-agent-context.ps1 [-AgentType claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|generic]'
|
||||
}
|
||||
|
||||
function Main {
|
||||
@@ -461,4 +467,3 @@ function Main {
|
||||
}
|
||||
|
||||
Main
|
||||
|
||||
|
||||
@@ -216,11 +216,11 @@ AGENT_CONFIG = {
|
||||
"install_url": None, # IDE-based
|
||||
"requires_cli": False,
|
||||
},
|
||||
"q": {
|
||||
"name": "Amazon Q Developer CLI",
|
||||
"folder": ".amazonq/",
|
||||
"kiro-cli": {
|
||||
"name": "Kiro CLI",
|
||||
"folder": ".kiro/",
|
||||
"commands_subdir": "prompts", # Special: uses prompts/ not commands/
|
||||
"install_url": "https://aws.amazon.com/developer/learning/q-developer-cli/",
|
||||
"install_url": "https://kiro.dev/docs/cli/",
|
||||
"requires_cli": True,
|
||||
},
|
||||
"amp": {
|
||||
@@ -237,6 +237,13 @@ AGENT_CONFIG = {
|
||||
"install_url": "https://github.com/ovh/shai",
|
||||
"requires_cli": True,
|
||||
},
|
||||
"tabnine": {
|
||||
"name": "Tabnine CLI",
|
||||
"folder": ".tabnine/agent/",
|
||||
"commands_subdir": "commands",
|
||||
"install_url": "https://docs.tabnine.com/main/getting-started/tabnine-cli",
|
||||
"requires_cli": True,
|
||||
},
|
||||
"agy": {
|
||||
"name": "Antigravity",
|
||||
"folder": ".agent/",
|
||||
@@ -251,6 +258,13 @@ AGENT_CONFIG = {
|
||||
"install_url": None, # IDE-based
|
||||
"requires_cli": False,
|
||||
},
|
||||
"vibe": {
|
||||
"name": "Mistral Vibe",
|
||||
"folder": ".vibe/",
|
||||
"commands_subdir": "prompts",
|
||||
"install_url": "https://github.com/mistralai/mistral-vibe",
|
||||
"requires_cli": True,
|
||||
},
|
||||
"generic": {
|
||||
"name": "Generic (bring your own agent)",
|
||||
"folder": None, # Set dynamically via --ai-commands-dir
|
||||
@@ -260,6 +274,34 @@ AGENT_CONFIG = {
|
||||
},
|
||||
}
|
||||
|
||||
AI_ASSISTANT_ALIASES = {
|
||||
"kiro": "kiro-cli",
|
||||
}
|
||||
|
||||
def _build_ai_assistant_help() -> str:
|
||||
"""Build the --ai help text from AGENT_CONFIG so it stays in sync with runtime config."""
|
||||
|
||||
non_generic_agents = sorted(agent for agent in AGENT_CONFIG if agent != "generic")
|
||||
base_help = (
|
||||
f"AI assistant to use: {', '.join(non_generic_agents)}, "
|
||||
"or generic (requires --ai-commands-dir)."
|
||||
)
|
||||
|
||||
if not AI_ASSISTANT_ALIASES:
|
||||
return base_help
|
||||
|
||||
alias_phrases = []
|
||||
for alias, target in sorted(AI_ASSISTANT_ALIASES.items()):
|
||||
alias_phrases.append(f"'{alias}' as an alias for '{target}'")
|
||||
|
||||
if len(alias_phrases) == 1:
|
||||
aliases_text = alias_phrases[0]
|
||||
else:
|
||||
aliases_text = ', '.join(alias_phrases[:-1]) + ' and ' + alias_phrases[-1]
|
||||
|
||||
return base_help + " Use " + aliases_text + "."
|
||||
AI_ASSISTANT_HELP = _build_ai_assistant_help()
|
||||
|
||||
SCRIPT_TYPE_CHOICES = {"sh": "POSIX Shell (bash/zsh)", "ps": "PowerShell"}
|
||||
|
||||
CLAUDE_LOCAL_PATH = Path.home() / ".claude" / "local" / "claude"
|
||||
@@ -534,7 +576,12 @@ def check_tool(tool: str, tracker: StepTracker = None) -> bool:
|
||||
tracker.complete(tool, "available")
|
||||
return True
|
||||
|
||||
found = shutil.which(tool) is not None
|
||||
if tool == "kiro-cli":
|
||||
# Kiro currently supports both executable names. Prefer kiro-cli and
|
||||
# accept kiro as a compatibility fallback.
|
||||
found = shutil.which("kiro-cli") is not None or shutil.which("kiro") is not None
|
||||
else:
|
||||
found = shutil.which(tool) is not None
|
||||
|
||||
if tracker:
|
||||
if found:
|
||||
@@ -1084,7 +1131,7 @@ def install_ai_skills(project_path: Path, selected_ai: str, tracker: StepTracker
|
||||
if not templates_dir.exists() or not any(templates_dir.glob("*.md")):
|
||||
# Fallback: try the repo-relative path (for running from source checkout)
|
||||
# This also covers agents whose extracted commands are in a different
|
||||
# format (e.g. gemini uses .toml, not .md).
|
||||
# format (e.g. gemini/tabnine use .toml, not .md).
|
||||
script_dir = Path(__file__).parent.parent.parent # up from src/specify_cli/
|
||||
fallback_dir = script_dir / "templates" / "commands"
|
||||
if fallback_dir.exists() and any(fallback_dir.glob("*.md")):
|
||||
@@ -1214,7 +1261,7 @@ def install_ai_skills(project_path: Path, selected_ai: str, tracker: StepTracker
|
||||
@app.command()
|
||||
def init(
|
||||
project_name: str = typer.Argument(None, help="Name for your new project directory (optional if using --here, or use '.' for current directory)"),
|
||||
ai_assistant: str = typer.Option(None, "--ai", help="AI assistant to use: claude, gemini, copilot, cursor-agent, qwen, opencode, codex, windsurf, kilocode, auggie, codebuddy, amp, shai, q, agy, bob, qodercli, or generic (requires --ai-commands-dir)"),
|
||||
ai_assistant: str = typer.Option(None, "--ai", help=AI_ASSISTANT_HELP),
|
||||
ai_commands_dir: str = typer.Option(None, "--ai-commands-dir", help="Directory for agent command files (required with --ai generic, e.g. .myagent/commands/)"),
|
||||
script_type: str = typer.Option(None, "--script", help="Script type to use: sh or ps"),
|
||||
ignore_agent_tools: bool = typer.Option(False, "--ignore-agent-tools", help="Skip checks for AI agent tools like Claude Code"),
|
||||
@@ -1247,6 +1294,7 @@ def init(
|
||||
specify init --here --ai claude # Alternative syntax for current directory
|
||||
specify init --here --ai codex
|
||||
specify init --here --ai codebuddy
|
||||
specify init --here --ai vibe # Initialize with Mistral Vibe support
|
||||
specify init --here
|
||||
specify init --here --force # Skip confirmation when current directory not empty
|
||||
specify init my-project --ai claude --ai-skills # Install agent skills
|
||||
@@ -1270,6 +1318,9 @@ def init(
|
||||
console.print("[yellow]Example:[/yellow] specify init --ai generic --ai-commands-dir .myagent/commands/")
|
||||
raise typer.Exit(1)
|
||||
|
||||
if ai_assistant:
|
||||
ai_assistant = AI_ASSISTANT_ALIASES.get(ai_assistant, ai_assistant)
|
||||
|
||||
if project_name == ".":
|
||||
here = True
|
||||
project_name = None # Clear project_name to use existing validation logic
|
||||
@@ -1464,8 +1515,9 @@ def init(
|
||||
if skills_ok and not here:
|
||||
agent_cfg = AGENT_CONFIG.get(selected_ai, {})
|
||||
agent_folder = agent_cfg.get("folder", "")
|
||||
commands_subdir = agent_cfg.get("commands_subdir", "commands")
|
||||
if agent_folder:
|
||||
cmds_dir = project_path / agent_folder.rstrip("/") / "commands"
|
||||
cmds_dir = project_path / agent_folder.rstrip("/") / commands_subdir
|
||||
if cmds_dir.exists():
|
||||
try:
|
||||
shutil.rmtree(cmds_dir)
|
||||
@@ -1720,6 +1772,13 @@ extension_app = typer.Typer(
|
||||
)
|
||||
app.add_typer(extension_app, name="extension")
|
||||
|
||||
catalog_app = typer.Typer(
|
||||
name="catalog",
|
||||
help="Manage extension catalogs",
|
||||
add_completion=False,
|
||||
)
|
||||
extension_app.add_typer(catalog_app, name="catalog")
|
||||
|
||||
|
||||
def get_speckit_version() -> str:
|
||||
"""Get current spec-kit version."""
|
||||
@@ -1785,6 +1844,181 @@ def extension_list(
|
||||
console.print(" [cyan]specify extension add <name>[/cyan]")
|
||||
|
||||
|
||||
@catalog_app.command("list")
|
||||
def catalog_list():
|
||||
"""List all active extension catalogs."""
|
||||
from .extensions import ExtensionCatalog, ValidationError
|
||||
|
||||
project_root = Path.cwd()
|
||||
|
||||
specify_dir = project_root / ".specify"
|
||||
if not specify_dir.exists():
|
||||
console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
|
||||
console.print("Run this command from a spec-kit project root")
|
||||
raise typer.Exit(1)
|
||||
|
||||
catalog = ExtensionCatalog(project_root)
|
||||
|
||||
try:
|
||||
active_catalogs = catalog.get_active_catalogs()
|
||||
except ValidationError as e:
|
||||
console.print(f"[red]Error:[/red] {e}")
|
||||
raise typer.Exit(1)
|
||||
|
||||
console.print("\n[bold cyan]Active Extension Catalogs:[/bold cyan]\n")
|
||||
for entry in active_catalogs:
|
||||
install_str = (
|
||||
"[green]install allowed[/green]"
|
||||
if entry.install_allowed
|
||||
else "[yellow]discovery only[/yellow]"
|
||||
)
|
||||
console.print(f" [bold]{entry.name}[/bold] (priority {entry.priority})")
|
||||
if entry.description:
|
||||
console.print(f" {entry.description}")
|
||||
console.print(f" URL: {entry.url}")
|
||||
console.print(f" Install: {install_str}")
|
||||
console.print()
|
||||
|
||||
config_path = project_root / ".specify" / "extension-catalogs.yml"
|
||||
user_config_path = Path.home() / ".specify" / "extension-catalogs.yml"
|
||||
if os.environ.get("SPECKIT_CATALOG_URL"):
|
||||
console.print("[dim]Catalog configured via SPECKIT_CATALOG_URL environment variable.[/dim]")
|
||||
else:
|
||||
try:
|
||||
proj_loaded = config_path.exists() and catalog._load_catalog_config(config_path) is not None
|
||||
except ValidationError:
|
||||
proj_loaded = False
|
||||
if proj_loaded:
|
||||
console.print(f"[dim]Config: {config_path.relative_to(project_root)}[/dim]")
|
||||
else:
|
||||
try:
|
||||
user_loaded = user_config_path.exists() and catalog._load_catalog_config(user_config_path) is not None
|
||||
except ValidationError:
|
||||
user_loaded = False
|
||||
if user_loaded:
|
||||
console.print("[dim]Config: ~/.specify/extension-catalogs.yml[/dim]")
|
||||
else:
|
||||
console.print("[dim]Using built-in default catalog stack.[/dim]")
|
||||
console.print(
|
||||
"[dim]Add .specify/extension-catalogs.yml to customize.[/dim]"
|
||||
)
|
||||
|
||||
|
||||
@catalog_app.command("add")
|
||||
def catalog_add(
|
||||
url: str = typer.Argument(help="Catalog URL (must use HTTPS)"),
|
||||
name: str = typer.Option(..., "--name", help="Catalog name"),
|
||||
priority: int = typer.Option(10, "--priority", help="Priority (lower = higher priority)"),
|
||||
install_allowed: bool = typer.Option(
|
||||
False, "--install-allowed/--no-install-allowed",
|
||||
help="Allow extensions from this catalog to be installed",
|
||||
),
|
||||
description: str = typer.Option("", "--description", help="Description of the catalog"),
|
||||
):
|
||||
"""Add a catalog to .specify/extension-catalogs.yml."""
|
||||
from .extensions import ExtensionCatalog, ValidationError
|
||||
|
||||
project_root = Path.cwd()
|
||||
|
||||
specify_dir = project_root / ".specify"
|
||||
if not specify_dir.exists():
|
||||
console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
|
||||
console.print("Run this command from a spec-kit project root")
|
||||
raise typer.Exit(1)
|
||||
|
||||
# Validate URL
|
||||
tmp_catalog = ExtensionCatalog(project_root)
|
||||
try:
|
||||
tmp_catalog._validate_catalog_url(url)
|
||||
except ValidationError as e:
|
||||
console.print(f"[red]Error:[/red] {e}")
|
||||
raise typer.Exit(1)
|
||||
|
||||
config_path = specify_dir / "extension-catalogs.yml"
|
||||
|
||||
# Load existing config
|
||||
if config_path.exists():
|
||||
try:
|
||||
config = yaml.safe_load(config_path.read_text()) or {}
|
||||
except Exception as e:
|
||||
console.print(f"[red]Error:[/red] Failed to read {config_path}: {e}")
|
||||
raise typer.Exit(1)
|
||||
else:
|
||||
config = {}
|
||||
|
||||
catalogs = config.get("catalogs", [])
|
||||
if not isinstance(catalogs, list):
|
||||
console.print("[red]Error:[/red] Invalid catalog config: 'catalogs' must be a list.")
|
||||
raise typer.Exit(1)
|
||||
|
||||
# Check for duplicate name
|
||||
for existing in catalogs:
|
||||
if isinstance(existing, dict) and existing.get("name") == name:
|
||||
console.print(f"[yellow]Warning:[/yellow] A catalog named '{name}' already exists.")
|
||||
console.print("Use 'specify extension catalog remove' first, or choose a different name.")
|
||||
raise typer.Exit(1)
|
||||
|
||||
catalogs.append({
|
||||
"name": name,
|
||||
"url": url,
|
||||
"priority": priority,
|
||||
"install_allowed": install_allowed,
|
||||
"description": description,
|
||||
})
|
||||
|
||||
config["catalogs"] = catalogs
|
||||
config_path.write_text(yaml.dump(config, default_flow_style=False, sort_keys=False))
|
||||
|
||||
install_label = "install allowed" if install_allowed else "discovery only"
|
||||
console.print(f"\n[green]✓[/green] Added catalog '[bold]{name}[/bold]' ({install_label})")
|
||||
console.print(f" URL: {url}")
|
||||
console.print(f" Priority: {priority}")
|
||||
console.print(f"\nConfig saved to {config_path.relative_to(project_root)}")
|
||||
|
||||
|
||||
@catalog_app.command("remove")
|
||||
def catalog_remove(
|
||||
name: str = typer.Argument(help="Catalog name to remove"),
|
||||
):
|
||||
"""Remove a catalog from .specify/extension-catalogs.yml."""
|
||||
project_root = Path.cwd()
|
||||
|
||||
specify_dir = project_root / ".specify"
|
||||
if not specify_dir.exists():
|
||||
console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
|
||||
console.print("Run this command from a spec-kit project root")
|
||||
raise typer.Exit(1)
|
||||
|
||||
config_path = specify_dir / "extension-catalogs.yml"
|
||||
if not config_path.exists():
|
||||
console.print("[red]Error:[/red] No catalog config found. Nothing to remove.")
|
||||
raise typer.Exit(1)
|
||||
|
||||
try:
|
||||
config = yaml.safe_load(config_path.read_text()) or {}
|
||||
except Exception:
|
||||
console.print("[red]Error:[/red] Failed to read catalog config.")
|
||||
raise typer.Exit(1)
|
||||
|
||||
catalogs = config.get("catalogs", [])
|
||||
if not isinstance(catalogs, list):
|
||||
console.print("[red]Error:[/red] Invalid catalog config: 'catalogs' must be a list.")
|
||||
raise typer.Exit(1)
|
||||
original_count = len(catalogs)
|
||||
catalogs = [c for c in catalogs if isinstance(c, dict) and c.get("name") != name]
|
||||
|
||||
if len(catalogs) == original_count:
|
||||
console.print(f"[red]Error:[/red] Catalog '{name}' not found.")
|
||||
raise typer.Exit(1)
|
||||
|
||||
config["catalogs"] = catalogs
|
||||
config_path.write_text(yaml.dump(config, default_flow_style=False, sort_keys=False))
|
||||
|
||||
console.print(f"[green]✓[/green] Removed catalog '{name}'")
|
||||
if not catalogs:
|
||||
console.print("\n[dim]No catalogs remain in config. Built-in defaults will be used.[/dim]")
|
||||
|
||||
|
||||
@extension_app.command("add")
|
||||
def extension_add(
|
||||
extension: str = typer.Argument(help="Extension name or path"),
|
||||
@@ -1873,6 +2107,19 @@ def extension_add(
|
||||
console.print(" specify extension search")
|
||||
raise typer.Exit(1)
|
||||
|
||||
# Enforce install_allowed policy
|
||||
if not ext_info.get("_install_allowed", True):
|
||||
catalog_name = ext_info.get("_catalog_name", "community")
|
||||
console.print(
|
||||
f"[red]Error:[/red] '{extension}' is available in the "
|
||||
f"'{catalog_name}' catalog but installation is not allowed from that catalog."
|
||||
)
|
||||
console.print(
|
||||
f"\nTo enable installation, add '{extension}' to an approved catalog "
|
||||
f"(install_allowed: true) in .specify/extension-catalogs.yml."
|
||||
)
|
||||
raise typer.Exit(1)
|
||||
|
||||
# Download extension ZIP
|
||||
console.print(f"Downloading {ext_info['name']} v{ext_info.get('version', 'unknown')}...")
|
||||
zip_path = catalog.download_extension(extension)
|
||||
@@ -2017,6 +2264,15 @@ def extension_search(
|
||||
tags_str = ", ".join(ext['tags'])
|
||||
console.print(f" [dim]Tags:[/dim] {tags_str}")
|
||||
|
||||
# Source catalog
|
||||
catalog_name = ext.get("_catalog_name", "")
|
||||
install_allowed = ext.get("_install_allowed", True)
|
||||
if catalog_name:
|
||||
if install_allowed:
|
||||
console.print(f" [dim]Catalog:[/dim] {catalog_name}")
|
||||
else:
|
||||
console.print(f" [dim]Catalog:[/dim] {catalog_name} [yellow](discovery only — not installable)[/yellow]")
|
||||
|
||||
# Stats
|
||||
stats = []
|
||||
if ext.get('downloads') is not None:
|
||||
@@ -2030,8 +2286,15 @@ def extension_search(
|
||||
if ext.get('repository'):
|
||||
console.print(f" [dim]Repository:[/dim] {ext['repository']}")
|
||||
|
||||
# Install command
|
||||
console.print(f"\n [cyan]Install:[/cyan] specify extension add {ext['id']}")
|
||||
# Install command (show warning if not installable)
|
||||
if install_allowed:
|
||||
console.print(f"\n [cyan]Install:[/cyan] specify extension add {ext['id']}")
|
||||
else:
|
||||
console.print(f"\n [yellow]⚠[/yellow] Not directly installable from '{catalog_name}'.")
|
||||
console.print(
|
||||
f" Add to an approved catalog with install_allowed: true, "
|
||||
f"or install from a ZIP URL: specify extension add {ext['id']} --from <zip-url>"
|
||||
)
|
||||
console.print()
|
||||
|
||||
except ExtensionError as e:
|
||||
@@ -2080,6 +2343,12 @@ def extension_info(
|
||||
# Author and License
|
||||
console.print(f"[dim]Author:[/dim] {ext_info.get('author', 'Unknown')}")
|
||||
console.print(f"[dim]License:[/dim] {ext_info.get('license', 'Unknown')}")
|
||||
|
||||
# Source catalog
|
||||
if ext_info.get("_catalog_name"):
|
||||
install_allowed = ext_info.get("_install_allowed", True)
|
||||
install_note = "" if install_allowed else " [yellow](discovery only)[/yellow]"
|
||||
console.print(f"[dim]Source catalog:[/dim] {ext_info['_catalog_name']}{install_note}")
|
||||
console.print()
|
||||
|
||||
# Requirements
|
||||
@@ -2136,12 +2405,21 @@ def extension_info(
|
||||
|
||||
# Installation status and command
|
||||
is_installed = manager.registry.is_installed(ext_info['id'])
|
||||
install_allowed = ext_info.get("_install_allowed", True)
|
||||
if is_installed:
|
||||
console.print("[green]✓ Installed[/green]")
|
||||
console.print(f"\nTo remove: specify extension remove {ext_info['id']}")
|
||||
else:
|
||||
elif install_allowed:
|
||||
console.print("[yellow]Not installed[/yellow]")
|
||||
console.print(f"\n[cyan]Install:[/cyan] specify extension add {ext_info['id']}")
|
||||
else:
|
||||
catalog_name = ext_info.get("_catalog_name", "community")
|
||||
console.print("[yellow]Not installed[/yellow]")
|
||||
console.print(
|
||||
f"\n[yellow]⚠[/yellow] '{ext_info['id']}' is available in the '{catalog_name}' catalog "
|
||||
f"but not in your approved catalog. Add it to .specify/extension-catalogs.yml "
|
||||
f"with install_allowed: true to enable installation."
|
||||
)
|
||||
|
||||
except ExtensionError as e:
|
||||
console.print(f"\n[red]Error:[/red] {e}")
|
||||
@@ -2350,4 +2628,3 @@ def main():
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
|
||||
@@ -8,9 +8,11 @@ without bloating the core framework.
|
||||
|
||||
import json
|
||||
import hashlib
|
||||
import os
|
||||
import tempfile
|
||||
import zipfile
|
||||
import shutil
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, List, Any
|
||||
from datetime import datetime, timezone
|
||||
@@ -36,6 +38,16 @@ class CompatibilityError(ExtensionError):
|
||||
pass
|
||||
|
||||
|
||||
@dataclass
|
||||
class CatalogEntry:
|
||||
"""Represents a single catalog entry in the catalog stack."""
|
||||
url: str
|
||||
name: str
|
||||
priority: int
|
||||
install_allowed: bool
|
||||
description: str = ""
|
||||
|
||||
|
||||
class ExtensionManifest:
|
||||
"""Represents and validates an extension manifest (extension.yml)."""
|
||||
|
||||
@@ -455,6 +467,12 @@ class ExtensionManager:
|
||||
if cmd_file.exists():
|
||||
cmd_file.unlink()
|
||||
|
||||
# Also remove companion .prompt.md for Copilot
|
||||
if agent_name == "copilot":
|
||||
prompt_file = self.project_root / ".github" / "prompts" / f"{cmd_name}.prompt.md"
|
||||
if prompt_file.exists():
|
||||
prompt_file.unlink()
|
||||
|
||||
if keep_config:
|
||||
# Preserve config files, only remove non-config files
|
||||
if extension_dir.exists():
|
||||
@@ -597,7 +615,7 @@ class CommandRegistrar:
|
||||
"dir": ".github/agents",
|
||||
"format": "markdown",
|
||||
"args": "$ARGUMENTS",
|
||||
"extension": ".md"
|
||||
"extension": ".agent.md"
|
||||
},
|
||||
"cursor": {
|
||||
"dir": ".cursor/commands",
|
||||
@@ -653,8 +671,8 @@ class CommandRegistrar:
|
||||
"args": "$ARGUMENTS",
|
||||
"extension": ".md"
|
||||
},
|
||||
"q": {
|
||||
"dir": ".amazonq/prompts",
|
||||
"kiro-cli": {
|
||||
"dir": ".kiro/prompts",
|
||||
"format": "markdown",
|
||||
"args": "$ARGUMENTS",
|
||||
"extension": ".md"
|
||||
@@ -671,6 +689,12 @@ class CommandRegistrar:
|
||||
"args": "$ARGUMENTS",
|
||||
"extension": ".md"
|
||||
},
|
||||
"tabnine": {
|
||||
"dir": ".tabnine/agent/commands",
|
||||
"format": "toml",
|
||||
"args": "{{args}}",
|
||||
"extension": ".toml"
|
||||
},
|
||||
"bob": {
|
||||
"dir": ".bob/commands",
|
||||
"format": "markdown",
|
||||
@@ -871,16 +895,40 @@ class CommandRegistrar:
|
||||
dest_file = commands_dir / f"{cmd_name}{agent_config['extension']}"
|
||||
dest_file.write_text(output)
|
||||
|
||||
# Generate companion .prompt.md for Copilot agents
|
||||
if agent_name == "copilot":
|
||||
self._write_copilot_prompt(project_root, cmd_name)
|
||||
|
||||
registered.append(cmd_name)
|
||||
|
||||
# Register aliases
|
||||
for alias in cmd_info.get("aliases", []):
|
||||
alias_file = commands_dir / f"{alias}{agent_config['extension']}"
|
||||
alias_file.write_text(output)
|
||||
# Generate companion .prompt.md for alias too
|
||||
if agent_name == "copilot":
|
||||
self._write_copilot_prompt(project_root, alias)
|
||||
registered.append(alias)
|
||||
|
||||
return registered
|
||||
|
||||
@staticmethod
|
||||
def _write_copilot_prompt(project_root: Path, cmd_name: str) -> None:
|
||||
"""Generate a companion .prompt.md file for a Copilot agent command.
|
||||
|
||||
Copilot requires a .prompt.md file in .github/prompts/ that references
|
||||
the corresponding .agent.md file in .github/agents/ via an ``agent:``
|
||||
frontmatter field.
|
||||
|
||||
Args:
|
||||
project_root: Path to project root
|
||||
cmd_name: Command name (used as the file stem, e.g. 'speckit.my-ext.example')
|
||||
"""
|
||||
prompts_dir = project_root / ".github" / "prompts"
|
||||
prompts_dir.mkdir(parents=True, exist_ok=True)
|
||||
prompt_file = prompts_dir / f"{cmd_name}.prompt.md"
|
||||
prompt_file.write_text(f"---\nagent: {cmd_name}\n---\n")
|
||||
|
||||
def register_commands_for_all_agents(
|
||||
self,
|
||||
manifest: ExtensionManifest,
|
||||
@@ -940,6 +988,7 @@ class ExtensionCatalog:
|
||||
"""Manages extension catalog fetching, caching, and searching."""
|
||||
|
||||
DEFAULT_CATALOG_URL = "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.json"
|
||||
COMMUNITY_CATALOG_URL = "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json"
|
||||
CACHE_DURATION = 3600 # 1 hour in seconds
|
||||
|
||||
def __init__(self, project_root: Path):
|
||||
@@ -954,43 +1003,109 @@ class ExtensionCatalog:
|
||||
self.cache_file = self.cache_dir / "catalog.json"
|
||||
self.cache_metadata_file = self.cache_dir / "catalog-metadata.json"
|
||||
|
||||
def get_catalog_url(self) -> str:
|
||||
"""Get catalog URL from config or use default.
|
||||
def _validate_catalog_url(self, url: str) -> None:
|
||||
"""Validate that a catalog URL uses HTTPS (localhost HTTP allowed).
|
||||
|
||||
Checks in order:
|
||||
1. SPECKIT_CATALOG_URL environment variable
|
||||
2. Default catalog URL
|
||||
|
||||
Returns:
|
||||
URL to fetch catalog from
|
||||
Args:
|
||||
url: URL to validate
|
||||
|
||||
Raises:
|
||||
ValidationError: If custom URL is invalid (non-HTTPS)
|
||||
ValidationError: If URL is invalid or uses non-HTTPS scheme
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
from urllib.parse import urlparse
|
||||
|
||||
# Environment variable override (useful for testing)
|
||||
parsed = urlparse(url)
|
||||
is_localhost = parsed.hostname in ("localhost", "127.0.0.1", "::1")
|
||||
if parsed.scheme != "https" and not (parsed.scheme == "http" and is_localhost):
|
||||
raise ValidationError(
|
||||
f"Catalog URL must use HTTPS (got {parsed.scheme}://). "
|
||||
"HTTP is only allowed for localhost."
|
||||
)
|
||||
if not parsed.netloc:
|
||||
raise ValidationError("Catalog URL must be a valid URL with a host.")
|
||||
|
||||
def _load_catalog_config(self, config_path: Path) -> Optional[List[CatalogEntry]]:
|
||||
"""Load catalog stack configuration from a YAML file.
|
||||
|
||||
Args:
|
||||
config_path: Path to extension-catalogs.yml
|
||||
|
||||
Returns:
|
||||
Ordered list of CatalogEntry objects, or None if file doesn't exist
|
||||
or contains no valid catalog entries.
|
||||
|
||||
Raises:
|
||||
ValidationError: If any catalog entry has an invalid URL,
|
||||
the file cannot be parsed, or a priority value is invalid.
|
||||
"""
|
||||
if not config_path.exists():
|
||||
return None
|
||||
try:
|
||||
data = yaml.safe_load(config_path.read_text()) or {}
|
||||
except (yaml.YAMLError, OSError) as e:
|
||||
raise ValidationError(
|
||||
f"Failed to read catalog config {config_path}: {e}"
|
||||
)
|
||||
catalogs_data = data.get("catalogs", [])
|
||||
if not catalogs_data:
|
||||
return None
|
||||
if not isinstance(catalogs_data, list):
|
||||
raise ValidationError(
|
||||
f"Invalid catalog config: 'catalogs' must be a list, got {type(catalogs_data).__name__}"
|
||||
)
|
||||
entries: List[CatalogEntry] = []
|
||||
for idx, item in enumerate(catalogs_data):
|
||||
if not isinstance(item, dict):
|
||||
raise ValidationError(
|
||||
f"Invalid catalog entry at index {idx}: expected a mapping, got {type(item).__name__}"
|
||||
)
|
||||
url = str(item.get("url", "")).strip()
|
||||
if not url:
|
||||
continue
|
||||
self._validate_catalog_url(url)
|
||||
try:
|
||||
priority = int(item.get("priority", idx + 1))
|
||||
except (TypeError, ValueError):
|
||||
raise ValidationError(
|
||||
f"Invalid priority for catalog '{item.get('name', idx + 1)}': "
|
||||
f"expected integer, got {item.get('priority')!r}"
|
||||
)
|
||||
raw_install = item.get("install_allowed", False)
|
||||
if isinstance(raw_install, str):
|
||||
install_allowed = raw_install.strip().lower() in ("true", "yes", "1")
|
||||
else:
|
||||
install_allowed = bool(raw_install)
|
||||
entries.append(CatalogEntry(
|
||||
url=url,
|
||||
name=str(item.get("name", f"catalog-{idx + 1}")),
|
||||
priority=priority,
|
||||
install_allowed=install_allowed,
|
||||
description=str(item.get("description", "")),
|
||||
))
|
||||
entries.sort(key=lambda e: e.priority)
|
||||
return entries if entries else None
|
||||
|
||||
def get_active_catalogs(self) -> List[CatalogEntry]:
|
||||
"""Get the ordered list of active catalogs.
|
||||
|
||||
Resolution order:
|
||||
1. SPECKIT_CATALOG_URL env var — single catalog replacing all defaults
|
||||
2. Project-level .specify/extension-catalogs.yml
|
||||
3. User-level ~/.specify/extension-catalogs.yml
|
||||
4. Built-in default stack (default + community)
|
||||
|
||||
Returns:
|
||||
List of CatalogEntry objects sorted by priority (ascending)
|
||||
|
||||
Raises:
|
||||
ValidationError: If a catalog URL is invalid
|
||||
"""
|
||||
import sys
|
||||
|
||||
# 1. SPECKIT_CATALOG_URL env var replaces all defaults for backward compat
|
||||
if env_value := os.environ.get("SPECKIT_CATALOG_URL"):
|
||||
catalog_url = env_value.strip()
|
||||
parsed = urlparse(catalog_url)
|
||||
|
||||
# Require HTTPS for security (prevent man-in-the-middle attacks)
|
||||
# Allow http://localhost for local development/testing
|
||||
is_localhost = parsed.hostname in ("localhost", "127.0.0.1", "::1")
|
||||
if parsed.scheme != "https" and not (parsed.scheme == "http" and is_localhost):
|
||||
raise ValidationError(
|
||||
f"Invalid SPECKIT_CATALOG_URL: must use HTTPS (got {parsed.scheme}://). "
|
||||
"HTTP is only allowed for localhost."
|
||||
)
|
||||
|
||||
if not parsed.netloc:
|
||||
raise ValidationError(
|
||||
"Invalid SPECKIT_CATALOG_URL: must be a valid URL with a host."
|
||||
)
|
||||
|
||||
# Warn users when using a non-default catalog (once per instance)
|
||||
self._validate_catalog_url(catalog_url)
|
||||
if catalog_url != self.DEFAULT_CATALOG_URL:
|
||||
if not getattr(self, "_non_default_catalog_warning_shown", False):
|
||||
print(
|
||||
@@ -999,11 +1114,163 @@ class ExtensionCatalog:
|
||||
file=sys.stderr,
|
||||
)
|
||||
self._non_default_catalog_warning_shown = True
|
||||
return [CatalogEntry(url=catalog_url, name="custom", priority=1, install_allowed=True, description="Custom catalog via SPECKIT_CATALOG_URL")]
|
||||
|
||||
return catalog_url
|
||||
# 2. Project-level config overrides all defaults
|
||||
project_config_path = self.project_root / ".specify" / "extension-catalogs.yml"
|
||||
catalogs = self._load_catalog_config(project_config_path)
|
||||
if catalogs is not None:
|
||||
return catalogs
|
||||
|
||||
# TODO: Support custom catalogs from .specify/extension-catalogs.yml
|
||||
return self.DEFAULT_CATALOG_URL
|
||||
# 3. User-level config
|
||||
user_config_path = Path.home() / ".specify" / "extension-catalogs.yml"
|
||||
catalogs = self._load_catalog_config(user_config_path)
|
||||
if catalogs is not None:
|
||||
return catalogs
|
||||
|
||||
# 4. Built-in default stack
|
||||
return [
|
||||
CatalogEntry(url=self.DEFAULT_CATALOG_URL, name="default", priority=1, install_allowed=True, description="Built-in catalog of installable extensions"),
|
||||
CatalogEntry(url=self.COMMUNITY_CATALOG_URL, name="community", priority=2, install_allowed=False, description="Community-contributed extensions (discovery only)"),
|
||||
]
|
||||
|
||||
def get_catalog_url(self) -> str:
|
||||
"""Get the primary catalog URL.
|
||||
|
||||
Returns the URL of the highest-priority catalog. Kept for backward
|
||||
compatibility. Use get_active_catalogs() for full multi-catalog support.
|
||||
|
||||
Returns:
|
||||
URL of the primary catalog
|
||||
|
||||
Raises:
|
||||
ValidationError: If a catalog URL is invalid
|
||||
"""
|
||||
active = self.get_active_catalogs()
|
||||
return active[0].url if active else self.DEFAULT_CATALOG_URL
|
||||
|
||||
def _fetch_single_catalog(self, entry: CatalogEntry, force_refresh: bool = False) -> Dict[str, Any]:
|
||||
"""Fetch a single catalog with per-URL caching.
|
||||
|
||||
For the DEFAULT_CATALOG_URL, uses legacy cache files (self.cache_file /
|
||||
self.cache_metadata_file) for backward compatibility. For all other URLs,
|
||||
uses URL-hash-based cache files in self.cache_dir.
|
||||
|
||||
Args:
|
||||
entry: CatalogEntry describing the catalog to fetch
|
||||
force_refresh: If True, bypass cache
|
||||
|
||||
Returns:
|
||||
Catalog data dictionary
|
||||
|
||||
Raises:
|
||||
ExtensionError: If catalog cannot be fetched or has invalid format
|
||||
"""
|
||||
import urllib.request
|
||||
import urllib.error
|
||||
|
||||
# Determine cache file paths (backward compat for default catalog)
|
||||
if entry.url == self.DEFAULT_CATALOG_URL:
|
||||
cache_file = self.cache_file
|
||||
cache_meta_file = self.cache_metadata_file
|
||||
is_valid = not force_refresh and self.is_cache_valid()
|
||||
else:
|
||||
url_hash = hashlib.sha256(entry.url.encode()).hexdigest()[:16]
|
||||
cache_file = self.cache_dir / f"catalog-{url_hash}.json"
|
||||
cache_meta_file = self.cache_dir / f"catalog-{url_hash}-metadata.json"
|
||||
is_valid = False
|
||||
if not force_refresh and cache_file.exists() and cache_meta_file.exists():
|
||||
try:
|
||||
metadata = json.loads(cache_meta_file.read_text())
|
||||
cached_at = datetime.fromisoformat(metadata.get("cached_at", ""))
|
||||
if cached_at.tzinfo is None:
|
||||
cached_at = cached_at.replace(tzinfo=timezone.utc)
|
||||
age = (datetime.now(timezone.utc) - cached_at).total_seconds()
|
||||
is_valid = age < self.CACHE_DURATION
|
||||
except (json.JSONDecodeError, ValueError, KeyError, TypeError):
|
||||
# If metadata is invalid or missing expected fields, treat cache as invalid
|
||||
pass
|
||||
|
||||
# Use cache if valid
|
||||
if is_valid:
|
||||
try:
|
||||
return json.loads(cache_file.read_text())
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
# Fetch from network
|
||||
try:
|
||||
with urllib.request.urlopen(entry.url, timeout=10) as response:
|
||||
catalog_data = json.loads(response.read())
|
||||
|
||||
if "schema_version" not in catalog_data or "extensions" not in catalog_data:
|
||||
raise ExtensionError(f"Invalid catalog format from {entry.url}")
|
||||
|
||||
# Save to cache
|
||||
self.cache_dir.mkdir(parents=True, exist_ok=True)
|
||||
cache_file.write_text(json.dumps(catalog_data, indent=2))
|
||||
cache_meta_file.write_text(json.dumps({
|
||||
"cached_at": datetime.now(timezone.utc).isoformat(),
|
||||
"catalog_url": entry.url,
|
||||
}, indent=2))
|
||||
|
||||
return catalog_data
|
||||
|
||||
except urllib.error.URLError as e:
|
||||
raise ExtensionError(f"Failed to fetch catalog from {entry.url}: {e}")
|
||||
except json.JSONDecodeError as e:
|
||||
raise ExtensionError(f"Invalid JSON in catalog from {entry.url}: {e}")
|
||||
|
||||
def _get_merged_extensions(self, force_refresh: bool = False) -> List[Dict[str, Any]]:
|
||||
"""Fetch and merge extensions from all active catalogs.
|
||||
|
||||
Higher-priority (lower priority number) catalogs win on conflicts
|
||||
(same extension id in two catalogs). Each extension dict is annotated with:
|
||||
- _catalog_name: name of the source catalog
|
||||
- _install_allowed: whether installation is allowed from this catalog
|
||||
|
||||
Catalogs that fail to fetch are skipped. Raises ExtensionError only if
|
||||
ALL catalogs fail.
|
||||
|
||||
Args:
|
||||
force_refresh: If True, bypass all caches
|
||||
|
||||
Returns:
|
||||
List of merged extension dicts
|
||||
|
||||
Raises:
|
||||
ExtensionError: If all catalogs fail to fetch
|
||||
"""
|
||||
import sys
|
||||
|
||||
active_catalogs = self.get_active_catalogs()
|
||||
merged: Dict[str, Dict[str, Any]] = {}
|
||||
any_success = False
|
||||
|
||||
for catalog_entry in active_catalogs:
|
||||
try:
|
||||
catalog_data = self._fetch_single_catalog(catalog_entry, force_refresh)
|
||||
any_success = True
|
||||
except ExtensionError as e:
|
||||
print(
|
||||
f"Warning: Could not fetch catalog '{catalog_entry.name}': {e}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
continue
|
||||
|
||||
for ext_id, ext_data in catalog_data.get("extensions", {}).items():
|
||||
if ext_id not in merged: # Higher-priority catalog wins
|
||||
merged[ext_id] = {
|
||||
**ext_data,
|
||||
"id": ext_id,
|
||||
"_catalog_name": catalog_entry.name,
|
||||
"_install_allowed": catalog_entry.install_allowed,
|
||||
}
|
||||
|
||||
if not any_success and active_catalogs:
|
||||
raise ExtensionError("Failed to fetch any extension catalog")
|
||||
|
||||
return list(merged.values())
|
||||
|
||||
def is_cache_valid(self) -> bool:
|
||||
"""Check if cached catalog is still valid.
|
||||
@@ -1017,9 +1284,11 @@ class ExtensionCatalog:
|
||||
try:
|
||||
metadata = json.loads(self.cache_metadata_file.read_text())
|
||||
cached_at = datetime.fromisoformat(metadata.get("cached_at", ""))
|
||||
if cached_at.tzinfo is None:
|
||||
cached_at = cached_at.replace(tzinfo=timezone.utc)
|
||||
age_seconds = (datetime.now(timezone.utc) - cached_at).total_seconds()
|
||||
return age_seconds < self.CACHE_DURATION
|
||||
except (json.JSONDecodeError, ValueError, KeyError):
|
||||
except (json.JSONDecodeError, ValueError, KeyError, TypeError):
|
||||
return False
|
||||
|
||||
def fetch_catalog(self, force_refresh: bool = False) -> Dict[str, Any]:
|
||||
@@ -1080,7 +1349,7 @@ class ExtensionCatalog:
|
||||
author: Optional[str] = None,
|
||||
verified_only: bool = False,
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Search catalog for extensions.
|
||||
"""Search catalog for extensions across all active catalogs.
|
||||
|
||||
Args:
|
||||
query: Search query (searches name, description, tags)
|
||||
@@ -1089,14 +1358,16 @@ class ExtensionCatalog:
|
||||
verified_only: If True, show only verified extensions
|
||||
|
||||
Returns:
|
||||
List of matching extension metadata
|
||||
List of matching extension metadata, each annotated with
|
||||
``_catalog_name`` and ``_install_allowed`` from its source catalog.
|
||||
"""
|
||||
catalog = self.fetch_catalog()
|
||||
extensions = catalog.get("extensions", {})
|
||||
all_extensions = self._get_merged_extensions()
|
||||
|
||||
results = []
|
||||
|
||||
for ext_id, ext_data in extensions.items():
|
||||
for ext_data in all_extensions:
|
||||
ext_id = ext_data["id"]
|
||||
|
||||
# Apply filters
|
||||
if verified_only and not ext_data.get("verified", False):
|
||||
continue
|
||||
@@ -1122,25 +1393,26 @@ class ExtensionCatalog:
|
||||
if query_lower not in searchable_text:
|
||||
continue
|
||||
|
||||
results.append({"id": ext_id, **ext_data})
|
||||
results.append(ext_data)
|
||||
|
||||
return results
|
||||
|
||||
def get_extension_info(self, extension_id: str) -> Optional[Dict[str, Any]]:
|
||||
"""Get detailed information about a specific extension.
|
||||
|
||||
Searches all active catalogs in priority order.
|
||||
|
||||
Args:
|
||||
extension_id: ID of the extension
|
||||
|
||||
Returns:
|
||||
Extension metadata or None if not found
|
||||
Extension metadata (annotated with ``_catalog_name`` and
|
||||
``_install_allowed``) or None if not found.
|
||||
"""
|
||||
catalog = self.fetch_catalog()
|
||||
extensions = catalog.get("extensions", {})
|
||||
|
||||
if extension_id in extensions:
|
||||
return {"id": extension_id, **extensions[extension_id]}
|
||||
|
||||
all_extensions = self._get_merged_extensions()
|
||||
for ext_data in all_extensions:
|
||||
if ext_data["id"] == extension_id:
|
||||
return ext_data
|
||||
return None
|
||||
|
||||
def download_extension(self, extension_id: str, target_dir: Optional[Path] = None) -> Path:
|
||||
@@ -1200,11 +1472,18 @@ class ExtensionCatalog:
|
||||
raise ExtensionError(f"Failed to save extension ZIP: {e}")
|
||||
|
||||
def clear_cache(self):
|
||||
"""Clear the catalog cache."""
|
||||
"""Clear the catalog cache (both legacy and URL-hash-based files)."""
|
||||
if self.cache_file.exists():
|
||||
self.cache_file.unlink()
|
||||
if self.cache_metadata_file.exists():
|
||||
self.cache_metadata_file.unlink()
|
||||
# Also clear any per-URL hash-based cache files
|
||||
if self.cache_dir.exists():
|
||||
for extra_cache in self.cache_dir.glob("catalog-*.json"):
|
||||
if extra_cache != self.cache_file:
|
||||
extra_cache.unlink(missing_ok=True)
|
||||
for extra_meta in self.cache_dir.glob("catalog-*-metadata.json"):
|
||||
extra_meta.unlink(missing_ok=True)
|
||||
|
||||
|
||||
class ConfigManager:
|
||||
@@ -1782,4 +2061,3 @@ class HookExecutor:
|
||||
|
||||
self.save_project_config(config)
|
||||
|
||||
|
||||
|
||||
@@ -94,9 +94,10 @@ You **MUST** consider the user input before proceeding (if not empty).
|
||||
- Generate unique checklist filename:
|
||||
- Use short, descriptive name based on domain (e.g., `ux.md`, `api.md`, `security.md`)
|
||||
- Format: `[domain].md`
|
||||
- If file exists, append to existing file
|
||||
- Number items sequentially starting from CHK001
|
||||
- Each `/speckit.checklist` run creates a NEW file (never overwrites existing checklists)
|
||||
- File handling behavior:
|
||||
- If file does NOT exist: Create new file and number items starting from CHK001
|
||||
- If file exists: Append new items to existing file, continuing from the last CHK ID (e.g., if last item is CHK015, start new items at CHK016)
|
||||
- Never delete or replace existing checklist content - always preserve and append
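A minimal sketch of how the next CHK ID could be derived when appending; the helper name and regex are illustrative, not part of the template:

```
import re
from pathlib import Path

def next_chk_id(checklist: Path) -> int:
    """Continue numbering from the highest CHK ID already in the file."""
    if not checklist.exists():
        return 1
    ids = [int(m.group(1)) for m in re.finditer(r"CHK(\d+)", checklist.read_text())]
    return (max(ids) + 1) if ids else 1

# Example: if ux.md currently ends at CHK015, next_chk_id(Path("ux.md")) returns 16 (CHK016).
```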
|
||||
|
||||
**CORE PRINCIPLE - Test the Requirements, Not the Implementation**:
|
||||
Every checklist item MUST evaluate the REQUIREMENTS THEMSELVES for:
|
||||
@@ -208,13 +209,13 @@ You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
6. **Structure Reference**: Generate the checklist following the canonical template in `templates/checklist-template.md` for title, meta section, category headings, and ID formatting. If template is unavailable, use: H1 title, purpose/created meta lines, `##` category sections containing `- [ ] CHK### <requirement item>` lines with globally incrementing IDs starting at CHK001.
|
||||
|
||||
7. **Report**: Output full path to created checklist, item count, and remind user that each run creates a new file. Summarize:
|
||||
7. **Report**: Output the full path to the checklist file and the item count, and state whether the run created a new file or appended to an existing one. Summarize:
|
||||
- Focus areas selected
|
||||
- Depth level
|
||||
- Actor/timing
|
||||
- Any explicit user-specified must-have items incorporated
|
||||
|
||||
**Important**: Each `/speckit.checklist` command invocation creates a checklist file using short, descriptive names unless file already exists. This allows:
|
||||
**Important**: Each `/speckit.checklist` command invocation uses a short, descriptive checklist filename and either creates a new file or appends to an existing one. This allows:
|
||||
|
||||
- Multiple checklists of different types (e.g., `ux.md`, `test.md`, `security.md`)
|
||||
- Simple, memorable filenames that indicate checklist purpose
|
||||
|
||||
@@ -89,7 +89,7 @@ Execution steps:
|
||||
- Information is better deferred to planning phase (note internally)
|
||||
|
||||
3. Generate (internally) a prioritized queue of candidate clarification questions (maximum 5). Do NOT output them all at once. Apply these constraints:
|
||||
- Maximum of 10 total questions across the whole session.
|
||||
- Maximum of 5 total questions across the whole session.
|
||||
- Each question must be answerable with EITHER:
|
||||
- A short multiple‑choice selection (2–5 distinct, mutually exclusive options), OR
|
||||
- A one-word / short‑phrase answer (explicitly constrain: "Answer in <=5 words").
|
||||
|
||||
@@ -13,6 +13,40 @@ $ARGUMENTS
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Pre-Execution Checks
|
||||
|
||||
**Check for extension hooks (before implementation)**:
|
||||
- Check if `.specify/extensions.yml` exists in the project root.
|
||||
- If it exists, read it and look for entries under the `hooks.before_implement` key
|
||||
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
|
||||
- Filter to only hooks where `enabled: true`
|
||||
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
|
||||
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
|
||||
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
|
||||
- For each executable hook, output the following based on its `optional` flag:
|
||||
- **Optional hook** (`optional: true`):
|
||||
```
|
||||
## Extension Hooks
|
||||
|
||||
**Optional Pre-Hook**: {extension}
|
||||
Command: `/{command}`
|
||||
Description: {description}
|
||||
|
||||
Prompt: {prompt}
|
||||
To execute: `/{command}`
|
||||
```
|
||||
- **Mandatory hook** (`optional: false`):
|
||||
```
|
||||
## Extension Hooks
|
||||
|
||||
**Automatic Pre-Hook**: {extension}
|
||||
Executing: `/{command}`
|
||||
EXECUTE_COMMAND: {command}
|
||||
|
||||
Wait for the result of the hook command before proceeding to the Outline.
|
||||
```
|
||||
- If no hooks are registered or `.specify/extensions.yml` does not exist, skip silently
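A minimal sketch of the selection logic described above, assuming the hook fields used in the test fixture later in this change; the function name is illustrative:

```
import yaml
from pathlib import Path

def executable_hooks(project_root: Path, phase: str) -> list:
    """Return enabled hooks for a phase, leaving any hook with a condition to the HookExecutor."""
    cfg = project_root / ".specify" / "extensions.yml"
    if not cfg.exists():
        return []
    try:
        data = yaml.safe_load(cfg.read_text()) or {}
    except yaml.YAMLError:
        return []  # invalid YAML: skip hook checking silently
    hooks = (data.get("hooks") or {}).get(phase) or []
    return [
        h for h in hooks
        if isinstance(h, dict)
        and h.get("enabled")
        and not h.get("condition")  # non-empty condition: skip here
    ]

# executable_hooks(Path("."), "before_implement")
```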
|
||||
|
||||
## Outline
|
||||
|
||||
1. Run `{SCRIPT}` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax, e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||
@@ -88,7 +122,7 @@ You **MUST** consider the user input before proceeding (if not empty).
|
||||
- **Rust**: `target/`, `debug/`, `release/`, `*.rs.bk`, `*.rlib`, `*.prof*`, `.idea/`, `*.log`, `.env*`
|
||||
- **Kotlin**: `build/`, `out/`, `.gradle/`, `.idea/`, `*.class`, `*.jar`, `*.iml`, `*.log`, `.env*`
|
||||
- **C++**: `build/`, `bin/`, `obj/`, `out/`, `*.o`, `*.so`, `*.a`, `*.exe`, `*.dll`, `.idea/`, `*.log`, `.env*`
|
||||
- **C**: `build/`, `bin/`, `obj/`, `out/`, `*.o`, `*.a`, `*.so`, `*.exe`, `Makefile`, `config.log`, `.idea/`, `*.log`, `.env*`
|
||||
- **C**: `build/`, `bin/`, `obj/`, `out/`, `*.o`, `*.a`, `*.so`, `*.exe`, `*.dll`, `autom4te.cache/`, `config.status`, `config.log`, `.idea/`, `*.log`, `.env*`
|
||||
- **Swift**: `.build/`, `DerivedData/`, `*.swiftpm/`, `Packages/`
|
||||
- **R**: `.Rproj.user/`, `.Rhistory`, `.RData`, `.Ruserdata`, `*.Rproj`, `packrat/`, `renv/`
|
||||
- **Universal**: `.DS_Store`, `Thumbs.db`, `*.tmp`, `*.swp`, `.vscode/`, `.idea/`
|
||||
@@ -136,3 +170,32 @@ You **MUST** consider the user input before proceeding (if not empty).
|
||||
- Report final status with summary of completed work
|
||||
|
||||
Note: This command assumes a complete task breakdown exists in tasks.md. If tasks are incomplete or missing, suggest running `/speckit.tasks` first to regenerate the task list.
|
||||
|
||||
10. **Check for extension hooks**: After completion validation, check if `.specify/extensions.yml` exists in the project root.
|
||||
- If it exists, read it and look for entries under the `hooks.after_implement` key
|
||||
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
|
||||
- Filter to only hooks where `enabled: true`
|
||||
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
|
||||
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
|
||||
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
|
||||
- For each executable hook, output the following based on its `optional` flag:
|
||||
- **Optional hook** (`optional: true`):
|
||||
```
|
||||
## Extension Hooks
|
||||
|
||||
**Optional Hook**: {extension}
|
||||
Command: `/{command}`
|
||||
Description: {description}
|
||||
|
||||
Prompt: {prompt}
|
||||
To execute: `/{command}`
|
||||
```
|
||||
- **Mandatory hook** (`optional: false`):
|
||||
```
|
||||
## Extension Hooks
|
||||
|
||||
**Automatic Hook**: {extension}
|
||||
Executing: `/{command}`
|
||||
EXECUTE_COMMAND: {command}
|
||||
```
|
||||
- If no hooks are registered or `.specify/extensions.yml` does not exist, skip silently
|
||||
|
||||
@@ -9,8 +9,8 @@ handoffs:
|
||||
prompt: Clarify specification requirements
|
||||
send: true
|
||||
scripts:
|
||||
sh: scripts/bash/create-new-feature.sh --json "{ARGS}"
|
||||
ps: scripts/powershell/create-new-feature.ps1 -Json "{ARGS}"
|
||||
sh: scripts/bash/create-new-feature.sh "{ARGS}"
|
||||
ps: scripts/powershell/create-new-feature.ps1 "{ARGS}"
|
||||
---
|
||||
|
||||
## User Input
|
||||
@@ -39,33 +39,14 @@ Given that feature description, do this:
|
||||
- "Create a dashboard for analytics" → "analytics-dashboard"
|
||||
- "Fix payment processing timeout bug" → "fix-payment-timeout"
|
||||
|
||||
2. **Check for existing branches before creating new one**:
|
||||
2. **Create the feature branch** by running the script with `--short-name` (and `--json`), and do NOT pass `--number` (the script auto-detects the next globally available number across all branches and spec directories):
|
||||
|
||||
a. First, fetch all remote branches to ensure we have the latest information:
|
||||
|
||||
```bash
|
||||
git fetch --all --prune
|
||||
```
|
||||
|
||||
b. Find the highest feature number across all sources for the short-name:
|
||||
- Remote branches: `git ls-remote --heads origin | grep -E 'refs/heads/[0-9]+-<short-name>$'`
|
||||
- Local branches: `git branch | grep -E '^[* ]*[0-9]+-<short-name>$'`
|
||||
- Specs directories: Check for directories matching `specs/[0-9]+-<short-name>`
|
||||
|
||||
c. Determine the next available number:
|
||||
- Extract all numbers from all three sources
|
||||
- Find the highest number N
|
||||
- Use N+1 for the new branch number
|
||||
|
||||
d. Run the script `{SCRIPT}` with the calculated number and short-name:
|
||||
- Pass `--number N+1` and `--short-name "your-short-name"` along with the feature description
|
||||
- Bash example: `{SCRIPT} --json --number 5 --short-name "user-auth" "Add user authentication"`
|
||||
- PowerShell example: `{SCRIPT} -Json -Number 5 -ShortName "user-auth" "Add user authentication"`
|
||||
- Bash example: `{SCRIPT} --json --short-name "user-auth" "Add user authentication"`
|
||||
- PowerShell example: `{SCRIPT} -Json -ShortName "user-auth" "Add user authentication"`
|
||||
|
||||
**IMPORTANT**:
|
||||
- Check all three sources (remote branches, local branches, specs directories) to find the highest number
|
||||
- Only match branches/directories with the exact short-name pattern
|
||||
- If no existing branches/directories found with this short-name, start with number 1
|
||||
- Do NOT pass `--number` — the script determines the correct next number automatically
|
||||
- Always include the JSON flag (`--json` for Bash, `-Json` for PowerShell) so the output can be parsed reliably
|
||||
- Run this script only once per feature
|
||||
- The script prints the JSON to the terminal; always refer to that output for the actual values you need
|
||||
- The JSON output will contain BRANCH_NAME and SPEC_FILE paths
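For illustration, parsing the script's `--json` output might look roughly like this; only the BRANCH_NAME and SPEC_FILE keys are named above, and the values here are invented:

```
import json

# Hypothetical example output from create-new-feature.sh --json (values invented).
script_output = '{"BRANCH_NAME": "5-user-auth", "SPEC_FILE": "specs/5-user-auth/spec.md"}'
result = json.loads(script_output)
branch_name, spec_file = result["BRANCH_NAME"], result["SPEC_FILE"]
```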
|
||||
|
||||
@@ -22,6 +22,40 @@ $ARGUMENTS
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Pre-Execution Checks
|
||||
|
||||
**Check for extension hooks (before tasks generation)**:
|
||||
- Check if `.specify/extensions.yml` exists in the project root.
|
||||
- If it exists, read it and look for entries under the `hooks.before_tasks` key
|
||||
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
|
||||
- Filter to only hooks where `enabled: true`
|
||||
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
|
||||
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
|
||||
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
|
||||
- For each executable hook, output the following based on its `optional` flag:
|
||||
- **Optional hook** (`optional: true`):
|
||||
```
|
||||
## Extension Hooks
|
||||
|
||||
**Optional Pre-Hook**: {extension}
|
||||
Command: `/{command}`
|
||||
Description: {description}
|
||||
|
||||
Prompt: {prompt}
|
||||
To execute: `/{command}`
|
||||
```
|
||||
- **Mandatory hook** (`optional: false`):
|
||||
```
|
||||
## Extension Hooks
|
||||
|
||||
**Automatic Pre-Hook**: {extension}
|
||||
Executing: `/{command}`
|
||||
EXECUTE_COMMAND: {command}
|
||||
|
||||
Wait for the result of the hook command before proceeding to the Outline.
|
||||
```
|
||||
- If no hooks are registered or `.specify/extensions.yml` does not exist, skip silently
|
||||
|
||||
## Outline
|
||||
|
||||
1. **Setup**: Run `{SCRIPT}` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax, e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||
@@ -63,6 +97,35 @@ You **MUST** consider the user input before proceeding (if not empty).
|
||||
- Suggested MVP scope (typically just User Story 1)
|
||||
- Format validation: Confirm ALL tasks follow the checklist format (checkbox, ID, labels, file paths)
|
||||
|
||||
6. **Check for extension hooks**: After tasks.md is generated, check if `.specify/extensions.yml` exists in the project root.
|
||||
- If it exists, read it and look for entries under the `hooks.after_tasks` key
|
||||
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
|
||||
- Filter to only hooks where `enabled: true`
|
||||
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
|
||||
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
|
||||
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
|
||||
- For each executable hook, output the following based on its `optional` flag:
|
||||
- **Optional hook** (`optional: true`):
|
||||
```
|
||||
## Extension Hooks
|
||||
|
||||
**Optional Hook**: {extension}
|
||||
Command: `/{command}`
|
||||
Description: {description}
|
||||
|
||||
Prompt: {prompt}
|
||||
To execute: `/{command}`
|
||||
```
|
||||
- **Mandatory hook** (`optional: false`):
|
||||
```
|
||||
## Extension Hooks
|
||||
|
||||
**Automatic Hook**: {extension}
|
||||
Executing: `/{command}`
|
||||
EXECUTE_COMMAND: {command}
|
||||
```
|
||||
- If no hooks are registered or `.specify/extensions.yml` does not exist, skip silently
|
||||
|
||||
Context for task generation: {ARGS}
|
||||
|
||||
The tasks.md should be immediately executable - each task must be specific enough that an LLM can complete it without additional context.
|
||||
|
||||
tests/hooks/.specify/extensions.yml (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
hooks:
|
||||
before_implement:
|
||||
- id: pre_test
|
||||
enabled: true
|
||||
optional: false
|
||||
extension: "test-extension"
|
||||
command: "pre_implement_test"
|
||||
description: "Test before implement hook execution"
|
||||
|
||||
after_implement:
|
||||
- id: post_test
|
||||
enabled: true
|
||||
optional: true
|
||||
extension: "test-extension"
|
||||
command: "post_implement_test"
|
||||
description: "Test after implement hook execution"
|
||||
prompt: "Would you like to run the post-implement test?"
|
||||
|
||||
before_tasks:
|
||||
- id: pre_tasks_test
|
||||
enabled: true
|
||||
optional: false
|
||||
extension: "test-extension"
|
||||
command: "pre_tasks_test"
|
||||
description: "Test before tasks hook execution"
|
||||
|
||||
after_tasks:
|
||||
- id: post_tasks_test
|
||||
enabled: true
|
||||
optional: true
|
||||
extension: "test-extension"
|
||||
command: "post_tasks_test"
|
||||
description: "Test after tasks hook execution"
|
||||
prompt: "Would you like to run the post-tasks test?"
|
||||
tests/hooks/TESTING.md (new file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
# Testing Extension Hooks
|
||||
|
||||
This directory contains a mock project to verify that LLM agents correctly identify and execute hook commands defined in `.specify/extensions.yml`.
|
||||
|
||||
## Test 1: Testing `before_tasks` and `after_tasks`
|
||||
|
||||
1. Open a chat with an LLM (like GitHub Copilot) in this project.
|
||||
2. Ask it to generate tasks for the current directory:
|
||||
> "Please follow `/speckit.tasks` for the `./tests/hooks` directory."
|
||||
3. **Expected Behavior**:
|
||||
- Before doing any generation, the LLM should notice the mandatory **Automatic Pre-Hook** entry in `.specify/extensions.yml` under `before_tasks`.
|
||||
- It should state it is executing `EXECUTE_COMMAND: pre_tasks_test`.
|
||||
- It should then proceed to read the `.md` docs and produce a `tasks.md`.
|
||||
- After generation, it should output the optional `after_tasks` hook (`post_tasks_test`) block, asking if you want to run it.
|
||||
|
||||
## Test 2: Testing `before_implement` and `after_implement`
|
||||
|
||||
*(Requires `tasks.md` from Test 1 to exist)*
|
||||
|
||||
1. In the same (or new) chat, ask the LLM to implement the tasks:
|
||||
> "Please follow `/speckit.implement` for the `./tests/hooks` directory."
|
||||
2. **Expected Behavior**:
|
||||
- The LLM should first check for `before_implement` hooks.
|
||||
- It should state it is executing `EXECUTE_COMMAND: pre_implement_test` BEFORE doing any actual task execution.
|
||||
- It should evaluate the checklists and execute the code writing tasks.
|
||||
- Upon completion, it should output the optional `after_implement` hook (`post_implement_test`) block.
|
||||
|
||||
## How it works
|
||||
|
||||
The templates for these commands, `templates/commands/tasks.md` and `templates/commands/implement.md`, contain strict ordered lists. The new `before_*` hooks are therefore formulated in a **Pre-Execution Checks** section before the outline, so they are evaluated first without disturbing the template step numbering.
|
||||
tests/hooks/plan.md (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
# Test Setup for Hooks
|
||||
|
||||
This feature is designed to test whether LLMs correctly invoke Spec Kit extension hooks when generating tasks and implementing code.
|
||||
tests/hooks/spec.md (new file, 1 line)
@@ -0,0 +1 @@
|
||||
- **User Story 1:** I want a test script that prints "Hello hooks!".
|
||||
tests/hooks/tasks.md (new file, 1 line)
@@ -0,0 +1 @@
|
||||
- [ ] T001 [US1] Create script that prints 'Hello hooks!' in hello.py
|
||||
tests/test_agent_config_consistency.py (new file, 166 lines)
@@ -0,0 +1,166 @@
|
||||
"""Consistency checks for agent configuration across runtime and packaging scripts."""
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
from specify_cli import AGENT_CONFIG, AI_ASSISTANT_ALIASES, AI_ASSISTANT_HELP
|
||||
from specify_cli.extensions import CommandRegistrar
|
||||
|
||||
|
||||
REPO_ROOT = Path(__file__).resolve().parent.parent
|
||||
|
||||
|
||||
class TestAgentConfigConsistency:
|
||||
"""Ensure kiro-cli migration stays synchronized across key surfaces."""
|
||||
|
||||
def test_runtime_config_uses_kiro_cli_and_removes_q(self):
|
||||
"""AGENT_CONFIG should include kiro-cli and exclude legacy q."""
|
||||
assert "kiro-cli" in AGENT_CONFIG
|
||||
assert AGENT_CONFIG["kiro-cli"]["folder"] == ".kiro/"
|
||||
assert AGENT_CONFIG["kiro-cli"]["commands_subdir"] == "prompts"
|
||||
assert "q" not in AGENT_CONFIG
|
||||
|
||||
def test_extension_registrar_uses_kiro_cli_and_removes_q(self):
|
||||
"""Extension command registrar should target .kiro/prompts."""
|
||||
cfg = CommandRegistrar.AGENT_CONFIGS
|
||||
|
||||
assert "kiro-cli" in cfg
|
||||
assert cfg["kiro-cli"]["dir"] == ".kiro/prompts"
|
||||
assert "q" not in cfg
|
||||
|
||||
def test_release_agent_lists_include_kiro_cli_and_exclude_q(self):
|
||||
"""Bash and PowerShell release scripts should agree on agent key set for Kiro."""
|
||||
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
|
||||
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
|
||||
|
||||
sh_match = re.search(r"ALL_AGENTS=\(([^)]*)\)", sh_text)
|
||||
assert sh_match is not None
|
||||
sh_agents = sh_match.group(1).split()
|
||||
|
||||
ps_match = re.search(r"\$AllAgents = @\(([^)]*)\)", ps_text)
|
||||
assert ps_match is not None
|
||||
ps_agents = re.findall(r"'([^']+)'", ps_match.group(1))
|
||||
|
||||
assert "kiro-cli" in sh_agents
|
||||
assert "kiro-cli" in ps_agents
|
||||
assert "shai" in sh_agents
|
||||
assert "shai" in ps_agents
|
||||
assert "agy" in sh_agents
|
||||
assert "agy" in ps_agents
|
||||
assert "q" not in sh_agents
|
||||
assert "q" not in ps_agents
|
||||
|
||||
def test_release_ps_switch_has_shai_and_agy_generation(self):
|
||||
"""PowerShell release builder must generate files for shai and agy agents."""
|
||||
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
|
||||
|
||||
assert re.search(r"'shai'\s*\{.*?\.shai/commands", ps_text, re.S) is not None
|
||||
assert re.search(r"'agy'\s*\{.*?\.agent/workflows", ps_text, re.S) is not None
|
||||
|
||||
def test_init_ai_help_includes_roo_and_kiro_alias(self):
|
||||
"""CLI help text for --ai should stay in sync with agent config and alias guidance."""
|
||||
assert "roo" in AI_ASSISTANT_HELP
|
||||
for alias, target in AI_ASSISTANT_ALIASES.items():
|
||||
assert alias in AI_ASSISTANT_HELP
|
||||
assert target in AI_ASSISTANT_HELP
|
||||
|
||||
def test_devcontainer_kiro_installer_uses_pinned_checksum(self):
|
||||
"""Devcontainer installer should always verify Kiro installer via pinned SHA256."""
|
||||
post_create_text = (REPO_ROOT / ".devcontainer" / "post-create.sh").read_text(encoding="utf-8")
|
||||
|
||||
assert 'KIRO_INSTALLER_SHA256="7487a65cf310b7fb59b357c4b5e6e3f3259d383f4394ecedb39acf70f307cffb"' in post_create_text
|
||||
assert "sha256sum -c -" in post_create_text
|
||||
assert "KIRO_SKIP_KIRO_INSTALLER_VERIFY" not in post_create_text
|
||||
|
||||
def test_release_output_targets_kiro_prompt_dir(self):
|
||||
"""Packaging and release scripts should no longer emit amazonq artifacts."""
|
||||
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
|
||||
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
|
||||
gh_release_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-github-release.sh").read_text(encoding="utf-8")
|
||||
|
||||
assert ".kiro/prompts" in sh_text
|
||||
assert ".kiro/prompts" in ps_text
|
||||
assert ".amazonq/prompts" not in sh_text
|
||||
assert ".amazonq/prompts" not in ps_text
|
||||
|
||||
assert "spec-kit-template-kiro-cli-sh-" in gh_release_text
|
||||
assert "spec-kit-template-kiro-cli-ps-" in gh_release_text
|
||||
assert "spec-kit-template-q-sh-" not in gh_release_text
|
||||
assert "spec-kit-template-q-ps-" not in gh_release_text
|
||||
|
||||
def test_agent_context_scripts_use_kiro_cli(self):
|
||||
"""Agent context scripts should advertise kiro-cli and not legacy q agent key."""
|
||||
bash_text = (REPO_ROOT / "scripts" / "bash" / "update-agent-context.sh").read_text(encoding="utf-8")
|
||||
pwsh_text = (REPO_ROOT / "scripts" / "powershell" / "update-agent-context.ps1").read_text(encoding="utf-8")
|
||||
|
||||
assert "kiro-cli" in bash_text
|
||||
assert "kiro-cli" in pwsh_text
|
||||
assert "Amazon Q Developer CLI" not in bash_text
|
||||
assert "Amazon Q Developer CLI" not in pwsh_text
|
||||
|
||||
# --- Tabnine CLI consistency checks ---
|
||||
|
||||
def test_runtime_config_includes_tabnine(self):
|
||||
"""AGENT_CONFIG should include tabnine with correct folder and subdir."""
|
||||
assert "tabnine" in AGENT_CONFIG
|
||||
assert AGENT_CONFIG["tabnine"]["folder"] == ".tabnine/agent/"
|
||||
assert AGENT_CONFIG["tabnine"]["commands_subdir"] == "commands"
|
||||
assert AGENT_CONFIG["tabnine"]["requires_cli"] is True
|
||||
assert AGENT_CONFIG["tabnine"]["install_url"] is not None
|
||||
|
||||
def test_extension_registrar_includes_tabnine(self):
|
||||
"""CommandRegistrar.AGENT_CONFIGS should include tabnine with correct TOML config."""
|
||||
from specify_cli.extensions import CommandRegistrar
|
||||
|
||||
assert "tabnine" in CommandRegistrar.AGENT_CONFIGS
|
||||
cfg = CommandRegistrar.AGENT_CONFIGS["tabnine"]
|
||||
assert cfg["dir"] == ".tabnine/agent/commands"
|
||||
assert cfg["format"] == "toml"
|
||||
assert cfg["args"] == "{{args}}"
|
||||
assert cfg["extension"] == ".toml"
|
||||
|
||||
def test_release_agent_lists_include_tabnine(self):
|
||||
"""Bash and PowerShell release scripts should include tabnine in agent lists."""
|
||||
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
|
||||
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
|
||||
|
||||
sh_match = re.search(r"ALL_AGENTS=\(([^)]*)\)", sh_text)
|
||||
assert sh_match is not None
|
||||
sh_agents = sh_match.group(1).split()
|
||||
|
||||
ps_match = re.search(r"\$AllAgents = @\(([^)]*)\)", ps_text)
|
||||
assert ps_match is not None
|
||||
ps_agents = re.findall(r"'([^']+)'", ps_match.group(1))
|
||||
|
||||
assert "tabnine" in sh_agents
|
||||
assert "tabnine" in ps_agents
|
||||
|
||||
    def test_release_scripts_generate_tabnine_toml_commands(self):
        """Release scripts should generate TOML commands for tabnine in .tabnine/agent/commands."""
        sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
        ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")

        assert ".tabnine/agent/commands" in sh_text
        assert ".tabnine/agent/commands" in ps_text
        assert re.search(r"'tabnine'\s*\{.*?\.tabnine/agent/commands", ps_text, re.S) is not None

    def test_github_release_includes_tabnine_packages(self):
        """GitHub release script should include tabnine template packages."""
        gh_release_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-github-release.sh").read_text(encoding="utf-8")

        assert "spec-kit-template-tabnine-sh-" in gh_release_text
        assert "spec-kit-template-tabnine-ps-" in gh_release_text

    def test_agent_context_scripts_include_tabnine(self):
        """Agent context scripts should support tabnine agent type."""
        bash_text = (REPO_ROOT / "scripts" / "bash" / "update-agent-context.sh").read_text(encoding="utf-8")
        pwsh_text = (REPO_ROOT / "scripts" / "powershell" / "update-agent-context.ps1").read_text(encoding="utf-8")

        assert "tabnine" in bash_text
        assert "TABNINE_FILE" in bash_text
        assert "tabnine" in pwsh_text
        assert "TABNINE_FILE" in pwsh_text

    def test_ai_help_includes_tabnine(self):
        """CLI help text for --ai should include tabnine."""
        assert "tabnine" in AI_ASSISTANT_HELP
@@ -147,6 +147,11 @@ class TestGetSkillsDir:
        result = _get_skills_dir(project_dir, "gemini")
        assert result == project_dir / ".gemini" / "skills"

    def test_tabnine_skills_dir(self, project_dir):
        """Tabnine should use .tabnine/agent/skills/."""
        result = _get_skills_dir(project_dir, "tabnine")
        assert result == project_dir / ".tabnine" / "agent" / "skills"

    def test_copilot_skills_dir(self, project_dir):
        """Copilot should use .github/skills/."""
        result = _get_skills_dir(project_dir, "copilot")
@@ -162,6 +167,11 @@ class TestGetSkillsDir:
        result = _get_skills_dir(project_dir, "cursor-agent")
        assert result == project_dir / ".cursor" / "skills"

    def test_kiro_cli_skills_dir(self, project_dir):
        """Kiro CLI should use .kiro/skills/."""
        result = _get_skills_dir(project_dir, "kiro-cli")
        assert result == project_dir / ".kiro" / "skills"

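# Illustrative sketch (not the specify_cli implementation): a minimal per-agent mapping
# that would satisfy the directory expectations asserted in TestGetSkillsDir. The
# DEFAULT_SKILLS_DIR value is a placeholder; its real value is not visible in this diff.
from pathlib import Path

DEFAULT_SKILLS_DIR = ".specify/skills"  # assumed placeholder

_SKILLS_DIRS = {
    "gemini": ".gemini/skills",
    "tabnine": ".tabnine/agent/skills",
    "copilot": ".github/skills",
    "cursor-agent": ".cursor/skills",
    "kiro-cli": ".kiro/skills",
}

def _get_skills_dir_sketch(project_dir: Path, agent: str) -> Path:
    """Return the per-agent skills directory, falling back to the default."""
    return project_dir / _SKILLS_DIRS.get(agent, DEFAULT_SKILLS_DIR)
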
    def test_unknown_agent_uses_default(self, project_dir):
        """Unknown agents should fall back to DEFAULT_SKILLS_DIR."""
        result = _get_skills_dir(project_dir, "nonexistent-agent")
@@ -460,8 +470,9 @@ class TestNewProjectCommandSkip:
        """Simulate template extraction: create agent commands dir."""
        agent_cfg = AGENT_CONFIG.get(agent, {})
        agent_folder = agent_cfg.get("folder", "")
        commands_subdir = agent_cfg.get("commands_subdir", "commands")
        if agent_folder:
            cmds_dir = project_path / agent_folder.rstrip("/") / "commands"
            cmds_dir = project_path / agent_folder.rstrip("/") / commands_subdir
            cmds_dir.mkdir(parents=True, exist_ok=True)
            (cmds_dir / "speckit.specify.md").write_text("# spec")

@@ -483,6 +494,7 @@ class TestNewProjectCommandSkip:
            patch("specify_cli.shutil.which", return_value="/usr/bin/git"):
            result = runner.invoke(app, ["init", str(target), "--ai", "claude", "--ai-skills", "--script", "sh", "--no-git"])

        assert result.exit_code == 0
        # Skills should have been called
        mock_skills.assert_called_once()

@@ -490,6 +502,30 @@ class TestNewProjectCommandSkip:
        cmds_dir = target / ".claude" / "commands"
        assert not cmds_dir.exists()

    def test_new_project_nonstandard_commands_subdir_removed_after_skills_succeed(self, tmp_path):
        """For non-standard agents, configured commands_subdir should be removed on success."""
        from typer.testing import CliRunner

        runner = CliRunner()
        target = tmp_path / "new-kiro-proj"

        def fake_download(project_path, *args, **kwargs):
            self._fake_extract("kiro-cli", project_path)

        with patch("specify_cli.download_and_extract_template", side_effect=fake_download), \
             patch("specify_cli.ensure_executable_scripts"), \
             patch("specify_cli.ensure_constitution_from_template"), \
             patch("specify_cli.install_ai_skills", return_value=True) as mock_skills, \
             patch("specify_cli.is_git_repo", return_value=False), \
             patch("specify_cli.shutil.which", return_value="/usr/bin/git"):
            result = runner.invoke(app, ["init", str(target), "--ai", "kiro-cli", "--ai-skills", "--script", "sh", "--no-git"])

        assert result.exit_code == 0
        mock_skills.assert_called_once()

        prompts_dir = target / ".kiro" / "prompts"
        assert not prompts_dir.exists()

    def test_commands_preserved_when_skills_fail(self, tmp_path):
        """If skills fail, commands should NOT be removed (safety net)."""
        from typer.testing import CliRunner
@@ -508,6 +544,7 @@ class TestNewProjectCommandSkip:
            patch("specify_cli.shutil.which", return_value="/usr/bin/git"):
            result = runner.invoke(app, ["init", str(target), "--ai", "claude", "--ai-skills", "--script", "sh", "--no-git"])

        assert result.exit_code == 0
        # Commands should still exist since skills failed
        cmds_dir = target / ".claude" / "commands"
        assert cmds_dir.exists()
@@ -538,8 +575,9 @@ class TestNewProjectCommandSkip:
            patch("specify_cli.install_ai_skills", return_value=True), \
            patch("specify_cli.is_git_repo", return_value=True), \
            patch("specify_cli.shutil.which", return_value="/usr/bin/git"):
            result = runner.invoke(app, ["init", "--here", "--ai", "claude", "--ai-skills", "--script", "sh", "--no-git"])
            result = runner.invoke(app, ["init", "--here", "--ai", "claude", "--ai-skills", "--script", "sh", "--no-git"], input="y\n")

        assert result.exit_code == 0
        # Commands must remain for --here
        assert cmds_dir.exists()
        assert (cmds_dir / "speckit.specify.md").exists()
@@ -631,6 +669,42 @@ class TestCliValidation:
        assert "--ai-skills" in plain
        assert "agent skills" in plain.lower()

    def test_kiro_alias_normalized_to_kiro_cli(self, tmp_path):
        """--ai kiro should normalize to canonical kiro-cli agent key."""
        from typer.testing import CliRunner

        runner = CliRunner()
        target = tmp_path / "kiro-alias-proj"

        with patch("specify_cli.download_and_extract_template") as mock_download, \
             patch("specify_cli.ensure_executable_scripts"), \
             patch("specify_cli.ensure_constitution_from_template"), \
             patch("specify_cli.is_git_repo", return_value=False), \
             patch("specify_cli.shutil.which", return_value="/usr/bin/git"):
            result = runner.invoke(
                app,
                [
                    "init",
                    str(target),
                    "--ai",
                    "kiro",
                    "--ignore-agent-tools",
                    "--script",
                    "sh",
                    "--no-git",
                ],
            )

        assert result.exit_code == 0
        assert mock_download.called
        # download_and_extract_template(project_path, ai_assistant, script_type, ...)
        assert mock_download.call_args.args[1] == "kiro-cli"

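# Illustrative sketch (hypothetical, not the actual CLI code): the test above only requires
# that the "kiro" alias resolve to the canonical "kiro-cli" key before the template is
# downloaded. A minimal normalization step consistent with that behaviour:
_AGENT_ALIASES = {"kiro": "kiro-cli"}  # assumed alias table

def normalize_agent_key(ai_assistant: str) -> str:
    """Map user-facing aliases onto canonical AGENT_CONFIG keys."""
    return _AGENT_ALIASES.get(ai_assistant, ai_assistant)

assert normalize_agent_key("kiro") == "kiro-cli"
assert normalize_agent_key("claude") == "claude"
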
    def test_q_removed_from_agent_config(self):
        """Amazon Q legacy key should not remain in AGENT_CONFIG."""
        assert "q" not in AGENT_CONFIG
        assert "kiro-cli" in AGENT_CONFIG


class TestParameterOrderingIssue:
    """Test fix for GitHub issue #1641: parameter ordering issues."""

@@ -6,6 +6,7 @@ Tests cover:
- Extension registry operations
- Extension manager installation/removal
- Command registration
- Catalog stack (multi-catalog support)
"""

import pytest
@@ -16,6 +17,7 @@ from pathlib import Path
from datetime import datetime, timezone

from specify_cli.extensions import (
    CatalogEntry,
    ExtensionManifest,
    ExtensionRegistry,
    ExtensionManager,
@@ -399,6 +401,12 @@ class TestExtensionManager:
class TestCommandRegistrar:
    """Test CommandRegistrar command registration."""

    def test_kiro_cli_agent_config_present(self):
        """Kiro CLI should be mapped to .kiro/prompts and legacy q removed."""
        assert "kiro-cli" in CommandRegistrar.AGENT_CONFIGS
        assert CommandRegistrar.AGENT_CONFIGS["kiro-cli"]["dir"] == ".kiro/prompts"
        assert "q" not in CommandRegistrar.AGENT_CONFIGS

    def test_parse_frontmatter_valid(self):
        """Test parsing valid YAML frontmatter."""
        content = """---
@@ -520,6 +528,121 @@ $ARGUMENTS
        assert (claude_dir / "speckit.alias.cmd.md").exists()
        assert (claude_dir / "speckit.shortcut.md").exists()

    def test_register_commands_for_copilot(self, extension_dir, project_dir):
        """Test registering commands for Copilot agent with .agent.md extension."""
        # Create .github/agents directory (Copilot project)
        agents_dir = project_dir / ".github" / "agents"
        agents_dir.mkdir(parents=True)

        manifest = ExtensionManifest(extension_dir / "extension.yml")

        registrar = CommandRegistrar()
        registered = registrar.register_commands_for_agent(
            "copilot", manifest, extension_dir, project_dir
        )

        assert len(registered) == 1
        assert "speckit.test.hello" in registered

        # Verify command file uses .agent.md extension
        cmd_file = agents_dir / "speckit.test.hello.agent.md"
        assert cmd_file.exists()

        # Verify NO plain .md file was created
        plain_md_file = agents_dir / "speckit.test.hello.md"
        assert not plain_md_file.exists()

        content = cmd_file.read_text()
        assert "description: Test hello command" in content
        assert "<!-- Extension: test-ext -->" in content

    def test_copilot_companion_prompt_created(self, extension_dir, project_dir):
        """Test that companion .prompt.md files are created in .github/prompts/."""
        agents_dir = project_dir / ".github" / "agents"
        agents_dir.mkdir(parents=True)

        manifest = ExtensionManifest(extension_dir / "extension.yml")

        registrar = CommandRegistrar()
        registrar.register_commands_for_agent(
            "copilot", manifest, extension_dir, project_dir
        )

        # Verify companion .prompt.md file exists
        prompt_file = project_dir / ".github" / "prompts" / "speckit.test.hello.prompt.md"
        assert prompt_file.exists()

        # Verify content has correct agent frontmatter
        content = prompt_file.read_text()
        assert content == "---\nagent: speckit.test.hello\n---\n"

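# Illustrative sketch (hypothetical helper, not the registrar's actual code): the assert
# above fixes the exact companion file content, so generating it is just a three-line
# frontmatter block that points back at the registered agent command.
from pathlib import Path

def write_companion_prompt_sketch(prompts_dir: Path, command_name: str) -> Path:
    """Write a minimal .prompt.md that delegates to the named agent command."""
    prompts_dir.mkdir(parents=True, exist_ok=True)
    prompt_file = prompts_dir / f"{command_name}.prompt.md"
    prompt_file.write_text(f"---\nagent: {command_name}\n---\n")
    return prompt_file

# Example: write_companion_prompt_sketch(Path(".github/prompts"), "speckit.test.hello")
# produces exactly the content checked by the test above.
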
    def test_copilot_aliases_get_companion_prompts(self, project_dir, temp_dir):
        """Test that aliases also get companion .prompt.md files for Copilot."""
        import yaml

        ext_dir = temp_dir / "ext-alias-copilot"
        ext_dir.mkdir()

        manifest_data = {
            "schema_version": "1.0",
            "extension": {
                "id": "ext-alias-copilot",
                "name": "Extension with Alias",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.alias-copilot.cmd",
                        "file": "commands/cmd.md",
                        "aliases": ["speckit.shortcut-copilot"],
                    }
                ]
            },
        }

        with open(ext_dir / "extension.yml", "w") as f:
            yaml.dump(manifest_data, f)

        (ext_dir / "commands").mkdir()
        (ext_dir / "commands" / "cmd.md").write_text(
            "---\ndescription: Test\n---\n\nTest"
        )

        # Set up Copilot project
        (project_dir / ".github" / "agents").mkdir(parents=True)

        manifest = ExtensionManifest(ext_dir / "extension.yml")
        registrar = CommandRegistrar()
        registered = registrar.register_commands_for_agent(
            "copilot", manifest, ext_dir, project_dir
        )

        assert len(registered) == 2

        # Both primary and alias get companion .prompt.md
        prompts_dir = project_dir / ".github" / "prompts"
        assert (prompts_dir / "speckit.alias-copilot.cmd.prompt.md").exists()
        assert (prompts_dir / "speckit.shortcut-copilot.prompt.md").exists()

    def test_non_copilot_agent_no_companion_file(self, extension_dir, project_dir):
        """Test that non-copilot agents do NOT create .prompt.md files."""
        claude_dir = project_dir / ".claude" / "commands"
        claude_dir.mkdir(parents=True)

        manifest = ExtensionManifest(extension_dir / "extension.yml")

        registrar = CommandRegistrar()
        registrar.register_commands_for_agent(
            "claude", manifest, extension_dir, project_dir
        )

        # No .github/prompts directory should exist
        prompts_dir = project_dir / ".github" / "prompts"
        assert not prompts_dir.exists()


# ===== Utility Function Tests =====

@@ -596,6 +719,31 @@ class TestIntegration:
        assert not cmd_file.exists()
        assert len(manager.list_installed()) == 0

    def test_copilot_cleanup_removes_prompt_files(self, extension_dir, project_dir):
        """Test that removing a Copilot extension also removes .prompt.md files."""
        agents_dir = project_dir / ".github" / "agents"
        agents_dir.mkdir(parents=True)

        manager = ExtensionManager(project_dir)
        manager.install_from_directory(extension_dir, "0.1.0", register_commands=True)

        # Verify copilot was detected and registered
        metadata = manager.registry.get("test-ext")
        assert "copilot" in metadata["registered_commands"]

        # Verify files exist before cleanup
        agent_file = agents_dir / "speckit.test.hello.agent.md"
        prompt_file = project_dir / ".github" / "prompts" / "speckit.test.hello.prompt.md"
        assert agent_file.exists()
        assert prompt_file.exists()

        # Use the extension manager to remove — exercises the copilot prompt cleanup code
        result = manager.remove("test-ext")
        assert result is True

        assert not agent_file.exists()
        assert not prompt_file.exists()

    def test_multiple_extensions(self, temp_dir, project_dir):
        """Test installing multiple extensions."""
        import yaml
@@ -734,10 +882,29 @@ class TestExtensionCatalog:

    def test_search_all_extensions(self, temp_dir):
        """Test searching all extensions without filters."""
        import yaml as yaml_module

        project_dir = temp_dir / "project"
        project_dir.mkdir()
        (project_dir / ".specify").mkdir()

        # Use a single-catalog config so community extensions don't interfere
        config_path = project_dir / ".specify" / "extension-catalogs.yml"
        with open(config_path, "w") as f:
            yaml_module.dump(
                {
                    "catalogs": [
                        {
                            "name": "test-catalog",
                            "url": ExtensionCatalog.DEFAULT_CATALOG_URL,
                            "priority": 1,
                            "install_allowed": True,
                        }
                    ]
                },
                f,
            )

        catalog = ExtensionCatalog(project_dir)

        # Create mock catalog
@@ -783,10 +950,29 @@ class TestExtensionCatalog:

    def test_search_by_query(self, temp_dir):
        """Test searching by query text."""
        import yaml as yaml_module

        project_dir = temp_dir / "project"
        project_dir.mkdir()
        (project_dir / ".specify").mkdir()

        # Use a single-catalog config so community extensions don't interfere
        config_path = project_dir / ".specify" / "extension-catalogs.yml"
        with open(config_path, "w") as f:
            yaml_module.dump(
                {
                    "catalogs": [
                        {
                            "name": "test-catalog",
                            "url": ExtensionCatalog.DEFAULT_CATALOG_URL,
                            "priority": 1,
                            "install_allowed": True,
                        }
                    ]
                },
                f,
            )

        catalog = ExtensionCatalog(project_dir)

        # Create mock catalog
@@ -828,10 +1014,29 @@ class TestExtensionCatalog:

    def test_search_by_tag(self, temp_dir):
        """Test searching by tag."""
        import yaml as yaml_module

        project_dir = temp_dir / "project"
        project_dir.mkdir()
        (project_dir / ".specify").mkdir()

        # Use a single-catalog config so community extensions don't interfere
        config_path = project_dir / ".specify" / "extension-catalogs.yml"
        with open(config_path, "w") as f:
            yaml_module.dump(
                {
                    "catalogs": [
                        {
                            "name": "test-catalog",
                            "url": ExtensionCatalog.DEFAULT_CATALOG_URL,
                            "priority": 1,
                            "install_allowed": True,
                        }
                    ]
                },
                f,
            )

        catalog = ExtensionCatalog(project_dir)

        # Create mock catalog
@@ -880,10 +1085,29 @@ class TestExtensionCatalog:

    def test_search_verified_only(self, temp_dir):
        """Test searching verified extensions only."""
        import yaml as yaml_module

        project_dir = temp_dir / "project"
        project_dir.mkdir()
        (project_dir / ".specify").mkdir()

        # Use a single-catalog config so community extensions don't interfere
        config_path = project_dir / ".specify" / "extension-catalogs.yml"
        with open(config_path, "w") as f:
            yaml_module.dump(
                {
                    "catalogs": [
                        {
                            "name": "test-catalog",
                            "url": ExtensionCatalog.DEFAULT_CATALOG_URL,
                            "priority": 1,
                            "install_allowed": True,
                        }
                    ]
                },
                f,
            )

        catalog = ExtensionCatalog(project_dir)

        # Create mock catalog
@@ -925,10 +1149,29 @@ class TestExtensionCatalog:

    def test_get_extension_info(self, temp_dir):
        """Test getting specific extension info."""
        import yaml as yaml_module

        project_dir = temp_dir / "project"
        project_dir.mkdir()
        (project_dir / ".specify").mkdir()

        # Use a single-catalog config so community extensions don't interfere
        config_path = project_dir / ".specify" / "extension-catalogs.yml"
        with open(config_path, "w") as f:
            yaml_module.dump(
                {
                    "catalogs": [
                        {
                            "name": "test-catalog",
                            "url": ExtensionCatalog.DEFAULT_CATALOG_URL,
                            "priority": 1,
                            "install_allowed": True,
                        }
                    ]
                },
                f,
            )

        catalog = ExtensionCatalog(project_dir)

        # Create mock catalog
@@ -987,3 +1230,371 @@ class TestExtensionCatalog:

        assert not catalog.cache_file.exists()
        assert not catalog.cache_metadata_file.exists()


# ===== CatalogEntry Tests =====

class TestCatalogEntry:
    """Test CatalogEntry dataclass."""

    def test_catalog_entry_creation(self):
        """Test creating a CatalogEntry."""
        entry = CatalogEntry(
            url="https://example.com/catalog.json",
            name="test",
            priority=1,
            install_allowed=True,
        )
        assert entry.url == "https://example.com/catalog.json"
        assert entry.name == "test"
        assert entry.priority == 1
        assert entry.install_allowed is True

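# Illustrative sketch (hypothetical, not the shipped definition): the four attributes
# asserted above are consistent with a small dataclass along these lines; frozen=True
# is an assumption.
from dataclasses import dataclass

@dataclass(frozen=True)
class CatalogEntrySketch:
    url: str
    name: str
    priority: int
    install_allowed: bool = True

# CatalogEntrySketch("https://example.com/catalog.json", "test", 1).install_allowed -> True
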
# ===== Catalog Stack Tests =====

class TestCatalogStack:
    """Test multi-catalog stack support."""

    def _make_project(self, temp_dir: Path) -> Path:
        """Create a minimal spec-kit project directory."""
        project_dir = temp_dir / "project"
        project_dir.mkdir()
        (project_dir / ".specify").mkdir()
        return project_dir

    def _write_catalog_config(self, project_dir: Path, catalogs: list) -> None:
        """Write extension-catalogs.yml to project .specify dir."""
        import yaml as yaml_module

        config_path = project_dir / ".specify" / "extension-catalogs.yml"
        with open(config_path, "w") as f:
            yaml_module.dump({"catalogs": catalogs}, f)

    def _write_valid_cache(
        self, catalog: ExtensionCatalog, extensions: dict, url: str = "http://test.com"
    ) -> None:
        """Populate the primary cache file with mock extension data."""
        catalog_data = {"schema_version": "1.0", "extensions": extensions}
        catalog.cache_dir.mkdir(parents=True, exist_ok=True)
        catalog.cache_file.write_text(json.dumps(catalog_data))
        catalog.cache_metadata_file.write_text(
            json.dumps(
                {
                    "cached_at": datetime.now(timezone.utc).isoformat(),
                    "catalog_url": url,
                }
            )
        )

    # --- get_active_catalogs ---

    def test_default_stack(self, temp_dir):
        """Default stack includes default and community catalogs."""
        project_dir = self._make_project(temp_dir)
        catalog = ExtensionCatalog(project_dir)

        entries = catalog.get_active_catalogs()

        assert len(entries) == 2
        assert entries[0].url == ExtensionCatalog.DEFAULT_CATALOG_URL
        assert entries[0].name == "default"
        assert entries[0].priority == 1
        assert entries[0].install_allowed is True
        assert entries[1].url == ExtensionCatalog.COMMUNITY_CATALOG_URL
        assert entries[1].name == "community"
        assert entries[1].priority == 2
        assert entries[1].install_allowed is False

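# Illustrative sketch (values taken from the asserts above; the two URL constants are shown
# as placeholders because their actual values are not part of this diff): the default stack
# is effectively an ordered pair of catalog entries.
DEFAULT_CATALOG_URL = "https://example.com/default-catalog.json"      # placeholder
COMMUNITY_CATALOG_URL = "https://example.com/community-catalog.json"  # placeholder

default_stack_sketch = [
    {"name": "default", "url": DEFAULT_CATALOG_URL, "priority": 1, "install_allowed": True},
    {"name": "community", "url": COMMUNITY_CATALOG_URL, "priority": 2, "install_allowed": False},
]
assert [c["name"] for c in sorted(default_stack_sketch, key=lambda c: c["priority"])] == ["default", "community"]
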
    def test_env_var_overrides_default_stack(self, temp_dir, monkeypatch):
        """SPECKIT_CATALOG_URL replaces the entire default stack."""
        project_dir = self._make_project(temp_dir)
        custom_url = "https://example.com/catalog.json"
        monkeypatch.setenv("SPECKIT_CATALOG_URL", custom_url)

        catalog = ExtensionCatalog(project_dir)
        entries = catalog.get_active_catalogs()

        assert len(entries) == 1
        assert entries[0].url == custom_url
        assert entries[0].install_allowed is True

    def test_env_var_invalid_url_raises(self, temp_dir, monkeypatch):
        """SPECKIT_CATALOG_URL with http:// (non-localhost) raises ValidationError."""
        project_dir = self._make_project(temp_dir)
        monkeypatch.setenv("SPECKIT_CATALOG_URL", "http://example.com/catalog.json")

        catalog = ExtensionCatalog(project_dir)
        with pytest.raises(ValidationError, match="HTTPS"):
            catalog.get_active_catalogs()

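# Illustrative sketch (hypothetical check, not the library's validator): the tests in this
# class expect HTTPS-only catalog URLs, with plain HTTP tolerated only for localhost-style
# hosts (see test_load_catalog_config_localhost_allowed further below).
from urllib.parse import urlparse

def catalog_url_is_acceptable(url: str) -> bool:
    """Accept https:// anywhere; accept http:// only for localhost-style hosts."""
    parsed = urlparse(url)
    if parsed.scheme == "https":
        return True
    return parsed.scheme == "http" and parsed.hostname in ("localhost", "127.0.0.1")

assert catalog_url_is_acceptable("https://example.com/catalog.json")
assert not catalog_url_is_acceptable("http://example.com/catalog.json")
assert catalog_url_is_acceptable("http://localhost:8000/catalog.json")
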
    def test_project_config_overrides_defaults(self, temp_dir):
        """Project-level extension-catalogs.yml overrides default stack."""
        project_dir = self._make_project(temp_dir)
        self._write_catalog_config(
            project_dir,
            [
                {
                    "name": "custom",
                    "url": "https://example.com/catalog.json",
                    "priority": 1,
                    "install_allowed": True,
                }
            ],
        )

        catalog = ExtensionCatalog(project_dir)
        entries = catalog.get_active_catalogs()

        assert len(entries) == 1
        assert entries[0].url == "https://example.com/catalog.json"
        assert entries[0].name == "custom"

    def test_project_config_sorted_by_priority(self, temp_dir):
        """Catalog entries are sorted by priority (ascending)."""
        project_dir = self._make_project(temp_dir)
        self._write_catalog_config(
            project_dir,
            [
                {
                    "name": "secondary",
                    "url": "https://example.com/secondary.json",
                    "priority": 5,
                    "install_allowed": False,
                },
                {
                    "name": "primary",
                    "url": "https://example.com/primary.json",
                    "priority": 1,
                    "install_allowed": True,
                },
            ],
        )

        catalog = ExtensionCatalog(project_dir)
        entries = catalog.get_active_catalogs()

        assert len(entries) == 2
        assert entries[0].name == "primary"
        assert entries[1].name == "secondary"

    def test_project_config_invalid_url_raises(self, temp_dir):
        """Project config with HTTP (non-localhost) URL raises ValidationError."""
        project_dir = self._make_project(temp_dir)
        self._write_catalog_config(
            project_dir,
            [
                {
                    "name": "bad",
                    "url": "http://example.com/catalog.json",
                    "priority": 1,
                    "install_allowed": True,
                }
            ],
        )

        catalog = ExtensionCatalog(project_dir)
        with pytest.raises(ValidationError, match="HTTPS"):
            catalog.get_active_catalogs()

    def test_empty_project_config_falls_back_to_defaults(self, temp_dir):
        """Empty catalogs list in config falls back to default stack."""
        import yaml as yaml_module

        project_dir = self._make_project(temp_dir)
        config_path = project_dir / ".specify" / "extension-catalogs.yml"
        with open(config_path, "w") as f:
            yaml_module.dump({"catalogs": []}, f)

        catalog = ExtensionCatalog(project_dir)
        entries = catalog.get_active_catalogs()

        # Falls back to default stack
        assert len(entries) == 2
        assert entries[0].url == ExtensionCatalog.DEFAULT_CATALOG_URL

    # --- _load_catalog_config ---

    def test_load_catalog_config_missing_file(self, temp_dir):
        """Returns None when config file doesn't exist."""
        project_dir = self._make_project(temp_dir)
        catalog = ExtensionCatalog(project_dir)

        result = catalog._load_catalog_config(project_dir / ".specify" / "nonexistent.yml")
        assert result is None

    def test_load_catalog_config_localhost_allowed(self, temp_dir):
        """Localhost HTTP URLs are allowed in config."""
        project_dir = self._make_project(temp_dir)
        self._write_catalog_config(
            project_dir,
            [
                {
                    "name": "local",
                    "url": "http://localhost:8000/catalog.json",
                    "priority": 1,
                    "install_allowed": True,
                }
            ],
        )

        catalog = ExtensionCatalog(project_dir)
        entries = catalog.get_active_catalogs()

        assert len(entries) == 1
        assert entries[0].url == "http://localhost:8000/catalog.json"

    # --- Merge conflict resolution ---

    def test_merge_conflict_higher_priority_wins(self, temp_dir):
        """When same extension id is in two catalogs, higher priority wins."""
        project_dir = self._make_project(temp_dir)

        # Write project config with two catalogs
        self._write_catalog_config(
            project_dir,
            [
                {
                    "name": "primary",
                    "url": ExtensionCatalog.DEFAULT_CATALOG_URL,
                    "priority": 1,
                    "install_allowed": True,
                },
                {
                    "name": "secondary",
                    "url": ExtensionCatalog.COMMUNITY_CATALOG_URL,
                    "priority": 2,
                    "install_allowed": False,
                },
            ],
        )

        catalog = ExtensionCatalog(project_dir)

        # Write primary cache with jira v2.0.0
        primary_data = {
            "schema_version": "1.0",
            "extensions": {
                "jira": {
                    "name": "Jira Integration",
                    "id": "jira",
                    "version": "2.0.0",
                    "description": "Primary Jira",
                }
            },
        }
        catalog.cache_dir.mkdir(parents=True, exist_ok=True)
        catalog.cache_file.write_text(json.dumps(primary_data))
        catalog.cache_metadata_file.write_text(
            json.dumps({"cached_at": datetime.now(timezone.utc).isoformat(), "catalog_url": "http://test.com"})
        )

        # Write secondary cache (URL-hash-based) with jira v1.0.0 (should lose)
        import hashlib

        url_hash = hashlib.sha256(ExtensionCatalog.COMMUNITY_CATALOG_URL.encode()).hexdigest()[:16]
        secondary_cache = catalog.cache_dir / f"catalog-{url_hash}.json"
        secondary_meta = catalog.cache_dir / f"catalog-{url_hash}-metadata.json"
        secondary_data = {
            "schema_version": "1.0",
            "extensions": {
                "jira": {
                    "name": "Jira Integration Community",
                    "id": "jira",
                    "version": "1.0.0",
                    "description": "Community Jira",
                },
                "linear": {
                    "name": "Linear",
                    "id": "linear",
                    "version": "0.9.0",
                    "description": "Linear from secondary",
                },
            },
        }
        secondary_cache.write_text(json.dumps(secondary_data))
        secondary_meta.write_text(
            json.dumps({"cached_at": datetime.now(timezone.utc).isoformat(), "catalog_url": ExtensionCatalog.COMMUNITY_CATALOG_URL})
        )

        results = catalog.search()
        jira_results = [r for r in results if r["id"] == "jira"]
        assert len(jira_results) == 1
        # Primary catalog wins
        assert jira_results[0]["version"] == "2.0.0"
        assert jira_results[0]["_catalog_name"] == "primary"
        assert jira_results[0]["_install_allowed"] is True

        # linear comes from secondary
        linear_results = [r for r in results if r["id"] == "linear"]
        assert len(linear_results) == 1
        assert linear_results[0]["_catalog_name"] == "secondary"
        assert linear_results[0]["_install_allowed"] is False

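# Illustrative sketch (hypothetical merge logic, not the catalog's actual implementation):
# the behaviour asserted above amounts to merging catalogs in ascending priority order and
# never letting a lower-priority catalog overwrite an extension id that is already present.
def merge_catalogs_sketch(catalogs):
    """catalogs: list of (name, install_allowed, extensions-dict), pre-sorted by priority."""
    merged = {}
    for name, install_allowed, extensions in catalogs:
        for ext_id, entry in extensions.items():
            if ext_id in merged:
                continue  # a higher-priority catalog already supplied this extension
            merged[ext_id] = {**entry, "_catalog_name": name, "_install_allowed": install_allowed}
    return merged

merged = merge_catalogs_sketch([
    ("primary", True, {"jira": {"id": "jira", "version": "2.0.0"}}),
    ("secondary", False, {"jira": {"id": "jira", "version": "1.0.0"}, "linear": {"id": "linear", "version": "0.9.0"}}),
])
assert merged["jira"]["version"] == "2.0.0" and merged["jira"]["_catalog_name"] == "primary"
assert merged["linear"]["_catalog_name"] == "secondary" and merged["linear"]["_install_allowed"] is False
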
    def test_install_allowed_false_from_get_extension_info(self, temp_dir):
        """get_extension_info includes _install_allowed from source catalog."""
        project_dir = self._make_project(temp_dir)

        # Single catalog that is install_allowed=False
        self._write_catalog_config(
            project_dir,
            [
                {
                    "name": "discovery",
                    "url": ExtensionCatalog.DEFAULT_CATALOG_URL,
                    "priority": 1,
                    "install_allowed": False,
                }
            ],
        )

        catalog = ExtensionCatalog(project_dir)
        self._write_valid_cache(
            catalog,
            {
                "jira": {
                    "name": "Jira Integration",
                    "id": "jira",
                    "version": "1.0.0",
                    "description": "Jira integration",
                }
            },
        )

        info = catalog.get_extension_info("jira")
        assert info is not None
        assert info["_install_allowed"] is False
        assert info["_catalog_name"] == "discovery"

    def test_search_results_include_catalog_metadata(self, temp_dir):
        """Search results include _catalog_name and _install_allowed."""
        project_dir = self._make_project(temp_dir)
        self._write_catalog_config(
            project_dir,
            [
                {
                    "name": "org",
                    "url": ExtensionCatalog.DEFAULT_CATALOG_URL,
                    "priority": 1,
                    "install_allowed": True,
                }
            ],
        )

        catalog = ExtensionCatalog(project_dir)
        self._write_valid_cache(
            catalog,
            {
                "jira": {
                    "name": "Jira Integration",
                    "id": "jira",
                    "version": "1.0.0",
                    "description": "Jira integration",
                }
            },
        )

        results = catalog.search()
        assert len(results) == 1
        assert results[0]["_catalog_name"] == "org"
        assert results[0]["_install_allowed"] is True