Compare commits


2 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Manfred Riem | 9b10d440b1 | Potential fix for pull request finding (Co-authored-by: Copilot Autofix powered by AI <175728472+Copilot@users.noreply.github.com>) | 2026-03-13 15:14:46 -05:00 |
| github-actions[bot] | 2656ee1df7 | chore: bump version to 0.3.0 | 2026-03-13 20:11:58 +00:00 |
183 changed files with 4188 additions and 17043 deletions

View File

@@ -51,14 +51,6 @@ echo -e "\n🤖 Installing OpenCode CLI..."
run_command "npm install -g opencode-ai@latest"
echo "✅ Done"
echo -e "\n🤖 Installing Junie CLI..."
run_command "npm install -g @jetbrains/junie-cli@latest"
echo "✅ Done"
echo -e "\n🤖 Installing Pi Coding Agent..."
run_command "npm install -g @mariozechner/pi-coding-agent@latest"
echo "✅ Done"
echo -e "\n🤖 Installing Kiro CLI..."
# https://kiro.dev/docs/cli/
KIRO_INSTALLER_URL="https://kiro.dev/install.sh"

View File

@@ -8,7 +8,7 @@ body:
value: |
Thanks for requesting a new agent! Before submitting, please check if the agent is already supported.
**Currently supported agents**: Claude Code, Gemini CLI, GitHub Copilot, Cursor, Qwen Code, opencode, Codex CLI, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy, Qoder CLI, Kiro CLI, Amp, SHAI, Tabnine CLI, Antigravity, IBM Bob, Mistral Vibe, Kimi Code, Trae, Pi Coding Agent, iFlow CLI
**Currently supported agents**: Claude Code, Gemini CLI, GitHub Copilot, Cursor, Qwen Code, opencode, Codex CLI, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy, Qoder CLI, Kiro CLI, Amp, SHAI, IBM Bob, Antigravity
- type: input
id: agent-name

View File

@@ -7,7 +7,7 @@ contact_links:
url: https://github.com/github/spec-kit/blob/main/README.md
about: Read the Spec Kit documentation and guides
- name: 🛠️ Extension Development Guide
url: https://github.com/github/spec-kit/blob/main/extensions/EXTENSION-DEVELOPMENT-GUIDE.md
url: https://github.com/manfredseee/spec-kit/blob/main/extensions/EXTENSION-DEVELOPMENT-GUIDE.md
about: Learn how to develop and publish Spec Kit extensions
- name: 🤝 Contributing Guide
url: https://github.com/github/spec-kit/blob/main/CONTRIBUTING.md

View File

@@ -12,7 +12,7 @@ body:
- Review the [Extension Publishing Guide](https://github.com/github/spec-kit/blob/main/extensions/EXTENSION-PUBLISHING-GUIDE.md)
- Ensure your extension has a valid `extension.yml` manifest
- Create a GitHub release with a version tag (e.g., v1.0.0)
- Test installation: `specify extension add <extension-name> --from <your-release-url>`
- Test installation: `specify extension add --from <your-release-url>`
- type: input
id: extension-id
@@ -229,7 +229,7 @@ body:
placeholder: |
```bash
# Install extension
specify extension add <extension-name> --from https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip
specify extension add --from https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip
# Use a command
/speckit.your-extension.command-name arg1 arg2

View File

@@ -1,169 +0,0 @@
name: Preset Submission
description: Submit your preset to the Spec Kit preset catalog
title: "[Preset]: Add "
labels: ["preset-submission", "enhancement", "needs-triage"]
body:
- type: markdown
attributes:
value: |
Thanks for contributing a preset! This template helps you submit your preset to the community catalog.
**Before submitting:**
- Review the [Preset Publishing Guide](https://github.com/github/spec-kit/blob/main/presets/PUBLISHING.md)
- Ensure your preset has a valid `preset.yml` manifest
- Create a GitHub release with a version tag (e.g., v1.0.0)
- Test installation from the release archive: `specify preset add --from <download-url>`
- type: input
id: preset-id
attributes:
label: Preset ID
description: Unique preset identifier (lowercase with hyphens only)
placeholder: "e.g., healthcare-compliance"
validations:
required: true
- type: input
id: preset-name
attributes:
label: Preset Name
description: Human-readable preset name
placeholder: "e.g., Healthcare Compliance"
validations:
required: true
- type: input
id: version
attributes:
label: Version
description: Semantic version number
placeholder: "e.g., 1.0.0"
validations:
required: true
- type: textarea
id: description
attributes:
label: Description
description: Brief description of what your preset does (under 200 characters)
placeholder: Enforces HIPAA-compliant spec workflows with audit templates and compliance checklists
validations:
required: true
- type: input
id: author
attributes:
label: Author
description: Your name or organization
placeholder: "e.g., John Doe or Acme Corp"
validations:
required: true
- type: input
id: repository
attributes:
label: Repository URL
description: GitHub repository URL for your preset
placeholder: "https://github.com/your-org/spec-kit-your-preset"
validations:
required: true
- type: input
id: download-url
attributes:
label: Download URL
description: URL to the GitHub release archive for your preset (e.g., https://github.com/your-org/spec-kit-preset-your-preset/archive/refs/tags/v1.0.0.zip)
placeholder: "https://github.com/your-org/spec-kit-preset-your-preset/archive/refs/tags/v1.0.0.zip"
validations:
required: true
- type: input
id: license
attributes:
label: License
description: Open source license type
placeholder: "e.g., MIT, Apache-2.0"
validations:
required: true
- type: input
id: speckit-version
attributes:
label: Required Spec Kit Version
description: Minimum Spec Kit version required
placeholder: "e.g., >=0.3.0"
validations:
required: true
- type: textarea
id: templates-provided
attributes:
label: Templates Provided
description: List the template overrides your preset provides
placeholder: |
- spec-template.md — adds compliance section
- plan-template.md — includes audit checkpoints
- checklist-template.md — HIPAA compliance checklist
validations:
required: true
- type: textarea
id: commands-provided
attributes:
label: Commands Provided (optional)
description: List any command overrides your preset provides
placeholder: |
- speckit.specify.md — customized for compliance workflows
- type: textarea
id: tags
attributes:
label: Tags
description: 2-5 relevant tags (lowercase, separated by commas)
placeholder: "compliance, healthcare, hipaa, audit"
validations:
required: true
- type: textarea
id: features
attributes:
label: Key Features
description: List the main features and capabilities of your preset
placeholder: |
- HIPAA-compliant spec templates
- Audit trail checklists
- Compliance review workflow
validations:
required: true
- type: checkboxes
id: testing
attributes:
label: Testing Checklist
description: Confirm that your preset has been tested
options:
- label: Preset installs successfully via `specify preset add`
required: true
- label: Template resolution works correctly after installation
required: true
- label: Documentation is complete and accurate
required: true
- label: Tested on at least one real project
required: true
- type: checkboxes
id: requirements
attributes:
label: Submission Requirements
description: Verify your preset meets all requirements
options:
- label: Valid `preset.yml` manifest included
required: true
- label: README.md with description and usage instructions
required: true
- label: LICENSE file included
required: true
- label: GitHub release created with version tag
required: true
- label: Preset ID follows naming conventions (lowercase-with-hyphens)
required: true

View File

@@ -64,5 +64,5 @@ jobs:
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v5
uses: actions/deploy-pages@v4

View File

@@ -15,7 +15,7 @@ jobs:
uses: actions/checkout@v6
- name: Run markdownlint-cli2
uses: DavidAnson/markdownlint-cli2-action@ce4853d43830c74c1753b39f3cf40f71c2031eb9 # v23
uses: DavidAnson/markdownlint-cli2-action@v19
with:
globs: |
'**/*.md'

View File

@@ -86,10 +86,8 @@ jobs:
if [ -f "CHANGELOG.md" ]; then
DATE=$(date +%Y-%m-%d)
# Get the previous tag by sorting all version tags numerically
# (git describe --tags only finds tags reachable from HEAD,
# which misses tags on unmerged release branches)
PREVIOUS_TAG=$(git tag -l 'v*' --sort=-version:refname | head -n 1)
# Get the previous tag to compare commits
PREVIOUS_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "")
echo "Generating changelog from commits..."
if [[ -n "$PREVIOUS_TAG" ]]; then
@@ -100,16 +98,18 @@ jobs:
COMMITS="- Initial release"
fi
# Create new changelog entry — insert after the marker comment
NEW_ENTRY=$(printf '%s\n' \
"" \
"## [${{ steps.version.outputs.version }}] - $DATE" \
"" \
"### Changed" \
"" \
"$COMMITS")
awk -v entry="$NEW_ENTRY" '/<!-- insert new changelog below this comment -->/ { print; print entry; next } {print}' CHANGELOG.md > CHANGELOG.md.tmp
# Create new changelog entry
{
head -n 8 CHANGELOG.md
echo ""
echo "## [${{ steps.version.outputs.version }}] - $DATE"
echo ""
echo "### Changed"
echo ""
echo "$COMMITS"
echo ""
tail -n +9 CHANGELOG.md
} > CHANGELOG.md.tmp
mv CHANGELOG.md.tmp CHANGELOG.md
echo "✅ Updated CHANGELOG.md with commits since $PREVIOUS_TAG"
@@ -139,22 +139,6 @@ jobs:
git push origin "${{ steps.version.outputs.tag }}"
echo "Branch ${{ env.branch }} and tag ${{ steps.version.outputs.tag }} pushed"
- name: Bump to dev version
id: dev_version
run: |
IFS='.' read -r MAJOR MINOR PATCH <<< "${{ steps.version.outputs.version }}"
NEXT_DEV="$MAJOR.$MINOR.$((PATCH + 1)).dev0"
echo "dev_version=$NEXT_DEV" >> $GITHUB_OUTPUT
sed -i "s/version = \".*\"/version = \"$NEXT_DEV\"/" pyproject.toml
git add pyproject.toml
if git diff --cached --quiet; then
echo "No dev version changes to commit"
else
git commit -m "chore: begin $NEXT_DEV development"
git push origin "${{ env.branch }}"
echo "Bumped to dev version $NEXT_DEV"
fi
- name: Open pull request
env:
GITHUB_TOKEN: ${{ secrets.RELEASE_PAT }}
@@ -162,17 +146,16 @@ jobs:
gh pr create \
--base main \
--head "${{ env.branch }}" \
--title "chore: release ${{ steps.version.outputs.version }}, begin ${{ steps.dev_version.outputs.dev_version }} development" \
--body "Automated release of ${{ steps.version.outputs.version }}.
--title "chore: bump version to ${{ steps.version.outputs.version }}" \
--body "Automated version bump to ${{ steps.version.outputs.version }}.
This PR was created by the Release Trigger workflow. The git tag \`${{ steps.version.outputs.tag }}\` has already been pushed and the release artifacts are being built.
Merging this PR will set \`main\` to \`${{ steps.dev_version.outputs.dev_version }}\` so that development installs are clearly marked as pre-release."
Merge this PR to record the version bump and changelog update on \`main\`."
- name: Summary
run: |
echo "✅ Version bumped to ${{ steps.version.outputs.version }}"
echo "✅ Tag ${{ steps.version.outputs.tag }} created and pushed"
echo "✅ Dev version set to ${{ steps.dev_version.outputs.dev_version }}"
echo "✅ PR opened to merge version bump into main"
echo "🚀 Release workflow is building artifacts from the tag"

View File

@@ -27,63 +27,35 @@ jobs:
- name: Check if release already exists
id: check_release
run: |
VERSION="${{ steps.version.outputs.tag }}"
if gh release view "$VERSION" >/dev/null 2>&1; then
echo "exists=true" >> $GITHUB_OUTPUT
echo "Release $VERSION already exists, skipping..."
else
echo "exists=false" >> $GITHUB_OUTPUT
echo "Release $VERSION does not exist, proceeding..."
fi
chmod +x .github/workflows/scripts/check-release-exists.sh
.github/workflows/scripts/check-release-exists.sh ${{ steps.version.outputs.tag }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Generate release notes
- name: Create release package variants
if: steps.check_release.outputs.exists == 'false'
run: |
VERSION="${{ steps.version.outputs.tag }}"
VERSION_NO_V=${VERSION#v}
chmod +x .github/workflows/scripts/create-release-packages.sh
.github/workflows/scripts/create-release-packages.sh ${{ steps.version.outputs.tag }}
# Find previous tag
PREVIOUS_TAG=$(git tag -l 'v*' --sort=-version:refname | grep -v "^${VERSION}$" | head -n 1)
- name: Generate release notes
if: steps.check_release.outputs.exists == 'false'
id: release_notes
run: |
chmod +x .github/workflows/scripts/generate-release-notes.sh
# Get the previous tag for changelog generation
PREVIOUS_TAG=$(git describe --tags --abbrev=0 ${{ steps.version.outputs.tag }}^ 2>/dev/null || echo "")
# Default to v0.0.0 if no previous tag is found (e.g., first release)
if [ -z "$PREVIOUS_TAG" ]; then
PREVIOUS_TAG=""
PREVIOUS_TAG="v0.0.0"
fi
# Get commits since previous tag
if [ -z "$PREVIOUS_TAG" ]; then
COMMIT_COUNT=$(git rev-list --count HEAD)
if [ "$COMMIT_COUNT" -gt 20 ]; then
COMMITS=$(git log --oneline --pretty=format:"- %s" --no-merges HEAD~20..HEAD)
else
COMMITS=$(git log --oneline --pretty=format:"- %s" --no-merges)
fi
else
COMMITS=$(git log --oneline --pretty=format:"- %s" --no-merges "$PREVIOUS_TAG"..HEAD)
fi
cat > release_notes.md << NOTES_EOF
## Install
\`\`\`bash
uv tool install specify-cli --from git+https://github.com/github/spec-kit.git@${VERSION}
specify init my-project
\`\`\`
NOTES_EOF
echo "## What's Changed" >> release_notes.md
echo "" >> release_notes.md
echo "$COMMITS" >> release_notes.md
.github/workflows/scripts/generate-release-notes.sh ${{ steps.version.outputs.tag }} "$PREVIOUS_TAG"
- name: Create GitHub Release
if: steps.check_release.outputs.exists == 'false'
run: |
VERSION="${{ steps.version.outputs.tag }}"
VERSION_NO_V=${VERSION#v}
gh release create "$VERSION" \
--title "Spec Kit - $VERSION_NO_V" \
--notes-file release_notes.md
chmod +x .github/workflows/scripts/create-github-release.sh
.github/workflows/scripts/create-github-release.sh ${{ steps.version.outputs.tag }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -0,0 +1,21 @@
#!/usr/bin/env bash
set -euo pipefail
# check-release-exists.sh
# Check if a GitHub release already exists for the given version
# Usage: check-release-exists.sh <version>
if [[ $# -ne 1 ]]; then
echo "Usage: $0 <version>" >&2
exit 1
fi
VERSION="$1"
if gh release view "$VERSION" >/dev/null 2>&1; then
echo "exists=true" >> $GITHUB_OUTPUT
echo "Release $VERSION already exists, skipping..."
else
echo "exists=false" >> $GITHUB_OUTPUT
echo "Release $VERSION does not exist, proceeding..."
fi

View File

@@ -0,0 +1,64 @@
#!/usr/bin/env bash
set -euo pipefail
# create-github-release.sh
# Create a GitHub release with all template zip files
# Usage: create-github-release.sh <version>
if [[ $# -ne 1 ]]; then
echo "Usage: $0 <version>" >&2
exit 1
fi
VERSION="$1"
# Remove 'v' prefix from version for release title
VERSION_NO_V=${VERSION#v}
gh release create "$VERSION" \
.genreleases/spec-kit-template-copilot-sh-"$VERSION".zip \
.genreleases/spec-kit-template-copilot-ps-"$VERSION".zip \
.genreleases/spec-kit-template-claude-sh-"$VERSION".zip \
.genreleases/spec-kit-template-claude-ps-"$VERSION".zip \
.genreleases/spec-kit-template-gemini-sh-"$VERSION".zip \
.genreleases/spec-kit-template-gemini-ps-"$VERSION".zip \
.genreleases/spec-kit-template-cursor-agent-sh-"$VERSION".zip \
.genreleases/spec-kit-template-cursor-agent-ps-"$VERSION".zip \
.genreleases/spec-kit-template-opencode-sh-"$VERSION".zip \
.genreleases/spec-kit-template-opencode-ps-"$VERSION".zip \
.genreleases/spec-kit-template-qwen-sh-"$VERSION".zip \
.genreleases/spec-kit-template-qwen-ps-"$VERSION".zip \
.genreleases/spec-kit-template-windsurf-sh-"$VERSION".zip \
.genreleases/spec-kit-template-windsurf-ps-"$VERSION".zip \
.genreleases/spec-kit-template-codex-sh-"$VERSION".zip \
.genreleases/spec-kit-template-codex-ps-"$VERSION".zip \
.genreleases/spec-kit-template-kilocode-sh-"$VERSION".zip \
.genreleases/spec-kit-template-kilocode-ps-"$VERSION".zip \
.genreleases/spec-kit-template-auggie-sh-"$VERSION".zip \
.genreleases/spec-kit-template-auggie-ps-"$VERSION".zip \
.genreleases/spec-kit-template-roo-sh-"$VERSION".zip \
.genreleases/spec-kit-template-roo-ps-"$VERSION".zip \
.genreleases/spec-kit-template-codebuddy-sh-"$VERSION".zip \
.genreleases/spec-kit-template-codebuddy-ps-"$VERSION".zip \
.genreleases/spec-kit-template-qodercli-sh-"$VERSION".zip \
.genreleases/spec-kit-template-qodercli-ps-"$VERSION".zip \
.genreleases/spec-kit-template-amp-sh-"$VERSION".zip \
.genreleases/spec-kit-template-amp-ps-"$VERSION".zip \
.genreleases/spec-kit-template-shai-sh-"$VERSION".zip \
.genreleases/spec-kit-template-shai-ps-"$VERSION".zip \
.genreleases/spec-kit-template-tabnine-sh-"$VERSION".zip \
.genreleases/spec-kit-template-tabnine-ps-"$VERSION".zip \
.genreleases/spec-kit-template-kiro-cli-sh-"$VERSION".zip \
.genreleases/spec-kit-template-kiro-cli-ps-"$VERSION".zip \
.genreleases/spec-kit-template-agy-sh-"$VERSION".zip \
.genreleases/spec-kit-template-agy-ps-"$VERSION".zip \
.genreleases/spec-kit-template-bob-sh-"$VERSION".zip \
.genreleases/spec-kit-template-bob-ps-"$VERSION".zip \
.genreleases/spec-kit-template-vibe-sh-"$VERSION".zip \
.genreleases/spec-kit-template-vibe-ps-"$VERSION".zip \
.genreleases/spec-kit-template-kimi-sh-"$VERSION".zip \
.genreleases/spec-kit-template-kimi-ps-"$VERSION".zip \
.genreleases/spec-kit-template-generic-sh-"$VERSION".zip \
.genreleases/spec-kit-template-generic-ps-"$VERSION".zip \
--title "Spec Kit Templates - $VERSION_NO_V" \
--notes-file release_notes.md

View File

@@ -0,0 +1,537 @@
#!/usr/bin/env pwsh
#requires -Version 7.0
<#
.SYNOPSIS
Build Spec Kit template release archives for each supported AI assistant and script type.
.DESCRIPTION
create-release-packages.ps1 (workflow-local)
Build Spec Kit template release archives for each supported AI assistant and script type.
.PARAMETER Version
Version string with leading 'v' (e.g., v0.2.0)
.PARAMETER Agents
Comma or space separated subset of agents to build (default: all)
Valid agents: claude, gemini, copilot, cursor-agent, qwen, opencode, windsurf, codex, kilocode, auggie, roo, codebuddy, amp, kiro-cli, bob, qodercli, shai, tabnine, agy, vibe, kimi, generic
.PARAMETER Scripts
Comma or space separated subset of script types to build (default: both)
Valid scripts: sh, ps
.EXAMPLE
.\create-release-packages.ps1 -Version v0.2.0
.EXAMPLE
.\create-release-packages.ps1 -Version v0.2.0 -Agents claude,copilot -Scripts sh
.EXAMPLE
.\create-release-packages.ps1 -Version v0.2.0 -Agents claude -Scripts ps
#>
param(
[Parameter(Mandatory=$true, Position=0)]
[string]$Version,
[Parameter(Mandatory=$false)]
[string]$Agents = "",
[Parameter(Mandatory=$false)]
[string]$Scripts = ""
)
$ErrorActionPreference = "Stop"
# Validate version format
if ($Version -notmatch '^v\d+\.\d+\.\d+$') {
Write-Error "Version must look like v0.0.0"
exit 1
}
Write-Host "Building release packages for $Version"
# Create and use .genreleases directory for all build artifacts
$GenReleasesDir = ".genreleases"
if (Test-Path $GenReleasesDir) {
Remove-Item -Path $GenReleasesDir -Recurse -Force -ErrorAction SilentlyContinue
}
New-Item -ItemType Directory -Path $GenReleasesDir -Force | Out-Null
function Rewrite-Paths {
param([string]$Content)
$Content = $Content -replace '(/?)\bmemory/', '.specify/memory/'
$Content = $Content -replace '(/?)\bscripts/', '.specify/scripts/'
$Content = $Content -replace '(/?)\btemplates/', '.specify/templates/'
# Collapse accidental double rewrites, mirroring the bash rewrite_paths helper
$Content = $Content -replace '\.specify\.specify/', '.specify/'
return $Content
}
function Generate-Commands {
param(
[string]$Agent,
[string]$Extension,
[string]$ArgFormat,
[string]$OutputDir,
[string]$ScriptVariant
)
New-Item -ItemType Directory -Path $OutputDir -Force | Out-Null
$templates = Get-ChildItem -Path "templates/commands/*.md" -File -ErrorAction SilentlyContinue
foreach ($template in $templates) {
$name = [System.IO.Path]::GetFileNameWithoutExtension($template.Name)
# Read file content and normalize line endings
$fileContent = (Get-Content -Path $template.FullName -Raw) -replace "`r`n", "`n"
# Extract description from YAML frontmatter
$description = ""
if ($fileContent -match '(?m)^description:\s*(.+)$') {
$description = $matches[1]
}
# Extract script command from YAML frontmatter
$scriptCommand = ""
if ($fileContent -match "(?m)^\s*${ScriptVariant}:\s*(.+)$") {
$scriptCommand = $matches[1]
}
if ([string]::IsNullOrEmpty($scriptCommand)) {
Write-Warning "No script command found for $ScriptVariant in $($template.Name)"
$scriptCommand = "(Missing script command for $ScriptVariant)"
}
# Extract agent_script command from YAML frontmatter if present
$agentScriptCommand = ""
if ($fileContent -match "(?ms)agent_scripts:.*?^\s*${ScriptVariant}:\s*(.+?)$") {
$agentScriptCommand = $matches[1].Trim()
}
# Replace {SCRIPT} placeholder with the script command
$body = $fileContent -replace '\{SCRIPT\}', $scriptCommand
# Replace {AGENT_SCRIPT} placeholder with the agent script command if found
if (-not [string]::IsNullOrEmpty($agentScriptCommand)) {
$body = $body -replace '\{AGENT_SCRIPT\}', $agentScriptCommand
}
# Remove the scripts: and agent_scripts: sections from frontmatter
$lines = $body -split "`n"
$outputLines = @()
$inFrontmatter = $false
$skipScripts = $false
$dashCount = 0
foreach ($line in $lines) {
if ($line -match '^---$') {
$outputLines += $line
$dashCount++
if ($dashCount -eq 1) {
$inFrontmatter = $true
} else {
$inFrontmatter = $false
}
continue
}
if ($inFrontmatter) {
if ($line -match '^(scripts|agent_scripts):$') {
$skipScripts = $true
continue
}
if ($line -match '^[a-zA-Z].*:' -and $skipScripts) {
$skipScripts = $false
}
if ($skipScripts -and $line -match '^\s+') {
continue
}
}
$outputLines += $line
}
$body = $outputLines -join "`n"
# Apply other substitutions
$body = $body -replace '\{ARGS\}', $ArgFormat
$body = $body -replace '__AGENT__', $Agent
$body = Rewrite-Paths -Content $body
# Generate output file based on extension
$outputFile = Join-Path $OutputDir "speckit.$name.$Extension"
switch ($Extension) {
'toml' {
$body = $body -replace '\\', '\\'
$output = "description = `"$description`"`n`nprompt = `"`"`"`n$body`n`"`"`""
Set-Content -Path $outputFile -Value $output -NoNewline
}
'md' {
Set-Content -Path $outputFile -Value $body -NoNewline
}
'agent.md' {
Set-Content -Path $outputFile -Value $body -NoNewline
}
}
}
}
function Generate-CopilotPrompts {
param(
[string]$AgentsDir,
[string]$PromptsDir
)
New-Item -ItemType Directory -Path $PromptsDir -Force | Out-Null
$agentFiles = Get-ChildItem -Path "$AgentsDir/speckit.*.agent.md" -File -ErrorAction SilentlyContinue
foreach ($agentFile in $agentFiles) {
$basename = $agentFile.Name -replace '\.agent\.md$', ''
$promptFile = Join-Path $PromptsDir "$basename.prompt.md"
$content = @"
---
agent: $basename
---
"@
Set-Content -Path $promptFile -Value $content
}
}
# Create Kimi Code skills in .kimi/skills/<name>/SKILL.md format.
# Kimi CLI discovers skills as directories containing a SKILL.md file,
# invoked with /skill:<name> (e.g. /skill:speckit.specify).
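# Example layout produced by the function below (illustrative):
#   .kimi/skills/speckit.specify/SKILL.md  -> /skill:speckit.specify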
function New-KimiSkills {
param(
[string]$SkillsDir,
[string]$ScriptVariant
)
$templates = Get-ChildItem -Path "templates/commands/*.md" -File -ErrorAction SilentlyContinue
foreach ($template in $templates) {
$name = [System.IO.Path]::GetFileNameWithoutExtension($template.Name)
$skillName = "speckit.$name"
$skillDir = Join-Path $SkillsDir $skillName
New-Item -ItemType Directory -Force -Path $skillDir | Out-Null
$fileContent = (Get-Content -Path $template.FullName -Raw) -replace "`r`n", "`n"
# Extract description
$description = "Spec Kit: $name workflow"
if ($fileContent -match '(?m)^description:\s*(.+)$') {
$description = $matches[1]
}
# Extract script command
$scriptCommand = "(Missing script command for $ScriptVariant)"
if ($fileContent -match "(?m)^\s*${ScriptVariant}:\s*(.+)$") {
$scriptCommand = $matches[1]
}
# Extract agent_script command from frontmatter if present
$agentScriptCommand = ""
if ($fileContent -match "(?ms)agent_scripts:.*?^\s*${ScriptVariant}:\s*(.+?)$") {
$agentScriptCommand = $matches[1].Trim()
}
# Replace {SCRIPT}, strip scripts sections, rewrite paths
$body = $fileContent -replace '\{SCRIPT\}', $scriptCommand
if (-not [string]::IsNullOrEmpty($agentScriptCommand)) {
$body = $body -replace '\{AGENT_SCRIPT\}', $agentScriptCommand
}
$lines = $body -split "`n"
$outputLines = @()
$inFrontmatter = $false
$skipScripts = $false
$dashCount = 0
foreach ($line in $lines) {
if ($line -match '^---$') {
$outputLines += $line
$dashCount++
$inFrontmatter = ($dashCount -eq 1)
continue
}
if ($inFrontmatter) {
if ($line -match '^(scripts|agent_scripts):$') { $skipScripts = $true; continue }
if ($line -match '^[a-zA-Z].*:' -and $skipScripts) { $skipScripts = $false }
if ($skipScripts -and $line -match '^\s+') { continue }
}
$outputLines += $line
}
$body = $outputLines -join "`n"
$body = $body -replace '\{ARGS\}', '$ARGUMENTS'
$body = $body -replace '__AGENT__', 'kimi'
$body = Rewrite-Paths -Content $body
# Strip existing frontmatter, keep only body
$templateBody = ""
$fmCount = 0
$inBody = $false
foreach ($line in ($body -split "`n")) {
if ($line -match '^---$') {
$fmCount++
if ($fmCount -eq 2) { $inBody = $true }
continue
}
if ($inBody) { $templateBody += "$line`n" }
}
$skillContent = "---`nname: `"$skillName`"`ndescription: `"$description`"`n---`n`n$templateBody"
Set-Content -Path (Join-Path $skillDir "SKILL.md") -Value $skillContent -NoNewline
}
}
function Build-Variant {
param(
[string]$Agent,
[string]$Script
)
$baseDir = Join-Path $GenReleasesDir "sdd-${Agent}-package-${Script}"
Write-Host "Building $Agent ($Script) package..."
New-Item -ItemType Directory -Path $baseDir -Force | Out-Null
# Copy base structure but filter scripts by variant
$specDir = Join-Path $baseDir ".specify"
New-Item -ItemType Directory -Path $specDir -Force | Out-Null
# Copy memory directory
if (Test-Path "memory") {
Copy-Item -Path "memory" -Destination $specDir -Recurse -Force
Write-Host "Copied memory -> .specify"
}
# Only copy the relevant script variant directory
if (Test-Path "scripts") {
$scriptsDestDir = Join-Path $specDir "scripts"
New-Item -ItemType Directory -Path $scriptsDestDir -Force | Out-Null
switch ($Script) {
'sh' {
if (Test-Path "scripts/bash") {
Copy-Item -Path "scripts/bash" -Destination $scriptsDestDir -Recurse -Force
Write-Host "Copied scripts/bash -> .specify/scripts"
}
}
'ps' {
if (Test-Path "scripts/powershell") {
Copy-Item -Path "scripts/powershell" -Destination $scriptsDestDir -Recurse -Force
Write-Host "Copied scripts/powershell -> .specify/scripts"
}
}
}
Get-ChildItem -Path "scripts" -File -ErrorAction SilentlyContinue | ForEach-Object {
Copy-Item -Path $_.FullName -Destination $scriptsDestDir -Force
}
}
# Copy templates (excluding commands directory and vscode-settings.json)
if (Test-Path "templates") {
$templatesDestDir = Join-Path $specDir "templates"
New-Item -ItemType Directory -Path $templatesDestDir -Force | Out-Null
Get-ChildItem -Path "templates" -Recurse -File | Where-Object {
$_.FullName -notmatch 'templates[/\\]commands[/\\]' -and $_.Name -ne 'vscode-settings.json'
} | ForEach-Object {
$relativePath = $_.FullName.Substring((Resolve-Path "templates").Path.Length + 1)
$destFile = Join-Path $templatesDestDir $relativePath
$destFileDir = Split-Path $destFile -Parent
New-Item -ItemType Directory -Path $destFileDir -Force | Out-Null
Copy-Item -Path $_.FullName -Destination $destFile -Force
}
Write-Host "Copied templates -> .specify/templates"
}
# Generate agent-specific command files
switch ($Agent) {
'claude' {
$cmdDir = Join-Path $baseDir ".claude/commands"
Generate-Commands -Agent 'claude' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'gemini' {
$cmdDir = Join-Path $baseDir ".gemini/commands"
Generate-Commands -Agent 'gemini' -Extension 'toml' -ArgFormat '{{args}}' -OutputDir $cmdDir -ScriptVariant $Script
if (Test-Path "agent_templates/gemini/GEMINI.md") {
Copy-Item -Path "agent_templates/gemini/GEMINI.md" -Destination (Join-Path $baseDir "GEMINI.md")
}
}
'copilot' {
$agentsDir = Join-Path $baseDir ".github/agents"
Generate-Commands -Agent 'copilot' -Extension 'agent.md' -ArgFormat '$ARGUMENTS' -OutputDir $agentsDir -ScriptVariant $Script
$promptsDir = Join-Path $baseDir ".github/prompts"
Generate-CopilotPrompts -AgentsDir $agentsDir -PromptsDir $promptsDir
$vscodeDir = Join-Path $baseDir ".vscode"
New-Item -ItemType Directory -Path $vscodeDir -Force | Out-Null
if (Test-Path "templates/vscode-settings.json") {
Copy-Item -Path "templates/vscode-settings.json" -Destination (Join-Path $vscodeDir "settings.json")
}
}
'cursor-agent' {
$cmdDir = Join-Path $baseDir ".cursor/commands"
Generate-Commands -Agent 'cursor-agent' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'qwen' {
$cmdDir = Join-Path $baseDir ".qwen/commands"
Generate-Commands -Agent 'qwen' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
if (Test-Path "agent_templates/qwen/QWEN.md") {
Copy-Item -Path "agent_templates/qwen/QWEN.md" -Destination (Join-Path $baseDir "QWEN.md")
}
}
'opencode' {
$cmdDir = Join-Path $baseDir ".opencode/command"
Generate-Commands -Agent 'opencode' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'windsurf' {
$cmdDir = Join-Path $baseDir ".windsurf/workflows"
Generate-Commands -Agent 'windsurf' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'codex' {
$cmdDir = Join-Path $baseDir ".codex/prompts"
Generate-Commands -Agent 'codex' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'kilocode' {
$cmdDir = Join-Path $baseDir ".kilocode/workflows"
Generate-Commands -Agent 'kilocode' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'auggie' {
$cmdDir = Join-Path $baseDir ".augment/commands"
Generate-Commands -Agent 'auggie' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'roo' {
$cmdDir = Join-Path $baseDir ".roo/commands"
Generate-Commands -Agent 'roo' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'codebuddy' {
$cmdDir = Join-Path $baseDir ".codebuddy/commands"
Generate-Commands -Agent 'codebuddy' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'amp' {
$cmdDir = Join-Path $baseDir ".agents/commands"
Generate-Commands -Agent 'amp' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'kiro-cli' {
$cmdDir = Join-Path $baseDir ".kiro/prompts"
Generate-Commands -Agent 'kiro-cli' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'bob' {
$cmdDir = Join-Path $baseDir ".bob/commands"
Generate-Commands -Agent 'bob' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'qodercli' {
$cmdDir = Join-Path $baseDir ".qoder/commands"
Generate-Commands -Agent 'qodercli' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'shai' {
$cmdDir = Join-Path $baseDir ".shai/commands"
Generate-Commands -Agent 'shai' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'tabnine' {
$cmdDir = Join-Path $baseDir ".tabnine/agent/commands"
Generate-Commands -Agent 'tabnine' -Extension 'toml' -ArgFormat '{{args}}' -OutputDir $cmdDir -ScriptVariant $Script
$tabnineTemplate = Join-Path 'agent_templates' 'tabnine/TABNINE.md'
if (Test-Path $tabnineTemplate) { Copy-Item $tabnineTemplate (Join-Path $baseDir 'TABNINE.md') }
}
'agy' {
$cmdDir = Join-Path $baseDir ".agent/commands"
Generate-Commands -Agent 'agy' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'vibe' {
$cmdDir = Join-Path $baseDir ".vibe/prompts"
Generate-Commands -Agent 'vibe' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'kimi' {
$skillsDir = Join-Path $baseDir ".kimi/skills"
New-Item -ItemType Directory -Force -Path $skillsDir | Out-Null
New-KimiSkills -SkillsDir $skillsDir -ScriptVariant $Script
}
'generic' {
$cmdDir = Join-Path $baseDir ".speckit/commands"
Generate-Commands -Agent 'generic' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
default {
throw "Unsupported agent '$Agent'."
}
}
# Create zip archive
$zipFile = Join-Path $GenReleasesDir "spec-kit-template-${Agent}-${Script}-${Version}.zip"
Compress-Archive -Path "$baseDir/*" -DestinationPath $zipFile -Force
Write-Host "Created $zipFile"
}
# Define all agents and scripts
$AllAgents = @('claude', 'gemini', 'copilot', 'cursor-agent', 'qwen', 'opencode', 'windsurf', 'codex', 'kilocode', 'auggie', 'roo', 'codebuddy', 'amp', 'kiro-cli', 'bob', 'qodercli', 'shai', 'tabnine', 'agy', 'vibe', 'kimi', 'generic')
$AllScripts = @('sh', 'ps')
function Normalize-List {
# Parameter renamed from $Input: $Input collides with PowerShell's automatic
# $input variable, so a parameter by that name is never reliably bound.
param([string]$List)
if ([string]::IsNullOrEmpty($List)) {
return @()
}
$items = $List -split '[,\s]+' | Where-Object { $_ } | Select-Object -Unique
return $items
}
function Validate-Subset {
param(
[string]$Type,
[string[]]$Allowed,
[string[]]$Items
)
$ok = $true
foreach ($item in $Items) {
if ($item -notin $Allowed) {
Write-Error "Unknown $Type '$item' (allowed: $($Allowed -join ', '))"
$ok = $false
}
}
return $ok
}
# Determine agent list
if (-not [string]::IsNullOrEmpty($Agents)) {
$AgentList = Normalize-List -List $Agents
if (-not (Validate-Subset -Type 'agent' -Allowed $AllAgents -Items $AgentList)) {
exit 1
}
} else {
$AgentList = $AllAgents
}
# Determine script list
if (-not [string]::IsNullOrEmpty($Scripts)) {
$ScriptList = Normalize-List -List $Scripts
if (-not (Validate-Subset -Type 'script' -Allowed $AllScripts -Items $ScriptList)) {
exit 1
}
} else {
$ScriptList = $AllScripts
}
Write-Host "Agents: $($AgentList -join ', ')"
Write-Host "Scripts: $($ScriptList -join ', ')"
# Build all variants
foreach ($agent in $AgentList) {
foreach ($script in $ScriptList) {
Build-Variant -Agent $agent -Script $script
}
}
Write-Host "`nArchives in ${GenReleasesDir}:"
Get-ChildItem -Path $GenReleasesDir -Filter "spec-kit-template-*-${Version}.zip" | ForEach-Object {
Write-Host " $($_.Name)"
}

View File

@@ -0,0 +1,348 @@
#!/usr/bin/env bash
set -euo pipefail
# create-release-packages.sh (workflow-local)
# Build Spec Kit template release archives for each supported AI assistant and script type.
# Usage: .github/workflows/scripts/create-release-packages.sh <version>
# Version argument should include leading 'v'.
# Optionally set AGENTS and/or SCRIPTS env vars to limit what gets built.
# AGENTS : space or comma separated subset of: claude gemini copilot cursor-agent qwen opencode windsurf codex kilocode auggie roo codebuddy amp shai tabnine kiro-cli agy bob vibe qodercli kimi generic (default: all)
# SCRIPTS : space or comma separated subset of: sh ps (default: both)
# Examples:
# AGENTS=claude SCRIPTS=sh $0 v0.2.0
# AGENTS="copilot,gemini" $0 v0.2.0
# SCRIPTS=ps $0 v0.2.0
if [[ $# -ne 1 ]]; then
echo "Usage: $0 <version-with-v-prefix>" >&2
exit 1
fi
NEW_VERSION="$1"
if [[ ! $NEW_VERSION =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "Version must look like v0.0.0" >&2
exit 1
fi
echo "Building release packages for $NEW_VERSION"
# Create and use .genreleases directory for all build artifacts
GENRELEASES_DIR=".genreleases"
mkdir -p "$GENRELEASES_DIR"
rm -rf "$GENRELEASES_DIR"/* || true
rewrite_paths() {
sed -E \
-e 's@(/?)memory/@.specify/memory/@g' \
-e 's@(/?)scripts/@.specify/scripts/@g' \
-e 's@(/?)templates/@.specify/templates/@g' \
-e 's@\.specify\.specify/@.specify/@g'
}
generate_commands() {
local agent=$1 ext=$2 arg_format=$3 output_dir=$4 script_variant=$5
mkdir -p "$output_dir"
for template in templates/commands/*.md; do
[[ -f "$template" ]] || continue
local name description script_command agent_script_command body
name=$(basename "$template" .md)
# Normalize line endings
file_content=$(tr -d '\r' < "$template")
# Extract description and script command from YAML frontmatter
description=$(printf '%s\n' "$file_content" | awk '/^description:/ {sub(/^description:[[:space:]]*/, ""); print; exit}')
script_command=$(printf '%s\n' "$file_content" | awk -v sv="$script_variant" '/^[[:space:]]*'"$script_variant"':[[:space:]]*/ {sub(/^[[:space:]]*'"$script_variant"':[[:space:]]*/, ""); print; exit}')
if [[ -z $script_command ]]; then
echo "Warning: no script command found for $script_variant in $template" >&2
script_command="(Missing script command for $script_variant)"
fi
# Extract agent_script command from YAML frontmatter if present
agent_script_command=$(printf '%s\n' "$file_content" | awk '
/^agent_scripts:$/ { in_agent_scripts=1; next }
in_agent_scripts && /^[[:space:]]*'"$script_variant"':[[:space:]]*/ {
sub(/^[[:space:]]*'"$script_variant"':[[:space:]]*/, "")
print
exit
}
in_agent_scripts && /^[a-zA-Z]/ { in_agent_scripts=0 }
')
# Replace {SCRIPT} placeholder with the script command
body=$(printf '%s\n' "$file_content" | sed "s|{SCRIPT}|${script_command}|g")
# Replace {AGENT_SCRIPT} placeholder with the agent script command if found
if [[ -n $agent_script_command ]]; then
body=$(printf '%s\n' "$body" | sed "s|{AGENT_SCRIPT}|${agent_script_command}|g")
fi
# Remove the scripts: and agent_scripts: sections from frontmatter while preserving YAML structure
body=$(printf '%s\n' "$body" | awk '
/^---$/ { print; if (++dash_count == 1) in_frontmatter=1; else in_frontmatter=0; next }
in_frontmatter && /^scripts:$/ { skip_scripts=1; next }
in_frontmatter && /^agent_scripts:$/ { skip_scripts=1; next }
in_frontmatter && /^[a-zA-Z].*:/ && skip_scripts { skip_scripts=0 }
in_frontmatter && skip_scripts && /^[[:space:]]/ { next }
{ print }
')
# Apply other substitutions
body=$(printf '%s\n' "$body" | sed "s/{ARGS}/$arg_format/g" | sed "s/__AGENT__/$agent/g" | rewrite_paths)
case $ext in
toml)
body=$(printf '%s\n' "$body" | sed 's/\\/\\\\/g')
{ echo "description = \"$description\""; echo; echo "prompt = \"\"\""; echo "$body"; echo "\"\"\""; } > "$output_dir/speckit.$name.$ext" ;;
md)
echo "$body" > "$output_dir/speckit.$name.$ext" ;;
agent.md)
echo "$body" > "$output_dir/speckit.$name.$ext" ;;
esac
done
}
generate_copilot_prompts() {
local agents_dir=$1 prompts_dir=$2
mkdir -p "$prompts_dir"
# Generate a .prompt.md file for each .agent.md file
for agent_file in "$agents_dir"/speckit.*.agent.md; do
[[ -f "$agent_file" ]] || continue
local basename=$(basename "$agent_file" .agent.md)
local prompt_file="$prompts_dir/${basename}.prompt.md"
cat > "$prompt_file" <<EOF
---
agent: ${basename}
---
EOF
done
}
# Create Kimi Code skills in .kimi/skills/<name>/SKILL.md format.
# Kimi CLI discovers skills as directories containing a SKILL.md file,
# invoked with /skill:<name> (e.g. /skill:speckit.specify).
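# Example layout produced by the function below (illustrative):
#   .kimi/skills/speckit.plan/SKILL.md  -> /skill:speckit.plan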
create_kimi_skills() {
local skills_dir="$1"
local script_variant="$2"
for template in templates/commands/*.md; do
[[ -f "$template" ]] || continue
local name
name=$(basename "$template" .md)
local skill_name="speckit.${name}"
local skill_dir="${skills_dir}/${skill_name}"
mkdir -p "$skill_dir"
local file_content
file_content=$(tr -d '\r' < "$template")
# Extract description from frontmatter
local description
description=$(printf '%s\n' "$file_content" | awk '/^description:/ {sub(/^description:[[:space:]]*/, ""); print; exit}')
[[ -z "$description" ]] && description="Spec Kit: ${name} workflow"
# Extract script command
local script_command
script_command=$(printf '%s\n' "$file_content" | awk -v sv="$script_variant" '/^[[:space:]]*'"$script_variant"':[[:space:]]*/ {sub(/^[[:space:]]*'"$script_variant"':[[:space:]]*/, ""); print; exit}')
[[ -z "$script_command" ]] && script_command="(Missing script command for $script_variant)"
# Extract agent_script command from frontmatter if present
local agent_script_command
agent_script_command=$(printf '%s\n' "$file_content" | awk '
/^agent_scripts:$/ { in_agent_scripts=1; next }
in_agent_scripts && /^[[:space:]]*'"$script_variant"':[[:space:]]*/ {
sub(/^[[:space:]]*'"$script_variant"':[[:space:]]*/, "")
print
exit
}
in_agent_scripts && /^[a-zA-Z]/ { in_agent_scripts=0 }
')
# Build body: replace placeholders, strip scripts sections, rewrite paths
local body
body=$(printf '%s\n' "$file_content" | sed "s|{SCRIPT}|${script_command}|g")
if [[ -n $agent_script_command ]]; then
body=$(printf '%s\n' "$body" | sed "s|{AGENT_SCRIPT}|${agent_script_command}|g")
fi
body=$(printf '%s\n' "$body" | awk '
/^---$/ { print; if (++dash_count == 1) in_frontmatter=1; else in_frontmatter=0; next }
in_frontmatter && /^scripts:$/ { skip_scripts=1; next }
in_frontmatter && /^agent_scripts:$/ { skip_scripts=1; next }
in_frontmatter && /^[a-zA-Z].*:/ && skip_scripts { skip_scripts=0 }
in_frontmatter && skip_scripts && /^[[:space:]]/ { next }
{ print }
')
body=$(printf '%s\n' "$body" | sed 's/{ARGS}/\$ARGUMENTS/g' | sed 's/__AGENT__/kimi/g' | rewrite_paths)
# Strip existing frontmatter and prepend Kimi frontmatter
local template_body
template_body=$(printf '%s\n' "$body" | awk '/^---/{p++; if(p==2){found=1; next}} found')
{
printf -- '---\n'
printf 'name: "%s"\n' "$skill_name"
printf 'description: "%s"\n' "$description"
printf -- '---\n\n'
printf '%s\n' "$template_body"
} > "$skill_dir/SKILL.md"
done
}
build_variant() {
local agent=$1 script=$2
local base_dir="$GENRELEASES_DIR/sdd-${agent}-package-${script}"
echo "Building $agent ($script) package..."
mkdir -p "$base_dir"
# Copy base structure but filter scripts by variant
SPEC_DIR="$base_dir/.specify"
mkdir -p "$SPEC_DIR"
[[ -d memory ]] && { cp -r memory "$SPEC_DIR/"; echo "Copied memory -> .specify"; }
# Only copy the relevant script variant directory
if [[ -d scripts ]]; then
mkdir -p "$SPEC_DIR/scripts"
case $script in
sh)
[[ -d scripts/bash ]] && { cp -r scripts/bash "$SPEC_DIR/scripts/"; echo "Copied scripts/bash -> .specify/scripts"; }
find scripts -maxdepth 1 -type f -exec cp {} "$SPEC_DIR/scripts/" \; 2>/dev/null || true
;;
ps)
[[ -d scripts/powershell ]] && { cp -r scripts/powershell "$SPEC_DIR/scripts/"; echo "Copied scripts/powershell -> .specify/scripts"; }
find scripts -maxdepth 1 -type f -exec cp {} "$SPEC_DIR/scripts/" \; 2>/dev/null || true
;;
esac
fi
[[ -d templates ]] && { mkdir -p "$SPEC_DIR/templates"; find templates -type f -not -path "templates/commands/*" -not -name "vscode-settings.json" -exec cp --parents {} "$SPEC_DIR"/ \; ; echo "Copied templates -> .specify/templates"; }
case $agent in
claude)
mkdir -p "$base_dir/.claude/commands"
generate_commands claude md "\$ARGUMENTS" "$base_dir/.claude/commands" "$script" ;;
gemini)
mkdir -p "$base_dir/.gemini/commands"
generate_commands gemini toml "{{args}}" "$base_dir/.gemini/commands" "$script"
[[ -f agent_templates/gemini/GEMINI.md ]] && cp agent_templates/gemini/GEMINI.md "$base_dir/GEMINI.md" ;;
copilot)
mkdir -p "$base_dir/.github/agents"
generate_commands copilot agent.md "\$ARGUMENTS" "$base_dir/.github/agents" "$script"
generate_copilot_prompts "$base_dir/.github/agents" "$base_dir/.github/prompts"
mkdir -p "$base_dir/.vscode"
[[ -f templates/vscode-settings.json ]] && cp templates/vscode-settings.json "$base_dir/.vscode/settings.json"
;;
cursor-agent)
mkdir -p "$base_dir/.cursor/commands"
generate_commands cursor-agent md "\$ARGUMENTS" "$base_dir/.cursor/commands" "$script" ;;
qwen)
mkdir -p "$base_dir/.qwen/commands"
generate_commands qwen md "\$ARGUMENTS" "$base_dir/.qwen/commands" "$script"
[[ -f agent_templates/qwen/QWEN.md ]] && cp agent_templates/qwen/QWEN.md "$base_dir/QWEN.md" ;;
opencode)
mkdir -p "$base_dir/.opencode/command"
generate_commands opencode md "\$ARGUMENTS" "$base_dir/.opencode/command" "$script" ;;
windsurf)
mkdir -p "$base_dir/.windsurf/workflows"
generate_commands windsurf md "\$ARGUMENTS" "$base_dir/.windsurf/workflows" "$script" ;;
codex)
mkdir -p "$base_dir/.codex/prompts"
generate_commands codex md "\$ARGUMENTS" "$base_dir/.codex/prompts" "$script" ;;
kilocode)
mkdir -p "$base_dir/.kilocode/workflows"
generate_commands kilocode md "\$ARGUMENTS" "$base_dir/.kilocode/workflows" "$script" ;;
auggie)
mkdir -p "$base_dir/.augment/commands"
generate_commands auggie md "\$ARGUMENTS" "$base_dir/.augment/commands" "$script" ;;
roo)
mkdir -p "$base_dir/.roo/commands"
generate_commands roo md "\$ARGUMENTS" "$base_dir/.roo/commands" "$script" ;;
codebuddy)
mkdir -p "$base_dir/.codebuddy/commands"
generate_commands codebuddy md "\$ARGUMENTS" "$base_dir/.codebuddy/commands" "$script" ;;
qodercli)
mkdir -p "$base_dir/.qoder/commands"
generate_commands qodercli md "\$ARGUMENTS" "$base_dir/.qoder/commands" "$script" ;;
amp)
mkdir -p "$base_dir/.agents/commands"
generate_commands amp md "\$ARGUMENTS" "$base_dir/.agents/commands" "$script" ;;
shai)
mkdir -p "$base_dir/.shai/commands"
generate_commands shai md "\$ARGUMENTS" "$base_dir/.shai/commands" "$script" ;;
tabnine)
mkdir -p "$base_dir/.tabnine/agent/commands"
generate_commands tabnine toml "{{args}}" "$base_dir/.tabnine/agent/commands" "$script"
[[ -f agent_templates/tabnine/TABNINE.md ]] && cp agent_templates/tabnine/TABNINE.md "$base_dir/TABNINE.md" ;;
kiro-cli)
mkdir -p "$base_dir/.kiro/prompts"
generate_commands kiro-cli md "\$ARGUMENTS" "$base_dir/.kiro/prompts" "$script" ;;
agy)
mkdir -p "$base_dir/.agent/commands"
generate_commands agy md "\$ARGUMENTS" "$base_dir/.agent/commands" "$script" ;;
bob)
mkdir -p "$base_dir/.bob/commands"
generate_commands bob md "\$ARGUMENTS" "$base_dir/.bob/commands" "$script" ;;
vibe)
mkdir -p "$base_dir/.vibe/prompts"
generate_commands vibe md "\$ARGUMENTS" "$base_dir/.vibe/prompts" "$script" ;;
kimi)
mkdir -p "$base_dir/.kimi/skills"
create_kimi_skills "$base_dir/.kimi/skills" "$script" ;;
generic)
mkdir -p "$base_dir/.speckit/commands"
generate_commands generic md "\$ARGUMENTS" "$base_dir/.speckit/commands" "$script" ;;
esac
( cd "$base_dir" && zip -r "../spec-kit-template-${agent}-${script}-${NEW_VERSION}.zip" . )
echo "Created $GENRELEASES_DIR/spec-kit-template-${agent}-${script}-${NEW_VERSION}.zip"
}
# Determine agent list
ALL_AGENTS=(claude gemini copilot cursor-agent qwen opencode windsurf codex kilocode auggie roo codebuddy amp shai tabnine kiro-cli agy bob vibe qodercli kimi generic)
ALL_SCRIPTS=(sh ps)
norm_list() {
# Use an explicit printf format string so tokens containing '%' are not misinterpreted
tr ',\n' ' ' | awk '{for(i=1;i<=NF;i++){if(!seen[$i]++){printf("%s%s",(out?"\n":""),$i);out=1}}}END{printf("\n")}'
}
validate_subset() {
local type=$1; shift; local -n allowed=$1; shift; local items=("$@")
local invalid=0
for it in "${items[@]}"; do
local found=0
for a in "${allowed[@]}"; do [[ $it == "$a" ]] && { found=1; break; }; done
if [[ $found -eq 0 ]]; then
echo "Error: unknown $type '$it' (allowed: ${allowed[*]})" >&2
invalid=1
fi
done
return $invalid
}
if [[ -n ${AGENTS:-} ]]; then
mapfile -t AGENT_LIST < <(printf '%s' "$AGENTS" | norm_list)
validate_subset agent ALL_AGENTS "${AGENT_LIST[@]}" || exit 1
else
AGENT_LIST=("${ALL_AGENTS[@]}")
fi
if [[ -n ${SCRIPTS:-} ]]; then
mapfile -t SCRIPT_LIST < <(printf '%s' "$SCRIPTS" | norm_list)
validate_subset script ALL_SCRIPTS "${SCRIPT_LIST[@]}" || exit 1
else
SCRIPT_LIST=("${ALL_SCRIPTS[@]}")
fi
echo "Agents: ${AGENT_LIST[*]}"
echo "Scripts: ${SCRIPT_LIST[*]}"
for agent in "${AGENT_LIST[@]}"; do
for script in "${SCRIPT_LIST[@]}"; do
build_variant "$agent" "$script"
done
done
echo "Archives in $GENRELEASES_DIR:"
ls -1 "$GENRELEASES_DIR"/spec-kit-template-*-"${NEW_VERSION}".zip

View File

@@ -0,0 +1,40 @@
#!/usr/bin/env bash
set -euo pipefail
# generate-release-notes.sh
# Generate release notes from git history
# Usage: generate-release-notes.sh <new_version> <last_tag>
if [[ $# -ne 2 ]]; then
echo "Usage: $0 <new_version> <last_tag>" >&2
exit 1
fi
NEW_VERSION="$1"
LAST_TAG="$2"
# Get commits since last tag
if [ "$LAST_TAG" = "v0.0.0" ]; then
# Check how many commits we have and use that as the limit
COMMIT_COUNT=$(git rev-list --count HEAD)
if [ "$COMMIT_COUNT" -gt 10 ]; then
COMMITS=$(git log --oneline --pretty=format:"- %s" HEAD~10..HEAD)
else
COMMITS=$(git log --oneline --pretty=format:"- %s" HEAD~$COMMIT_COUNT..HEAD 2>/dev/null || git log --oneline --pretty=format:"- %s")
fi
else
COMMITS=$(git log --oneline --pretty=format:"- %s" $LAST_TAG..HEAD)
fi
# Create release notes
cat > release_notes.md << EOF
This is the latest set of releases that you can use with your agent of choice. We recommend using the Specify CLI to scaffold your projects; however, you can download these independently and manage them yourself.
## Changelog
$COMMITS
EOF
echo "Generated release notes:"
cat release_notes.md

View File

@@ -0,0 +1,24 @@
#!/usr/bin/env bash
set -euo pipefail
# get-next-version.sh
# Calculate the next version based on the latest git tag and output GitHub Actions variables
# Usage: get-next-version.sh
# Get the latest tag, or use v0.0.0 if no tags exist
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
echo "latest_tag=$LATEST_TAG" >> $GITHUB_OUTPUT
# Extract version number and increment
VERSION=$(echo $LATEST_TAG | sed 's/v//')
IFS='.' read -ra VERSION_PARTS <<< "$VERSION"
MAJOR=${VERSION_PARTS[0]:-0}
MINOR=${VERSION_PARTS[1]:-0}
PATCH=${VERSION_PARTS[2]:-0}
# Increment patch version
PATCH=$((PATCH + 1))
NEW_VERSION="v$MAJOR.$MINOR.$PATCH"
echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
echo "New version will be: $NEW_VERSION"

.github/workflows/scripts/simulate-release.sh vendored Executable file
View File

@@ -0,0 +1,161 @@
#!/usr/bin/env bash
set -euo pipefail
# simulate-release.sh
# Simulate the release process locally without pushing to GitHub
# Usage: simulate-release.sh [version]
# If version is omitted, auto-increments patch version
# Colors for output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color
echo -e "${BLUE}🧪 Simulating Release Process Locally${NC}"
echo "======================================"
echo ""
# Step 1: Determine version
if [[ -n "${1:-}" ]]; then
VERSION="${1#v}"
TAG="v$VERSION"
echo -e "${GREEN}📝 Using manual version: $VERSION${NC}"
else
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
echo -e "${BLUE}Latest tag: $LATEST_TAG${NC}"
VERSION=$(echo $LATEST_TAG | sed 's/v//')
IFS='.' read -ra VERSION_PARTS <<< "$VERSION"
MAJOR=${VERSION_PARTS[0]:-0}
MINOR=${VERSION_PARTS[1]:-0}
PATCH=${VERSION_PARTS[2]:-0}
PATCH=$((PATCH + 1))
VERSION="$MAJOR.$MINOR.$PATCH"
TAG="v$VERSION"
echo -e "${GREEN}📝 Auto-incremented to: $VERSION${NC}"
fi
echo ""
# Step 2: Check if tag exists
if git rev-parse "$TAG" >/dev/null 2>&1; then
echo -e "${RED}❌ Error: Tag $TAG already exists!${NC}"
echo " Please use a different version or delete the tag first."
exit 1
fi
echo -e "${GREEN}✓ Tag $TAG is available${NC}"
# Step 3: Backup current state
echo ""
echo -e "${YELLOW}💾 Creating backup of current state...${NC}"
BACKUP_DIR=$(mktemp -d)
cp pyproject.toml "$BACKUP_DIR/pyproject.toml.bak"
cp CHANGELOG.md "$BACKUP_DIR/CHANGELOG.md.bak"
echo -e "${GREEN}✓ Backup created at: $BACKUP_DIR${NC}"
# Step 4: Update pyproject.toml
echo ""
echo -e "${YELLOW}📝 Updating pyproject.toml...${NC}"
sed -i.tmp "s/version = \".*\"/version = \"$VERSION\"/" pyproject.toml
rm -f pyproject.toml.tmp
echo -e "${GREEN}✓ Updated pyproject.toml to version $VERSION${NC}"
# Step 5: Update CHANGELOG.md
echo ""
echo -e "${YELLOW}📝 Updating CHANGELOG.md...${NC}"
DATE=$(date +%Y-%m-%d)
# Get the previous tag to compare commits
PREVIOUS_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "")
if [[ -n "$PREVIOUS_TAG" ]]; then
echo " Generating changelog from commits since $PREVIOUS_TAG"
# Get commits since last tag, format as bullet points
COMMITS=$(git log --oneline "$PREVIOUS_TAG"..HEAD --no-merges --pretty=format:"- %s" 2>/dev/null || echo "- Initial release")
else
echo " No previous tag found - this is the first release"
COMMITS="- Initial release"
fi
# Create temp file with new entry
{
head -n 8 CHANGELOG.md
echo ""
echo "## [$VERSION] - $DATE"
echo ""
echo "### Changed"
echo ""
echo "$COMMITS"
echo ""
tail -n +9 CHANGELOG.md
} > CHANGELOG.md.tmp
mv CHANGELOG.md.tmp CHANGELOG.md
echo -e "${GREEN}✓ Updated CHANGELOG.md with commits since $PREVIOUS_TAG${NC}"
# Step 6: Show what would be committed
echo ""
echo -e "${YELLOW}📋 Changes that would be committed:${NC}"
git diff pyproject.toml CHANGELOG.md
# Step 7: Create temporary tag (no push)
echo ""
echo -e "${YELLOW}🏷️ Creating temporary local tag...${NC}"
git tag -a "$TAG" -m "Simulated release $TAG" 2>/dev/null || true
echo -e "${GREEN}✓ Tag $TAG created locally${NC}"
# Step 8: Simulate release artifact creation
echo ""
echo -e "${YELLOW}📦 Simulating release package creation...${NC}"
echo " (High-level simulation only; packaging script is not executed)"
echo ""
# Check if script exists and is executable
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [[ -x "$SCRIPT_DIR/create-release-packages.sh" ]]; then
echo -e "${BLUE}In a real release, the following command would be run to create packages:${NC}"
echo " $SCRIPT_DIR/create-release-packages.sh \"$TAG\""
echo ""
echo "This simulation does not enumerate individual package files to avoid"
echo "drifting from the actual behavior of create-release-packages.sh."
else
echo -e "${RED}⚠️ create-release-packages.sh not found or not executable${NC}"
fi
# Step 9: Simulate release notes generation
echo ""
echo -e "${YELLOW}📄 Simulating release notes generation...${NC}"
echo ""
PREVIOUS_TAG=$(git describe --tags --abbrev=0 $TAG^ 2>/dev/null || echo "")
if [[ -n "$PREVIOUS_TAG" ]]; then
echo -e "${BLUE}Changes since $PREVIOUS_TAG:${NC}"
git log --oneline "$PREVIOUS_TAG".."$TAG" | head -n 10
echo ""
else
echo -e "${BLUE}No previous tag found - this would be the first release${NC}"
fi
# Step 10: Summary
echo ""
echo -e "${GREEN}🎉 Simulation Complete!${NC}"
echo "======================================"
echo ""
echo -e "${BLUE}Summary:${NC}"
echo " Version: $VERSION"
echo " Tag: $TAG"
echo " Backup: $BACKUP_DIR"
echo ""
echo -e "${YELLOW}⚠️ SIMULATION ONLY - NO CHANGES PUSHED${NC}"
echo ""
echo -e "${BLUE}Next steps:${NC}"
echo " 1. Review the changes above"
echo " 2. To keep changes: git add pyproject.toml CHANGELOG.md && git commit"
echo " 3. To discard changes: git checkout pyproject.toml CHANGELOG.md && git tag -d $TAG"
echo " 4. To restore from backup: cp $BACKUP_DIR/* ."
echo ""
echo -e "${BLUE}To run the actual release:${NC}"
echo " Go to: https://github.com/github/spec-kit/actions/workflows/release-trigger.yml"
echo " Click 'Run workflow' and enter version: $VERSION"
echo ""

View File

@@ -0,0 +1,23 @@
#!/usr/bin/env bash
set -euo pipefail
# update-version.sh
# Update version in pyproject.toml (for release artifacts only)
# Usage: update-version.sh <version>
if [[ $# -ne 1 ]]; then
echo "Usage: $0 <version>" >&2
exit 1
fi
VERSION="$1"
# Remove 'v' prefix for Python versioning
PYTHON_VERSION=${VERSION#v}
if [ -f "pyproject.toml" ]; then
sed -i "s/version = \".*\"/version = \"$PYTHON_VERSION\"/" pyproject.toml
echo "Updated pyproject.toml version to $PYTHON_VERSION (for release artifacts only)"
else
echo "Warning: pyproject.toml not found, skipping version update"
fi

View File

@@ -39,4 +39,4 @@ jobs:
any-of-labels: ''
# Operations per run (helps avoid rate limits)
operations-per-run: 250
operations-per-run: 100

View File

@@ -16,7 +16,7 @@ jobs:
uses: actions/checkout@v4
- name: Install uv
uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7
uses: astral-sh/setup-uv@v7
- name: Set up Python
uses: actions/setup-python@v6
@@ -36,7 +36,7 @@ jobs:
uses: actions/checkout@v4
- name: Install uv
uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7
uses: astral-sh/setup-uv@v7
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v6

View File

@@ -30,15 +30,14 @@ Specify supports multiple AI agents by generating agent-specific command files a
| **Claude Code** | `.claude/commands/` | Markdown | `claude` | Anthropic's Claude Code CLI |
| **Gemini CLI** | `.gemini/commands/` | TOML | `gemini` | Google's Gemini CLI |
| **GitHub Copilot** | `.github/agents/` | Markdown | N/A (IDE-based) | GitHub Copilot in VS Code |
| **Cursor** | `.cursor/commands/` | Markdown | N/A (IDE-based) | Cursor IDE (`--ai cursor-agent`) |
| **Cursor** | `.cursor/commands/` | Markdown | `cursor-agent` | Cursor CLI |
| **Qwen Code** | `.qwen/commands/` | Markdown | `qwen` | Alibaba's Qwen Code CLI |
| **opencode** | `.opencode/command/` | Markdown | `opencode` | opencode CLI |
| **Codex CLI** | `.agents/skills/` | Markdown | `codex` | Codex CLI (`--ai codex --ai-skills`) |
| **Codex CLI** | `.codex/commands/` | Markdown | `codex` | Codex CLI |
| **Windsurf** | `.windsurf/workflows/` | Markdown | N/A (IDE-based) | Windsurf IDE workflows |
| **Junie** | `.junie/commands/` | Markdown | `junie` | Junie by JetBrains |
| **Kilo Code** | `.kilocode/workflows/` | Markdown | N/A (IDE-based) | Kilo Code IDE |
| **Auggie CLI** | `.augment/commands/` | Markdown | `auggie` | Auggie CLI |
| **Roo Code** | `.roo/commands/` | Markdown | N/A (IDE-based) | Roo Code IDE |
| **Kilo Code** | `.kilocode/rules/` | Markdown | N/A (IDE-based) | Kilo Code IDE |
| **Auggie CLI** | `.augment/rules/` | Markdown | `auggie` | Auggie CLI |
| **Roo Code** | `.roo/rules/` | Markdown | N/A (IDE-based) | Roo Code IDE |
| **CodeBuddy CLI** | `.codebuddy/commands/` | Markdown | `codebuddy` | CodeBuddy CLI |
| **Qoder CLI** | `.qoder/commands/` | Markdown | `qodercli` | Qoder CLI |
| **Kiro CLI** | `.kiro/prompts/` | Markdown | `kiro-cli` | Kiro CLI |
@@ -46,12 +45,7 @@ Specify supports multiple AI agents by generating agent-specific command files a
| **SHAI** | `.shai/commands/` | Markdown | `shai` | SHAI CLI |
| **Tabnine CLI** | `.tabnine/agent/commands/` | TOML | `tabnine` | Tabnine CLI |
| **Kimi Code** | `.kimi/skills/` | Markdown | `kimi` | Kimi Code CLI (Moonshot AI) |
| **Pi Coding Agent** | `.pi/prompts/` | Markdown | `pi` | Pi terminal coding agent |
| **iFlow CLI** | `.iflow/commands/` | Markdown | `iflow` | iFlow CLI (iflow-ai) |
| **IBM Bob** | `.bob/commands/` | Markdown | N/A (IDE-based) | IBM Bob IDE |
| **Trae** | `.trae/rules/` | Markdown | N/A (IDE-based) | Trae IDE |
| **Antigravity** | `.agent/commands/` | Markdown | N/A (IDE-based) | Antigravity IDE (`--ai agy --ai-skills`) |
| **Mistral Vibe** | `.vibe/prompts/` | Markdown | `vibe` | Mistral Vibe CLI |
| **Generic** | User-specified via `--ai-commands-dir` | Markdown | N/A | Bring your own agent |
### Step-by-Step Integration Guide
@@ -90,7 +84,7 @@ This eliminates the need for special-case mappings throughout the codebase.
- `folder`: Directory where agent-specific files are stored (relative to project root)
- `commands_subdir`: Subdirectory name within the agent folder where command/prompt files are stored (default: `"commands"`)
- Most agents use `"commands"` (e.g., `.claude/commands/`)
- Some agents use alternative names: `"agents"` (copilot), `"workflows"` (windsurf, kilocode), `"prompts"` (codex, kiro-cli, pi), `"command"` (opencode - singular)
- Some agents use alternative names: `"agents"` (copilot), `"workflows"` (windsurf, kilocode), `"prompts"` (codex, kiro-cli), `"command"` (opencode - singular)
- This field enables `--ai-skills` to locate command templates correctly for skill generation
- `install_url`: Installation documentation URL (set to `None` for IDE-based agents)
- `requires_cli`: Whether the agent requires a CLI tool check during initialization
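
As a rough illustration of how `folder` and `commands_subdir` compose into an install path (the values below are examples taken from the tables in this document, not actual `AGENT_CONFIG` entries):

```bash
# Sketch: an agent's command files live at folder + commands_subdir.
folder=".opencode"
commands_subdir="command"   # opencode uses the singular form
echo "${folder}/${commands_subdir}/speckit.plan.md"
# → .opencode/command/speckit.plan.md
```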
@@ -318,40 +312,30 @@ Require a command-line tool to be installed:
- **Claude Code**: `claude` CLI
- **Gemini CLI**: `gemini` CLI
- **Cursor**: `cursor-agent` CLI
- **Qwen Code**: `qwen` CLI
- **opencode**: `opencode` CLI
- **Codex CLI**: `codex` CLI (requires `--ai-skills`)
- **Junie**: `junie` CLI
- **Auggie CLI**: `auggie` CLI
- **Kiro CLI**: `kiro-cli` CLI
- **CodeBuddy CLI**: `codebuddy` CLI
- **Qoder CLI**: `qodercli` CLI
- **Kiro CLI**: `kiro-cli` CLI
- **Amp**: `amp` CLI
- **SHAI**: `shai` CLI
- **Tabnine CLI**: `tabnine` CLI
- **Kimi Code**: `kimi` CLI
- **Mistral Vibe**: `vibe` CLI
- **Pi Coding Agent**: `pi` CLI
- **iFlow CLI**: `iflow` CLI
### IDE-Based Agents
Work within integrated development environments:
- **GitHub Copilot**: Built into VS Code/compatible editors
- **Cursor**: Built into Cursor IDE (`--ai cursor-agent`)
- **Windsurf**: Built into Windsurf IDE
- **Kilo Code**: Built into Kilo Code IDE
- **Roo Code**: Built into Roo Code IDE
- **IBM Bob**: Built into IBM Bob IDE
- **Trae**: Built into Trae IDE
- **Antigravity**: Built into Antigravity IDE (`--ai agy --ai-skills`)
## Command File Formats
### Markdown Format
Used by: Claude, Cursor, GitHub Copilot, opencode, Windsurf, Junie, Kiro CLI, Amp, SHAI, IBM Bob, Kimi Code, Qwen, Pi, Codex, Auggie, CodeBuddy, Qoder, Roo Code, Kilo Code, Trae, Antigravity, Mistral Vibe, iFlow
Used by: Claude, Cursor, opencode, Windsurf, Kiro CLI, Amp, SHAI, IBM Bob, Kimi Code, Qwen
**Standard format:**
@@ -389,29 +373,10 @@ Command content with {SCRIPT} and {{args}} placeholders.
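
A minimal sketch of what such a Markdown command file might look like (the file name, frontmatter fields, and body are assumptions based on the placeholders mentioned above; the real templates ship with spec-kit releases):

```bash
# Hypothetical minimal command file for a Markdown-format agent:
mkdir -p .claude/commands
cat > .claude/commands/speckit.example.md <<'EOF'
---
description: Example spec-kit command
---
Run {SCRIPT} and apply the user's input: {{args}}
EOF
```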
## Directory Conventions
- **CLI agents**: Usually `.<agent-name>/commands/`
- **Singular command exception**:
- opencode: `.opencode/command/` (singular `command`, not `commands`)
- **Nested path exception**:
- Tabnine: `.tabnine/agent/commands/` (extra `agent/` segment)
- **Shared `.agents/` folder**:
- Amp: `.agents/commands/` (shared folder, not `.amp/`)
- Codex: `.agents/skills/` (shared folder; requires `--ai-skills`; invoked as `$speckit-<command>`)
- **Skills-based exceptions**:
- Kimi Code: `.kimi/skills/` (skills, invoked as `/skill:speckit-<command>`)
- **Prompt-based exceptions**:
- Kiro CLI: `.kiro/prompts/`
- Pi: `.pi/prompts/`
- Mistral Vibe: `.vibe/prompts/`
- **Rules-based exceptions**:
- Trae: `.trae/rules/`
- **IDE agents**: Follow IDE-specific patterns:
- Copilot: `.github/agents/`
- Cursor: `.cursor/commands/`
- Windsurf: `.windsurf/workflows/`
- Kilo Code: `.kilocode/workflows/`
- Roo Code: `.roo/commands/`
- IBM Bob: `.bob/commands/`
- Antigravity: `.agent/skills/` (`--ai-skills` required; `.agent/commands/` is deprecated)
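
A quick way to see which of these conventions a given project actually uses (directory list abridged; sketch only):

```bash
# Check a few of the agent directories named above; prints only the ones present.
for d in .claude/commands .opencode/command .tabnine/agent/commands \
         .agents/skills .kiro/prompts .github/agents; do
  [ -d "$d" ] && echo "found: $d"
done
```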
## Argument Patterns

File diff suppressed because it is too large

View File

@@ -36,7 +36,7 @@ On [GitHub Codespaces](https://github.com/features/codespaces) it's even simpler
> If your pull request introduces a large change that materially impacts the work of the CLI or the rest of the repository (e.g., you're introducing new templates, arguments, or otherwise major changes), make sure that it was **discussed and agreed upon** by the project maintainers. Pull requests with large changes that did not have a prior conversation and agreement will be closed.
1. Fork and clone the repository
1. Configure and install the dependencies: `uv sync --extra test`
1. Configure and install the dependencies: `uv sync`
1. Make sure the CLI works on your machine: `uv run specify --help`
1. Create a new branch: `git checkout -b my-branch-name`
1. Make your change, add tests, and make sure everything still works
@@ -44,9 +44,6 @@ On [GitHub Codespaces](https://github.com/features/codespaces) it's even simpler
1. Push to your fork and submit a pull request
1. Wait for your pull request to be reviewed and merged.
For the detailed test workflow, command-selection prompt, and PR reporting template, see [`TESTING.md`](./TESTING.md).
Activate the project virtual environment (see the Setup block in [`TESTING.md`](./TESTING.md)), then install the CLI from your working tree (`uv pip install -e .` after `uv sync --extra test`) or otherwise ensure the shell uses the local `specify` binary before running the manual slash-command tests described below.
Here are a few things you can do that will increase the likelihood of your pull request being accepted:
- Follow the project's coding conventions.
@@ -65,14 +62,6 @@ When working on spec-kit:
3. Test script functionality in the `scripts/` directory
4. Ensure memory files (`memory/constitution.md`) are updated if major process changes are made
### Recommended validation flow
For the smoothest review experience, validate changes in this order:
1. **Run focused automated checks first** — use the quick verification commands in [`TESTING.md`](./TESTING.md) to catch packaging, scaffolding, and configuration regressions early.
2. **Run manual workflow tests second** — if your change affects slash commands or the developer workflow, follow [`TESTING.md`](./TESTING.md) to choose the right commands, run them in an agent, and capture results for your PR.
3. **Use local release packages when debugging packaged output** — if you need to inspect the exact files CI-style packaging produces, generate local release packages as described below.
### Testing template and command changes locally
Running `uv run specify init` pulls released packages, which won't include your local changes.
@@ -96,8 +85,6 @@ To test your templates, commands, and other changes locally, follow these steps:
Navigate to your test project folder and open the agent to verify your implementation.
If you only need to validate generated file structure and content before doing manual agent testing, start with the focused automated checks in [`TESTING.md`](./TESTING.md). Keep this section for the cases where you need to inspect the exact packaged output locally.
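
For example, the focused packaging spot check from `TESTING.md` (agent and script values are examples):

```bash
# Build a single agent/script release package locally, then inspect the output.
AGENTS=copilot SCRIPTS=sh ./.github/workflows/scripts/create-release-packages.sh v1.0.0
ls .genreleases/sdd-copilot-package-sh/
```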
## AI contributions in Spec Kit
> [!IMPORTANT]

README.md
View File

@@ -9,7 +9,7 @@
</p>
<p align="center">
<a href="https://github.com/github/spec-kit/releases/latest"><img src="https://img.shields.io/github/v/release/github/spec-kit" alt="Latest Release"/></a>
<a href="https://github.com/github/spec-kit/actions/workflows/release.yml"><img src="https://github.com/github/spec-kit/actions/workflows/release.yml/badge.svg" alt="Release"/></a>
<a href="https://github.com/github/spec-kit/stargazers"><img src="https://img.shields.io/github/stars/github/spec-kit?style=social" alt="GitHub stars"/></a>
<a href="https://github.com/github/spec-kit/blob/main/LICENSE"><img src="https://img.shields.io/github/license/github/spec-kit" alt="License"/></a>
<a href="https://github.github.io/spec-kit/"><img src="https://img.shields.io/badge/docs-GitHub_Pages-blue" alt="Documentation"/></a>
@@ -22,13 +22,9 @@
- [🤔 What is Spec-Driven Development?](#-what-is-spec-driven-development)
- [⚡ Get Started](#-get-started)
- [📽️ Video Overview](#-video-overview)
- [🧩 Community Extensions](#-community-extensions)
- [🎨 Community Presets](#-community-presets)
- [🚶 Community Walkthroughs](#-community-walkthroughs)
- [🛠️ Community Friends](#-community-friends)
- [🤖 Supported AI Agents](#-supported-ai-agents)
- [🔧 Specify CLI Reference](#-specify-cli-reference)
- [🧩 Making Spec Kit Your Own: Extensions & Presets](#-making-spec-kit-your-own-extensions--presets)
- [📚 Core Philosophy](#-core-philosophy)
- [🌟 Development Phases](#-development-phases)
- [🎯 Experimental Goals](#-experimental-goals)
@@ -52,13 +48,9 @@ Choose your preferred installation method:
#### Option 1: Persistent Installation (Recommended)
Install once and use everywhere. Pin a specific release tag for stability (check [Releases](https://github.com/github/spec-kit/releases) for the latest):
Install once and use everywhere:
```bash
# Install a specific stable release (recommended — replace vX.Y.Z with the latest tag)
uv tool install specify-cli --from git+https://github.com/github/spec-kit.git@vX.Y.Z
# Or install latest from main (may include unreleased changes)
uv tool install specify-cli --from git+https://github.com/github/spec-kit.git
```
@@ -80,7 +72,7 @@ specify check
To upgrade Specify, see the [Upgrade Guide](./docs/upgrade.md) for detailed instructions. Quick upgrade:
```bash
uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git@vX.Y.Z
uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git
```
#### Option 2: One-time Usage
@@ -88,13 +80,13 @@ uv tool install specify-cli --force --from git+https://github.com/github/spec-ki
Run directly without installing:
```bash
# Create new project (pinned to a stable release — replace vX.Y.Z with the latest tag)
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <PROJECT_NAME>
# Create new project
uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME>
# Or initialize in existing project
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init . --ai claude
uvx --from git+https://github.com/github/spec-kit.git specify init . --ai claude
# or
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init --here --ai claude
uvx --from git+https://github.com/github/spec-kit.git specify init --here --ai claude
```
**Benefits of persistent installation:**
@@ -104,13 +96,9 @@ uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init --here
- Better tool management with `uv tool list`, `uv tool upgrade`, `uv tool uninstall`
- Cleaner shell configuration
#### Option 3: Enterprise / Air-Gapped Installation
If your environment blocks access to PyPI or GitHub, see the [Enterprise / Air-Gapped Installation](./docs/installation.md#enterprise--air-gapped-installation) guide for step-by-step instructions on using `pip download` to create portable, OS-specific wheel bundles on a connected machine.
### 2. Establish project principles
Launch your AI assistant in the project directory. Most agents expose spec-kit as `/speckit.*` slash commands; Codex CLI in skills mode uses `$speckit-*` instead.
Launch your AI assistant in the project directory. The `/speckit.*` commands are available in the assistant.
Use the **`/speckit.constitution`** command to create your project's governing principles and development guidelines that will guide all subsequent development.
@@ -158,94 +146,8 @@ Want to see Spec Kit in action? Watch our [video overview](https://www.youtube.c
[![Spec Kit video header](/media/spec-kit-video-header.jpg)](https://www.youtube.com/watch?v=a9eR1xsfvHg&pp=0gcJCckJAYcqIYzv)
## 🧩 Community Extensions
> [!NOTE]
> Community extensions are independently created and maintained by their respective authors. GitHub and the Spec Kit maintainers may review pull requests that add entries to the community catalog for formatting, catalog structure, or policy compliance, but they do **not review, audit, endorse, or support the extension code itself**. The Community Extensions website is also a third-party resource. Review extension source code before installation and use at your own discretion.
🔍 **Browse and search community extensions on the [Community Extensions website](https://speckit-community.github.io/extensions/).**
The following community-contributed extensions are available in [`catalog.community.json`](extensions/catalog.community.json):
**Categories:**
- `docs` — reads, validates, or generates spec artifacts
- `code` — reviews, validates, or modifies source code
- `process` — orchestrates workflow across phases
- `integration` — syncs with external platforms
- `visibility` — reports on project health or progress
**Effect:**
- `Read-only` — produces reports without modifying files
- `Read+Write` — modifies files, creates artifacts, or updates specs
| Extension | Purpose | Category | Effect | URL |
|-----------|---------|----------|--------|-----|
| AI-Driven Engineering (AIDE) | A structured 7-step workflow for building new projects from scratch with AI assistants — from vision through implementation | `process` | Read+Write | [aide](https://github.com/mnriem/spec-kit-extensions/tree/main/aide) |
| Archive Extension | Archive merged features into main project memory. | `docs` | Read+Write | [spec-kit-archive](https://github.com/stn1slv/spec-kit-archive) |
| Azure DevOps Integration | Sync user stories and tasks to Azure DevOps work items using OAuth authentication | `integration` | Read+Write | [spec-kit-azure-devops](https://github.com/pragya247/spec-kit-azure-devops) |
| Checkpoint Extension | Commit the changes made during the middle of the implementation, so you don't end up with just one very large commit at the end | `code` | Read+Write | [spec-kit-checkpoint](https://github.com/aaronrsun/spec-kit-checkpoint) |
| Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | `code` | Read+Write | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) |
| Conduct Extension | Orchestrates spec-kit phases via sub-agent delegation to reduce context pollution. | `process` | Read+Write | [spec-kit-conduct-ext](https://github.com/twbrandon7/spec-kit-conduct-ext) |
| DocGuard — CDD Enforcement | Canonical-Driven Development enforcement. Validates, scores, and traces project documentation with automated checks, AI-driven workflows, and spec-kit hooks. Zero NPM runtime dependencies. | `docs` | Read+Write | [spec-kit-docguard](https://github.com/raccioly/docguard) |
| Extensify | Create and validate extensions and extension catalogs | `process` | Read+Write | [extensify](https://github.com/mnriem/spec-kit-extensions/tree/main/extensify) |
| Fix Findings | Automated analyze-fix-reanalyze loop that resolves spec findings until clean | `code` | Read+Write | [spec-kit-fix-findings](https://github.com/Quratulain-bilal/spec-kit-fix-findings) |
| Fleet Orchestrator | Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases | `process` | Read+Write | [spec-kit-fleet](https://github.com/sharathsatish/spec-kit-fleet) |
| Iterate | Iterate on spec documents with a two-phase define-and-apply workflow — refine specs mid-implementation and go straight back to building | `docs` | Read+Write | [spec-kit-iterate](https://github.com/imviancagrace/spec-kit-iterate) |
| Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | `integration` | Read+Write | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
| Learning Extension | Generate educational guides from implementations and enhance clarifications with mentoring context | `docs` | Read+Write | [spec-kit-learn](https://github.com/imviancagrace/spec-kit-learn) |
| MAQA — Multi-Agent & Quality Assurance | Coordinator → feature → QA agent workflow with parallel worktree-based implementation. Language-agnostic. Auto-detects installed board plugins. Optional CI gate. | `process` | Read+Write | [spec-kit-maqa-ext](https://github.com/GenieRobot/spec-kit-maqa-ext) |
| MAQA Azure DevOps Integration | Azure DevOps Boards integration for MAQA — syncs User Stories and Task children as features progress | `integration` | Read+Write | [spec-kit-maqa-azure-devops](https://github.com/GenieRobot/spec-kit-maqa-azure-devops) |
| MAQA CI/CD Gate | Auto-detects GitHub Actions, CircleCI, GitLab CI, and Bitbucket Pipelines. Blocks QA handoff until pipeline is green. | `process` | Read+Write | [spec-kit-maqa-ci](https://github.com/GenieRobot/spec-kit-maqa-ci) |
| MAQA GitHub Projects Integration | GitHub Projects v2 integration for MAQA — syncs draft issues and Status columns as features progress | `integration` | Read+Write | [spec-kit-maqa-github-projects](https://github.com/GenieRobot/spec-kit-maqa-github-projects) |
| MAQA Jira Integration | Jira integration for MAQA — syncs Stories and Subtasks as features progress through the board | `integration` | Read+Write | [spec-kit-maqa-jira](https://github.com/GenieRobot/spec-kit-maqa-jira) |
| MAQA Linear Integration | Linear integration for MAQA — syncs issues and sub-issues across workflow states as features progress | `integration` | Read+Write | [spec-kit-maqa-linear](https://github.com/GenieRobot/spec-kit-maqa-linear) |
| MAQA Trello Integration | Trello board integration for MAQA — populates board from specs, moves cards, real-time checklist ticking | `integration` | Read+Write | [spec-kit-maqa-trello](https://github.com/GenieRobot/spec-kit-maqa-trello) |
| Onboard | Contextual onboarding and progressive growth for developers new to spec-kit projects. Explains specs, maps dependencies, validates understanding, and guides the next step | `process` | Read+Write | [spec-kit-onboard](https://github.com/dmux/spec-kit-onboard) |
| Plan Review Gate | Require spec.md and plan.md to be merged via MR/PR before allowing task generation | `process` | Read-only | [spec-kit-plan-review-gate](https://github.com/luno/spec-kit-plan-review-gate) |
| Presetify | Create and validate presets and preset catalogs | `process` | Read+Write | [presetify](https://github.com/mnriem/spec-kit-extensions/tree/main/presetify) |
| Product Forge | Full product lifecycle: research → product spec → SpecKit → implement → verify → test | `process` | Read+Write | [speckit-product-forge](https://github.com/VaiYav/speckit-product-forge) |
| Project Health Check | Diagnose a Spec Kit project and report health issues across structure, agents, features, scripts, extensions, and git | `visibility` | Read-only | [spec-kit-doctor](https://github.com/KhawarHabibKhan/spec-kit-doctor) |
| Project Status | Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary | `visibility` | Read-only | [spec-kit-status](https://github.com/KhawarHabibKhan/spec-kit-status) |
| QA Testing Extension | Systematic QA testing with browser-driven or CLI-based validation of acceptance criteria from spec | `code` | Read-only | [spec-kit-qa](https://github.com/arunt14/spec-kit-qa) |
| Ralph Loop | Autonomous implementation loop using AI agent CLI | `code` | Read+Write | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |
| Reconcile Extension | Reconcile implementation drift by surgically updating feature artifacts. | `docs` | Read+Write | [spec-kit-reconcile](https://github.com/stn1slv/spec-kit-reconcile) |
| Repository Index | Generate index for existing repo for overview, architecture and module level. | `docs` | Read-only | [spec-kit-repoindex](https://github.com/liuyiyu/spec-kit-repoindex) |
| Retro Extension | Sprint retrospective analysis with metrics, spec accuracy assessment, and improvement suggestions | `process` | Read+Write | [spec-kit-retro](https://github.com/arunt14/spec-kit-retro) |
| Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | `docs` | Read+Write | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
| Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | `code` | Read-only | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
| SDD Utilities | Resume interrupted workflows, validate project health, and verify spec-to-task traceability | `process` | Read+Write | [speckit-utils](https://github.com/mvanhorn/speckit-utils) |
| Staff Review Extension | Staff-engineer-level code review that validates implementation against spec, checks security, performance, and test coverage | `code` | Read-only | [spec-kit-staff-review](https://github.com/arunt14/spec-kit-staff-review) |
| Superpowers Bridge | Orchestrates obra/superpowers skills within the spec-kit SDD workflow across the full lifecycle (clarification, TDD, review, verification, critique, debugging, branch completion) | `process` | Read+Write | [superpowers-bridge](https://github.com/RbBtSn0w/spec-kit-extensions/tree/main/superpowers-bridge) |
| Ship Release Extension | Automates release pipeline: pre-flight checks, branch sync, changelog generation, CI verification, and PR creation | `process` | Read+Write | [spec-kit-ship](https://github.com/arunt14/spec-kit-ship) |
| Spec Critique Extension | Dual-lens critical review of spec and plan from product strategy and engineering risk perspectives | `docs` | Read-only | [spec-kit-critique](https://github.com/arunt14/spec-kit-critique) |
| Spec Sync | Detect and resolve drift between specs and implementation. AI-assisted resolution with human approval | `docs` | Read+Write | [spec-kit-sync](https://github.com/bgervin/spec-kit-sync) |
| V-Model Extension Pack | Enforces V-Model paired generation of development specs and test specs with full traceability | `docs` | Read+Write | [spec-kit-v-model](https://github.com/leocamello/spec-kit-v-model) |
| Verify Extension | Post-implementation quality gate that validates implemented code against specification artifacts | `code` | Read-only | [spec-kit-verify](https://github.com/ismaelJimenez/spec-kit-verify) |
| Verify Tasks Extension | Detect phantom completions: tasks marked [X] in tasks.md with no real implementation | `code` | Read-only | [spec-kit-verify-tasks](https://github.com/datastone-inc/spec-kit-verify-tasks) |
To submit your own extension, see the [Extension Publishing Guide](extensions/EXTENSION-PUBLISHING-GUIDE.md).
## 🎨 Community Presets
> [!NOTE]
> Community presets are independently created and maintained by their respective authors. GitHub and the Spec Kit maintainers may review pull requests that add entries to the community catalog for formatting, catalog structure, or policy compliance, but they do **not review, audit, endorse, or support the preset code itself**. Review preset source code before installation and use at your own discretion.
The following community-contributed presets customize how Spec Kit behaves — overriding templates, commands, and terminology without changing any tooling. Presets are available in [`catalog.community.json`](presets/catalog.community.json):
| Preset | Purpose | Provides | Requires | URL |
|--------|---------|----------|----------|-----|
| AIDE In-Place Migration | Adapts the AIDE extension workflow for in-place technology migrations (X → Y pattern) — adds migration objectives, verification gates, knowledge documents, and behavioral equivalence criteria | 2 templates, 8 commands | AIDE extension | [spec-kit-presets](https://github.com/mnriem/spec-kit-presets) |
| Pirate Speak (Full) | Transforms all Spec Kit output into pirate speak — specs become "Voyage Manifests", plans become "Battle Plans", tasks become "Crew Assignments" | 6 templates, 9 commands | — | [spec-kit-presets](https://github.com/mnriem/spec-kit-presets) |
To build and publish your own preset, see the [Presets Publishing Guide](presets/PUBLISHING.md).
## 🚶 Community Walkthroughs
> [!NOTE]
> Community walkthroughs are independently created and maintained by their respective authors. They are **not reviewed, nor endorsed, nor supported by GitHub**. Review their content before following along and use at your own discretion.
See Spec-Driven Development in action across different scenarios with these community-contributed walkthroughs:
- **[Greenfield .NET CLI tool](https://github.com/mnriem/spec-kit-dotnet-cli-demo)** — Builds a Timezone Utility as a .NET single-binary CLI tool from a blank directory, covering the full spec-kit workflow: constitution, specify, plan, tasks, and multi-pass implement using GitHub Copilot agents.
@@ -256,23 +158,6 @@ See Spec-Driven Development in action across different scenarios with these comm
- **[Brownfield Java runtime extension](https://github.com/mnriem/spec-kit-java-brownfield-demo)** — Extends an existing open-source Jakarta EE runtime (Piranha, ~420,000 lines of Java, XML, JSP, HTML, and config files across 180 Maven modules) with a password-protected Server Admin Console, demonstrating spec-kit on a large multi-module Java project with no prior specs or constitution.
- **[Brownfield Go / React dashboard demo](https://github.com/mnriem/spec-kit-go-brownfield-demo)** — Demonstrates spec-kit driven entirely from the **terminal using GitHub Copilot CLI**. Extends NASA's open-source Hermes ground support system (Go) with a lightweight React-based web telemetry dashboard, showing that the full constitution → specify → plan → tasks → implement workflow works from the terminal.
- **[Greenfield Spring Boot MVC with a custom preset](https://github.com/mnriem/spec-kit-pirate-speak-preset-demo)** — Builds a Spring Boot MVC application from scratch using a custom pirate-speak preset, demonstrating how presets can reshape the entire spec-kit experience: specifications become "Voyage Manifests," plans become "Battle Plans," and tasks become "Crew Assignments" — all generated in full pirate vernacular without changing any tooling.
- **[Greenfield Spring Boot + React with a custom extension](https://github.com/mnriem/spec-kit-aide-extension-demo)** — Walks through the **AIDE extension**, a community extension that adds an alternative spec-driven workflow to spec-kit with high-level specs (vision) and low-level specs (work items) organized in a 7-step iterative lifecycle: vision → roadmap → progress tracking → work queue → work items → execution → feedback loops. Uses a family trading platform (Spring Boot 4, React 19, PostgreSQL, Docker Compose) as the scenario to illustrate how the extension mechanism lets you plug in a different style of spec-driven development without changing any core tooling — truly utilizing the "Kit" in Spec Kit.
## 🛠️ Community Friends
> [!NOTE]
> Community projects listed here are independently created and maintained by their respective authors. They are **not reviewed, nor endorsed, nor supported by GitHub**. Review their source code before installation and use at your own discretion.
Community projects that extend, visualize, or build on Spec Kit:
- **[cc-sdd](https://github.com/rhuss/cc-sdd)** - A Claude Code plugin that adds composable traits on top of Spec Kit with [Superpowers](https://github.com/obra/superpowers)-based quality gates, spec/code review, git worktree isolation, and parallel implementation via agent teams.
- **[Spec Kit Assistant](https://marketplace.visualstudio.com/items?itemName=rfsales.speckit-assistant)** — A VS Code extension that provides a visual orchestrator for the full SDD workflow (constitution → specification → planning → tasks → implementation) with phase status visualization, an interactive task checklist, DAG visualization, and support for Claude, Gemini, GitHub Copilot, and OpenAI backends. Requires the `specify` CLI in your PATH.
## 🤖 Supported AI Agents
| Agent | Support | Notes |
@@ -281,9 +166,9 @@ Community projects that extend, visualize, or build on Spec Kit:
| [Kiro CLI](https://kiro.dev/docs/cli/) | ✅ | Use `--ai kiro-cli` (alias: `--ai kiro`) |
| [Amp](https://ampcode.com/) | ✅ | |
| [Auggie CLI](https://docs.augmentcode.com/cli/overview) | ✅ | |
| [Claude Code](https://www.anthropic.com/claude-code) | ✅ | Installs skills in `.claude/skills`; invoke spec-kit as `/speckit-constitution`, `/speckit-plan`, etc. |
| [Claude Code](https://www.anthropic.com/claude-code) | ✅ | |
| [CodeBuddy CLI](https://www.codebuddy.ai/cli) | ✅ | |
| [Codex CLI](https://github.com/openai/codex) | ✅ | Requires `--ai-skills`. Codex recommends [skills](https://developers.openai.com/codex/skills) and treats [custom prompts](https://developers.openai.com/codex/custom-prompts) as deprecated. Spec-kit installs Codex skills into `.agents/skills` and invokes them as `$speckit-<command>`. |
| [Codex CLI](https://github.com/openai/codex) | ✅ | |
| [Cursor](https://cursor.sh/) | ✅ | |
| [Gemini CLI](https://github.com/google-gemini/gemini-cli) | ✅ | |
| [GitHub Copilot](https://code.visualstudio.com/) | ✅ | |
@@ -291,18 +176,14 @@ Community projects that extend, visualize, or build on Spec Kit:
| [Jules](https://jules.google.com/) | ✅ | |
| [Kilo Code](https://github.com/Kilo-Org/kilocode) | ✅ | |
| [opencode](https://opencode.ai/) | ✅ | |
| [Pi Coding Agent](https://pi.dev) | ✅ | Pi doesn't have MCP support out of the box, so `taskstoissues` won't work as intended. MCP support can be added via [extensions](https://github.com/badlogic/pi-mono/tree/main/packages/coding-agent#extensions) |
| [Qwen Code](https://github.com/QwenLM/qwen-code) | ✅ | |
| [Roo Code](https://roocode.com/) | ✅ | |
| [SHAI (OVHcloud)](https://github.com/ovh/shai) | ✅ | |
| [Tabnine CLI](https://docs.tabnine.com/main/getting-started/tabnine-cli) | ✅ | |
| [Mistral Vibe](https://github.com/mistralai/mistral-vibe) | ✅ | |
| [Kimi Code](https://code.kimi.com/) | ✅ | |
| [iFlow CLI](https://docs.iflow.cn/en/cli/quickstart) | ✅ | |
| [Windsurf](https://windsurf.com/) | ✅ | |
| [Junie](https://junie.jetbrains.com/) | ✅ | |
| [Antigravity (agy)](https://antigravity.google/) | ✅ | Requires `--ai-skills` |
| [Trae](https://www.trae.ai/) | ✅ | |
| Generic | ✅ | Bring your own agent — use `--ai generic --ai-commands-dir <path>` for unsupported agents |
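
For the generic option, initialization looks like this (the commands directory is the example path used elsewhere in this document):

```bash
# Bring-your-own-agent: point spec-kit at the directory your agent reads commands from.
specify init my-project --ai generic --ai-commands-dir .myagent/commands/
```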
## 🔧 Specify CLI Reference
@@ -311,28 +192,27 @@ The `specify` command supports the following options:
### Commands
| Command | Description |
| ------- |------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `init` | Initialize a new Specify project from the latest template |
| `check` | Check for installed tools: `git` plus all CLI-based agents configured in `AGENT_CONFIG` (for example: `claude`, `gemini`, `code`/`code-insiders`, `cursor-agent`, `windsurf`, `junie`, `qwen`, `opencode`, `codex`, `kiro-cli`, `shai`, `qodercli`, `vibe`, `kimi`, `iflow`, `pi`, etc.) |
| Command | Description |
| ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `init` | Initialize a new Specify project from the latest template |
| `check` | Check for installed tools (`git`, `claude`, `gemini`, `code`/`code-insiders`, `cursor-agent`, `windsurf`, `qwen`, `opencode`, `codex`, `kiro-cli`, `shai`, `qodercli`, `vibe`, `kimi`) |
### `specify init` Arguments & Options
| Argument/Option | Type | Description |
| ---------------------- | -------- |-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `<project-name>` | Argument | Name for your new project directory (optional if using `--here`, or use `.` for current directory) |
| `--ai` | Option | AI assistant to use (see `AGENT_CONFIG` for the full, up-to-date list). Common options include: `claude`, `gemini`, `copilot`, `cursor-agent`, `qwen`, `opencode`, `codex`, `windsurf`, `junie`, `kilocode`, `auggie`, `roo`, `codebuddy`, `amp`, `shai`, `kiro-cli` (`kiro` alias), `agy`, `bob`, `qodercli`, `vibe`, `kimi`, `iflow`, `pi`, or `generic` (requires `--ai-commands-dir`) |
| `--ai-commands-dir` | Option | Directory for agent command files (required with `--ai generic`, e.g. `.myagent/commands/`) |
| `--script` | Option | Script variant to use: `sh` (bash/zsh) or `ps` (PowerShell) |
| `--ignore-agent-tools` | Flag | Skip checks for AI agent tools like Claude Code |
| `--no-git` | Flag | Skip git repository initialization |
| `--here` | Flag | Initialize project in the current directory instead of creating a new one |
| `--force` | Flag | Force merge/overwrite when initializing in current directory (skip confirmation) |
| `--skip-tls` | Flag | Skip SSL/TLS verification (not recommended) |
| `--debug` | Flag | Enable detailed debug output for troubleshooting |
| `--github-token` | Option | GitHub token for API requests (or set GH_TOKEN/GITHUB_TOKEN env variable) |
| `--ai-skills` | Flag | Install Prompt.MD templates as agent skills in agent-specific `skills/` directory (requires `--ai`). Extension commands are also auto-registered as skills when extensions are added later. |
| `--branch-numbering` | Option | Branch numbering strategy: `sequential` (default — `001`, `002`, `003`) or `timestamp` (`YYYYMMDD-HHMMSS`). Timestamp mode is useful for distributed teams to avoid numbering conflicts |
| Argument/Option | Type | Description |
| ---------------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `<project-name>` | Argument | Name for your new project directory (optional if using `--here`, or use `.` for current directory) |
| `--ai` | Option | AI assistant to use: `claude`, `gemini`, `copilot`, `cursor-agent`, `qwen`, `opencode`, `codex`, `windsurf`, `kilocode`, `auggie`, `roo`, `codebuddy`, `amp`, `shai`, `kiro-cli` (`kiro` alias), `agy`, `bob`, `qodercli`, `vibe`, `kimi`, or `generic` (requires `--ai-commands-dir`) |
| `--ai-commands-dir` | Option | Directory for agent command files (required with `--ai generic`, e.g. `.myagent/commands/`) |
| `--script` | Option | Script variant to use: `sh` (bash/zsh) or `ps` (PowerShell) |
| `--ignore-agent-tools` | Flag | Skip checks for AI agent tools like Claude Code |
| `--no-git` | Flag | Skip git repository initialization |
| `--here` | Flag | Initialize project in the current directory instead of creating a new one |
| `--force` | Flag | Force merge/overwrite when initializing in current directory (skip confirmation) |
| `--skip-tls` | Flag | Skip SSL/TLS verification (not recommended) |
| `--debug` | Flag | Enable detailed debug output for troubleshooting |
| `--github-token` | Option | GitHub token for API requests (or set GH_TOKEN/GITHUB_TOKEN env variable) |
| `--ai-skills` | Flag | Install Prompt.MD templates as agent skills in agent-specific `skills/` directory (requires `--ai`) |
### Examples
@@ -367,12 +247,6 @@ specify init my-project --ai vibe
# Initialize with IBM Bob support
specify init my-project --ai bob
# Initialize with Pi Coding Agent support
specify init my-project --ai pi
# Initialize with Codex CLI support
specify init my-project --ai codex --ai-skills
# Initialize with Antigravity support
specify init my-project --ai agy --ai-skills
@@ -401,28 +275,19 @@ specify init my-project --ai claude --debug
# Use GitHub token for API requests (helpful for corporate environments)
specify init my-project --ai claude --github-token ghp_your_token_here
# Claude Code installs skills with the project by default
specify init my-project --ai claude
# Install agent skills with the project
specify init my-project --ai claude --ai-skills
# Initialize in current directory with agent skills
specify init --here --ai gemini --ai-skills
# Use timestamp-based branch numbering (useful for distributed teams)
specify init my-project --ai claude --branch-numbering timestamp
# Check system requirements
specify check
```
### Available Slash Commands
After running `specify init`, your AI coding agent will have access to these structured development commands.
Most agents expose the traditional dotted slash commands shown below, like `/speckit.plan`.
Claude Code installs spec-kit as skills and invokes them as `/speckit-constitution`, `/speckit-specify`, `/speckit-plan`, `/speckit-tasks`, and `/speckit-implement`.
For Codex CLI, `--ai-skills` installs spec-kit as agent skills instead of slash-command prompt files. In Codex skills mode, invoke spec-kit as `$speckit-constitution`, `$speckit-specify`, `$speckit-plan`, `$speckit-tasks`, and `$speckit-implement`.
After running `specify init`, your AI coding agent will have access to these slash commands for structured development:
#### Core Commands
@@ -452,68 +317,6 @@ Additional commands for enhanced quality and validation:
| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `SPECIFY_FEATURE` | Override feature detection for non-Git repositories. Set to the feature directory name (e.g., `001-photo-albums`) to work on a specific feature when not using Git branches.<br/>\*\*Must be set in the context of the agent you're working with prior to using `/speckit.plan` or follow-up commands. |
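
For example (feature directory name taken from the table above):

```bash
# Work on a specific feature in a non-Git repository before running /speckit.plan:
export SPECIFY_FEATURE=001-photo-albums
```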
## 🧩 Making Spec Kit Your Own: Extensions & Presets
Spec Kit can be tailored to your needs through two complementary systems — **extensions** and **presets** — plus project-local overrides for one-off adjustments:
```mermaid
block-beta
columns 1
overrides["⬆ Highest priority\nProject-Local Overrides\n.specify/templates/overrides/"]
presets["Presets — Customize core & extensions\n.specify/presets/<preset-id>/templates/"]
extensions["Extensions — Add new capabilities\n.specify/extensions/<ext-id>/templates/"]
core["Spec Kit Core — Built-in SDD commands & templates\n.specify/templates/\n⬇ Lowest priority"]
style overrides fill:transparent,stroke:#999
style presets fill:transparent,stroke:#4a9eda
style extensions fill:transparent,stroke:#4a9e4a
style core fill:transparent,stroke:#e6a817
```
**Templates** are resolved at **runtime** — Spec Kit walks the stack top-down and uses the first match. Project-local overrides (`.specify/templates/overrides/`) let you make one-off adjustments for a single project without creating a full preset. **Commands** are applied at **install time** — when you run `specify extension add` or `specify preset add`, command files are written into agent directories (e.g., `.claude/commands/`). If multiple presets or extensions provide the same command, the highest-priority version wins. On removal, the next-highest-priority version is restored automatically. If no overrides or customizations exist, Spec Kit uses its core defaults.
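
A rough sketch of the top-down template lookup described above (the file name is an example; the actual resolver lives inside the CLI):

```bash
# Walk the stack from highest to lowest priority and use the first match.
for dir in .specify/templates/overrides \
           .specify/presets/*/templates \
           .specify/extensions/*/templates \
           .specify/templates; do
  if [ -f "$dir/spec-template.md" ]; then
    echo "resolved: $dir/spec-template.md"
    break
  fi
done
```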
### Extensions — Add New Capabilities
Use **extensions** when you need functionality that goes beyond Spec Kit's core. Extensions introduce new commands and templates — for example, adding domain-specific workflows that are not covered by the built-in SDD commands, integrating with external tools, or adding entirely new development phases. They expand *what Spec Kit can do*.
```bash
# Search available extensions
specify extension search
# Install an extension
specify extension add <extension-name>
```
For example, extensions could add Jira integration, post-implementation code review, V-Model test traceability, or project health diagnostics.
See the [Extensions README](./extensions/README.md) for the full guide and how to build and publish your own. Browse the [community extensions](#-community-extensions) above for what's available.
### Presets — Customize Existing Workflows
Use **presets** when you want to change *how* Spec Kit works without adding new capabilities. Presets override the templates and commands that ship with the core *and* with installed extensions — for example, enforcing a compliance-oriented spec format, using domain-specific terminology, or applying organizational standards to plans and tasks. They customize the artifacts and instructions that Spec Kit and its extensions produce.
```bash
# Search available presets
specify preset search
# Install a preset
specify preset add <preset-name>
```
For example, presets could restructure spec templates to require regulatory traceability, adapt the workflow to fit the methodology you use (e.g., Agile, Kanban, Waterfall, jobs-to-be-done, or domain-driven design), add mandatory security review gates to plans, enforce test-first task ordering, or localize the entire workflow to a different language. The [pirate-speak demo](https://github.com/mnriem/spec-kit-pirate-speak-preset-demo) shows just how deep the customization can go. Multiple presets can be stacked with priority ordering.
See the [Presets README](./presets/README.md) for the full guide, including resolution order, priority, and how to create your own.
### When to Use Which
| Goal | Use |
| --- | --- |
| Add a brand-new command or workflow | Extension |
| Customize the format of specs, plans, or tasks | Preset |
| Integrate an external tool or service | Extension |
| Enforce organizational or regulatory standards | Preset |
| Ship reusable domain-specific templates | Either — presets for template overrides, extensions for templates bundled with new commands |
## 📚 Core Philosophy
Spec-Driven Development is a structured process that emphasizes:
@@ -608,11 +411,11 @@ specify init <project_name> --ai copilot
# Or in current directory:
specify init . --ai claude
specify init . --ai codex --ai-skills
specify init . --ai codex
# or use --here flag
specify init --here --ai claude
specify init --here --ai codex --ai-skills
specify init --here --ai codex
# Force merge into a non-empty current directory
specify init . --force --ai claude
@@ -621,7 +424,7 @@ specify init . --force --ai claude
specify init --here --force --ai claude
```
The CLI will check if you have Claude Code, Gemini CLI, Cursor CLI, Qwen CLI, opencode, Codex CLI, Qoder CLI, Tabnine CLI, Kiro CLI, Pi, or Mistral Vibe installed. If you do not, or you prefer to get the templates without checking for the right tools, use `--ignore-agent-tools` with your command:
The CLI will check if you have Claude Code, Gemini CLI, Cursor CLI, Qwen CLI, opencode, Codex CLI, Qoder CLI, Tabnine CLI, Kiro CLI, or Mistral Vibe installed. If you do not, or you prefer to get the templates without checking for the right tools, use `--ignore-agent-tools` with your command:
```bash
specify init <project_name> --ai claude --ignore-agent-tools

View File

@@ -1,17 +1,18 @@
# Support
## How to get help
## How to file issues and get help
Please search existing [issues](https://github.com/github/spec-kit/issues) and [discussions](https://github.com/github/spec-kit/discussions) before creating new ones to avoid duplicates.
This project uses GitHub issues to track bugs and feature requests. Please search existing issues before filing a new one to avoid duplicates.
- Review the [README](./README.md) for getting started instructions and troubleshooting tips
For help or questions about using this project, please:
- Open a [GitHub issue](https://github.com/github/spec-kit/issues/new) for bug reports, feature requests, or questions about the Spec-Driven Development methodology
- Check the [comprehensive guide](./spec-driven.md) for detailed documentation on the Spec-Driven Development process
- Ask in [GitHub Discussions](https://github.com/github/spec-kit/discussions) for questions about using Spec Kit or the Spec-Driven Development methodology
- Open a [GitHub issue](https://github.com/github/spec-kit/issues/new) for bug reports and feature requests
- Review the [README](./README.md) for getting started instructions and troubleshooting tips
## Project Status
**Spec Kit** is under active development and maintained by GitHub staff and the community. We will do our best to respond to support, feature requests, and community questions as time permits.
**Spec Kit** is under active development and maintained by GitHub staff **and the community**. We will do our best to respond to support, feature requests, and community questions in a timely manner.
## GitHub Support Policy

View File

@@ -1,133 +0,0 @@
# Testing Guide
This document is the detailed testing companion to [`CONTRIBUTING.md`](./CONTRIBUTING.md).
Use it for three things:
1. running quick automated checks before manual testing,
2. manually testing affected slash commands through an AI agent, and
3. capturing the results in a PR-friendly format.
Any change that affects a slash command's behavior requires manually testing that command through an AI agent and submitting results with the PR.
## Recommended order
1. **Sync your environment** — install the project and test dependencies.
2. **Run focused automated checks** — especially for packaging, scaffolding, agent config, and generated-file changes.
3. **Run manual agent tests** — for any affected slash commands.
4. **Paste results into your PR** — include both command-selection reasoning and manual test results.
## Quick automated checks
Run these before manual testing when your change affects packaging, scaffolding, templates, release artifacts, or agent wiring.
### Environment setup
```bash
cd <spec-kit-repo>
uv sync --extra test
source .venv/bin/activate # On Windows (CMD): .venv\Scripts\activate | (PowerShell): .venv\Scripts\Activate.ps1
```
### Generated package structure and content
```bash
uv run python -m pytest tests/test_core_pack_scaffold.py -q
```
This validates the generated files that CI-style packaging depends on, including directory layout, file names, frontmatter/TOML validity, placeholder replacement, `.specify/` path rewrites, and parity with `create-release-packages.sh`.
### Agent configuration and release wiring consistency
```bash
uv run python -m pytest tests/test_agent_config_consistency.py -q
```
Run this when you change agent metadata, release scripts, context update scripts, or artifact naming.
### Optional single-agent packaging spot check
```bash
AGENTS=copilot SCRIPTS=sh ./.github/workflows/scripts/create-release-packages.sh v1.0.0
```
Inspect `.genreleases/sdd-copilot-package-sh/` and the matching ZIP in `.genreleases/` when you want to review the exact packaged output for one agent/script combination.
## Manual testing process
1. **Identify affected commands** — use the [prompt below](#determining-which-tests-to-run) to have your agent analyze your changed files and determine which commands need testing.
2. **Set up a test project** — scaffold from your local branch (see [Setup](#setup)).
3. **Run each affected command** — invoke it in your agent, verify it completes successfully, and confirm it produces the expected output (files created, scripts executed, artifacts populated).
4. **Run prerequisites first** — commands that depend on earlier commands (e.g., `/speckit.tasks` requires `/speckit.plan` which requires `/speckit.specify`) must be run in order.
5. **Report results** — paste the [reporting template](#reporting-results) into your PR with pass/fail for each command tested.
## Setup
```bash
# Install the project and test dependencies from your local branch
cd <spec-kit-repo>
uv sync --extra test
source .venv/bin/activate # On Windows (CMD): .venv\Scripts\activate | (PowerShell): .venv\Scripts\Activate.ps1
uv pip install -e .
# Ensure the `specify` binary in this environment points at your working tree so the agent runs the branch you're testing.
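which specify   # sanity check: should resolve inside this repo's .venv (exact path is an assumption)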
# Initialize a test project using your local changes
uv run specify init /tmp/speckit-test --ai <agent> --offline
cd /tmp/speckit-test
# Open in your agent
```
If you are testing the packaged output rather than the live source tree, create a local release package first as described in [`CONTRIBUTING.md`](./CONTRIBUTING.md).
## Reporting results
Paste this into your PR:
~~~markdown
## Manual test results
**Agent**: [e.g., GitHub Copilot in VS Code] | **OS/Shell**: [e.g., macOS/zsh]
| Command tested | Notes |
|----------------|-------|
| `/speckit.command` | |
~~~
## Determining which tests to run
Copy this prompt into your agent. Include the agent's response (selected tests plus a brief explanation of the mapping) in your PR.
~~~text
Read TESTING.md, then run `git diff --name-only main` to get my changed files.
For each changed file, determine which slash commands it affects by reading
the command templates in templates/commands/ to understand what each command
invokes. Use these mapping rules:
- templates/commands/X.md → the command it defines
- scripts/bash/Y.sh or scripts/powershell/Y.ps1 → every command that invokes that script (grep templates/commands/ for the script name). Also check transitive dependencies: if the changed script is sourced by other scripts (e.g., common.sh is sourced by create-new-feature.sh, check-prerequisites.sh, setup-plan.sh, update-agent-context.sh), then every command invoking those downstream scripts is also affected
- templates/Z-template.md → every command that consumes that template during execution
- src/specify_cli/*.py → CLI commands (`specify init`, `specify check`, `specify extension *`, `specify preset *`); test the affected CLI command and, for init/scaffolding changes, at minimum test /speckit.specify
- extensions/X/commands/* → the extension command it defines
- extensions/X/scripts/* → every extension command that invokes that script
- extensions/X/extension.yml or config-template.yml → every command in that extension. Also check if the manifest defines hooks (look for `hooks:` entries like `before_specify`, `after_implement`, etc.) — if so, the core commands those hooks attach to are also affected
- presets/*/* → test preset scaffolding via `specify init` with the preset
- pyproject.toml → packaging/bundling; test `specify init` and verify bundled assets
Include prerequisite tests (e.g., T5 requires T3 requires T1).
Output in this format:
### Test selection reasoning
| Changed file | Affects | Test | Why |
|---|---|---|---|
| (path) | (command) | T# | (reason) |
### Required tests
Number each test sequentially (T1, T2, ...). List prerequisite tests first.
- T1: /speckit.command — (reason)
- T2: /speckit.command — (reason)
~~~

View File

@@ -3,7 +3,7 @@
## Prerequisites
- **Linux/macOS** (or Windows; PowerShell scripts now supported without WSL)
- AI coding agent: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), [Codebuddy CLI](https://www.codebuddy.ai/cli), [Gemini CLI](https://github.com/google-gemini/gemini-cli), or [Pi Coding Agent](https://pi.dev)
- AI coding agent: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), [Codebuddy CLI](https://www.codebuddy.ai/cli) or [Gemini CLI](https://github.com/google-gemini/gemini-cli)
- [uv](https://docs.astral.sh/uv/) for package management
- [Python 3.11+](https://www.python.org/downloads/)
- [Git](https://git-scm.com/downloads)
@@ -12,22 +12,18 @@
### Initialize a New Project
The easiest way to get started is to initialize a new project. Pin a specific release tag for stability (check [Releases](https://github.com/github/spec-kit/releases) for the latest):
The easiest way to get started is to initialize a new project:
```bash
# Install from a specific stable release (recommended — replace vX.Y.Z with the latest tag)
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <PROJECT_NAME>
# Or install latest from main (may include unreleased changes)
uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME>
```
Or initialize in the current directory:
```bash
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init .
uvx --from git+https://github.com/github/spec-kit.git specify init .
# or use the --here flag
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init --here
uvx --from git+https://github.com/github/spec-kit.git specify init --here
```
### Specify AI Agent
@@ -35,11 +31,10 @@ uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init --here
You can proactively specify your AI agent during initialization:
```bash
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai claude
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai gemini
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai copilot
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai codebuddy
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai pi
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai claude
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai gemini
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai copilot
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai codebuddy
```
### Specify Script Type (Shell vs PowerShell)
@@ -55,8 +50,8 @@ Auto behavior:
Force a specific script type:
```bash
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --script sh
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --script ps
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --script sh
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --script ps
```
### Ignore Agent Tools Check
@@ -64,7 +59,7 @@ uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <proje
If you prefer to get the templates without checking for the right tools:
```bash
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai claude --ignore-agent-tools
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai claude --ignore-agent-tools
```
## Verification
@@ -79,52 +74,6 @@ The `.specify/scripts` directory will contain both `.sh` and `.ps1` scripts.
## Troubleshooting
### Enterprise / Air-Gapped Installation
If your environment blocks access to PyPI (you see 403 errors when running `uv tool install` or `pip install`), you can create a portable wheel bundle on a connected machine and transfer it to the air-gapped target.
**Step 1: Build the wheel on a connected machine (same OS and Python version as the target)**
```bash
# Clone the repository
git clone https://github.com/github/spec-kit.git
cd spec-kit
# Build the wheel
pip install build
python -m build --wheel --outdir dist/
# Download the wheel and all its runtime dependencies
pip download -d dist/ dist/specify_cli-*.whl
```
> **Important:** `pip download` resolves platform-specific wheels (e.g., PyYAML includes native extensions). You must run this step on a machine with the **same OS and Python version** as the air-gapped target. If you need to support multiple platforms, repeat this step on each target OS (Linux, macOS, Windows) and Python version.
**Step 2: Transfer the `dist/` directory to the air-gapped machine**
Copy the entire `dist/` directory (which contains the `specify-cli` wheel and all dependency wheels) to the target machine via USB, network share, or other approved transfer method.
**Step 3: Install on the air-gapped machine**
```bash
pip install --no-index --find-links=./dist specify-cli
```
**Step 4: Initialize a project (no network required)**
```bash
# Initialize a project — no GitHub access needed
specify init my-project --ai claude --offline
```
The `--offline` flag tells the CLI to use the templates, commands, and scripts bundled inside the wheel instead of downloading from GitHub.
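To sanity-check that an installed build actually carries the bundled assets, you can inspect the package data from Python. A quick probe, assuming the assets live under `specify_cli/core_pack` as configured in `pyproject.toml`:

```bash
# Prints the bundle path and whether it exists in the installed package
python -c "from importlib.resources import files; p = files('specify_cli') / 'core_pack'; print(p, p.is_dir())"
```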
> **Deprecation notice:** Starting with v0.6.0, `specify init` will use bundled assets by default and the `--offline` flag will be removed. The GitHub download path will be retired because bundled assets eliminate the need for network access, avoid proxy/firewall issues, and guarantee that templates always match the installed CLI version. No action will be needed — `specify init` will simply work without network access out of the box.
> **Note:** Python 3.11+ is required.
> **Windows note:** Offline scaffolding requires PowerShell 7+ (`pwsh`), not Windows PowerShell 5.x (`powershell.exe`). Install from https://aka.ms/powershell.
### Git Credential Manager on Linux
If you're having issues with Git authentication on Linux, you can install Git Credential Manager:

View File

@@ -8,7 +8,7 @@
| What to Upgrade | Command | When to Use |
|----------------|---------|-------------|
| **CLI Tool Only** | `uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git@vX.Y.Z` | Get latest CLI features without touching project files |
| **CLI Tool Only** | `uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git` | Get latest CLI features without touching project files |
| **Project Files** | `specify init --here --force --ai <your-agent>` | Update slash commands, templates, and scripts in your project |
| **Both** | Run CLI upgrade, then project update | Recommended for major version updates |
@@ -20,18 +20,16 @@ The CLI tool (`specify`) is separate from your project files. Upgrade it to get
### If you installed with `uv tool install`
Upgrade to a specific release (check [Releases](https://github.com/github/spec-kit/releases) for the latest tag):
```bash
uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git@vX.Y.Z
uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git
```
### If you use one-shot `uvx` commands
Specify the desired release tag:
No upgrade needed—`uvx` always fetches the latest version. Just run your commands as normal:
```bash
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init --here --ai copilot
uvx --from git+https://github.com/github/spec-kit.git specify init --here --ai copilot
```
### Verify the upgrade
@@ -291,9 +289,8 @@ This tells Spec Kit which feature directory to use when creating specs, plans, a
```bash
ls -la .claude/commands/ # Claude Code
ls -la .gemini/commands/ # Gemini
ls -la .cursor/commands/ # Cursor
ls -la .pi/prompts/ # Pi Coding Agent
ls -la .gemini/commands/ # Gemini
ls -la .cursor/commands/ # Cursor
```
3. **Check agent-specific setup:**
@@ -401,7 +398,7 @@ The `specify` CLI tool is used for:
- **Upgrades:** `specify init --here --force` to update templates and commands
- **Diagnostics:** `specify check` to verify tool installation
Once you've run `specify init`, the slash commands (like `/speckit.specify`, `/speckit.plan`, etc.) are **permanently installed** in your project's agent folder (`.claude/`, `.github/prompts/`, `.pi/prompts/`, etc.). Your AI assistant reads these command files directly—no need to run `specify` again.
Once you've run `specify init`, the slash commands (like `/speckit.specify`, `/speckit.plan`, etc.) are **permanently installed** in your project's agent folder (`.claude/`, `.github/prompts/`, etc.). Your AI assistant reads these command files directly—no need to run `specify` again.
**If your agent isn't recognizing slash commands:**
@@ -413,9 +410,6 @@ Once you've run `specify init`, the slash commands (like `/speckit.specify`, `/s
# For Claude
ls -la .claude/commands/
# For Pi
ls -la .pi/prompts/
```
2. **Restart your IDE/editor completely** (not just reload window)

View File

@@ -44,7 +44,7 @@ provides:
- name: string # Required, pattern: ^speckit\.[a-z0-9-]+\.[a-z0-9-]+$
file: string # Required, relative path to command file
description: string # Required
aliases: [string] # Optional, same pattern as name; namespace must match extension.id and must not shadow core or installed extension commands
aliases: [string] # Optional, array of alternate names
config: # Optional, array of config files
- name: string # Config file name
@@ -53,7 +53,7 @@ provides:
required: boolean # Default: false
hooks: # Optional, event hooks
event_name: # e.g., "after_specify", "after_plan", "after_tasks", "after_implement"
event_name: # e.g., "after_tasks", "after_implement"
command: string # Command to execute
optional: boolean # Default: true
prompt: string # Prompt text for optional hooks
@@ -108,7 +108,7 @@ defaults: # Optional, default configuration values
#### `hooks`
- **Type**: object
- **Keys**: Event names (e.g., `after_specify`, `after_plan`, `after_tasks`, `after_implement`, `before_commit`)
- **Keys**: Event names (e.g., `after_tasks`, `after_implement`, `before_commit`)
- **Description**: Hooks that execute at lifecycle events
- **Events**: Defined by core spec-kit commands
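As a concrete illustration, a manifest entry wiring a hook into one of these events might look like this; the script path and prompt text are placeholders, not from any real extension:

```yaml
hooks:
  after_tasks:
    command: "scripts/sync-tracker.sh"  # hypothetical script shipped by the extension
    optional: true                      # ask the user before executing
    prompt: "Sync the generated tasks to your issue tracker?"
```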
@@ -551,16 +551,10 @@ hooks:
Standard events (defined by core):
- `before_specify` - Before specification generation
- `after_specify` - After specification generation
- `before_plan` - Before implementation planning
- `after_plan` - After implementation planning
- `before_tasks` - Before task generation
- `after_tasks` - After task generation
- `before_implement` - Before implementation
- `after_implement` - After implementation
- `before_commit` - Before git commit *(planned - not yet wired into core templates)*
- `after_commit` - After git commit *(planned - not yet wired into core templates)*
- `before_commit` - Before git commit
- `after_commit` - After git commit
### Hook Configuration

View File

@@ -41,7 +41,7 @@ provides:
- name: "speckit.my-ext.hello" # Must follow pattern: speckit.{ext-id}.{cmd}
file: "commands/hello.md"
description: "Say hello"
aliases: ["speckit.my-ext.hi"] # Optional aliases, same pattern
aliases: ["speckit.hello"] # Optional aliases
config: # Optional: Config files
- name: "my-ext-config.yml"
@@ -186,7 +186,7 @@ What the extension provides.
- `name`: Command name (must match `speckit.{ext-id}.{command}`)
- `file`: Path to command file (relative to extension root)
- `description`: Command description (optional)
- `aliases`: Alternative command names (optional, array; each must match `speckit.{ext-id}.{command}`)
- `aliases`: Alternative command names (optional, array)
### Optional Fields
@@ -514,7 +514,7 @@ zip -r spec-kit-my-ext-1.0.0.zip extension.yml commands/ scripts/ docs/
Users install with:
```bash
specify extension add <extension-name> --from https://github.com/.../spec-kit-my-ext-1.0.0.zip
specify extension add --from https://github.com/.../spec-kit-my-ext-1.0.0.zip
```
### Option 3: Community Reference Catalog
@@ -523,7 +523,7 @@ Submit to the community catalog for public discovery:
1. **Fork** spec-kit repository
2. **Add entry** to `extensions/catalog.community.json`
3. **Update** the Community Extensions table in `README.md` with your extension
3. **Update** `extensions/README.md` with your extension
4. **Create PR** following the [Extension Publishing Guide](EXTENSION-PUBLISHING-GUIDE.md)
5. **After merge**, your extension becomes available:
- Users can browse `catalog.community.json` to discover your extension

View File

@@ -122,7 +122,7 @@ Test that users can install from your release:
specify extension add --dev /path/to/your-extension
# Test from GitHub archive
specify extension add <extension-name> --from https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip
specify extension add --from https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip
```
---
@@ -204,27 +204,14 @@ Edit `extensions/catalog.community.json` and add your extension:
- Use current timestamp for `created_at` and `updated_at`
- Update the top-level `updated_at` to current time
### 3. Update Community Extensions Table
### 3. Update Extensions README
Add your extension to the Community Extensions table in the project root `README.md`:
Add your extension to the Available Extensions table in `extensions/README.md`:
```markdown
| Your Extension Name | Brief description of what it does | `<category>` | <effect> | [repo-name](https://github.com/your-org/spec-kit-your-extension) |
| Your Extension Name | Brief description of what it does | [repo-name](https://github.com/your-org/spec-kit-your-extension) |
```
**Category** (table column) — pick the one that best fits your extension:
- `docs` — reads, validates, or generates spec artifacts
- `code` — reviews, validates, or modifies source code
- `process` — orchestrates workflow across phases
- `integration` — syncs with external platforms
- `visibility` — reports on project health or progress
**Effect** — choose one:
- Read-only — produces reports without modifying files
- Read+Write — modifies files, creates artifacts, or updates specs
Insert your extension in alphabetical order in the table.
### 4. Submit Pull Request
@@ -234,7 +221,7 @@ Insert your extension in alphabetical order in the table.
git checkout -b add-your-extension
# Commit your changes
git add extensions/catalog.community.json README.md
git add extensions/catalog.community.json extensions/README.md
git commit -m "Add your-extension to community catalog
- Extension ID: your-extension
@@ -273,7 +260,7 @@ Brief description of what your extension does.
- [x] All commands working
- [x] No security vulnerabilities
- [x] Added to extensions/catalog.community.json
- [x] Added to Community Extensions table in README.md
- [x] Added to extensions/README.md Available Extensions table
### Testing
Tested on:

View File

@@ -160,7 +160,7 @@ This will:
```bash
# From GitHub release
specify extension add <extension-name> --from https://github.com/org/spec-kit-ext/archive/refs/tags/v1.0.0.zip
specify extension add --from https://github.com/org/spec-kit-ext/archive/refs/tags/v1.0.0.zip
```
### Install from Local Directory (Development)
@@ -187,21 +187,6 @@ Provided commands:
Check: .specify/extensions/jira/
```
### Automatic Agent Skill Registration
If your project was initialized with `--ai-skills`, extension commands are **automatically registered as agent skills** during installation. This ensures that extensions are discoverable by agents that use the [agentskills.io](https://agentskills.io) skill specification.
```text
✓ Extension installed successfully!
Jira Integration (v1.0.0)
...
✓ 3 agent skill(s) auto-registered
```
When an extension is removed, its corresponding skills are also cleaned up automatically. Pre-existing skills that were manually customized are never overwritten.
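A minimal end-to-end flow, assuming a fresh project and the `jira` extension from the community catalog:

```bash
# Initialize with agent skills enabled (project name is illustrative)
specify init my-project --ai claude --ai-skills
cd my-project

# Installing an extension auto-registers its commands as agent skills
specify extension add jira

# Removing the extension cleans up the registered skills as well
specify extension remove jira
```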
---
## Using Extensions
@@ -214,8 +199,8 @@ Extensions add commands that appear in your AI agent (Claude Code):
# In Claude Code
> /speckit.jira.specstoissues
# Or use a namespaced alias (if provided)
> /speckit.jira.sync
# Or use short alias (if provided)
> /speckit.specstoissues
```
### Extension Configuration
@@ -402,9 +387,6 @@ settings:
auto_execute_hooks: true
# Hook configuration
# Available events: before_specify, after_specify, before_plan, after_plan,
# before_tasks, after_tasks, before_implement, after_implement
# Planned (not yet wired into core templates): before_commit, after_commit
hooks:
after_tasks:
- extension: jira
@@ -737,7 +719,7 @@ You can still install extensions not in your catalog using `--from`:
specify extension add jira
# Direct URL (bypasses catalog)
specify extension add <extension-name> --from https://github.com/someone/spec-kit-ext/archive/v1.0.0.zip
specify extension add --from https://github.com/someone/spec-kit-ext/archive/v1.0.0.zip
# Local development
specify extension add --dev /path/to/extension
@@ -807,7 +789,7 @@ specify extension add --dev /path/to/extension
2. Install older version of extension:
```bash
specify extension add <extension-name> --from https://github.com/org/ext/archive/v1.0.0.zip
specify extension add --from https://github.com/org/ext/archive/v1.0.0.zip
```
### MCP Tool Not Available

View File

@@ -24,9 +24,6 @@ specify extension search # Now uses your organization's catalog instead of the
### Community Reference Catalog (`catalog.community.json`)
> [!NOTE]
> Community extensions are independently created and maintained by their respective authors. GitHub and the Spec Kit maintainers may review pull requests that add entries to the community catalog for formatting, catalog structure, or policy compliance, but they do **not review, audit, endorse, or support the extension code itself**. Review extension source code before installation and use at your own discretion.
- **Purpose**: Browse available community-contributed extensions
- **Status**: Active - contains extensions submitted by the community
- **Location**: `extensions/catalog.community.json`
@@ -62,7 +59,7 @@ Populate your `catalog.json` with approved extensions:
Skip catalog curation - team members install directly using URLs:
```bash
specify extension add <extension-name> --from https://github.com/org/spec-kit-ext/archive/refs/tags/v1.0.0.zip
specify extension add --from https://github.com/org/spec-kit-ext/archive/refs/tags/v1.0.0.zip
```
**Benefits**: Quick for one-off testing or private extensions
@@ -71,14 +68,22 @@ specify extension add <extension-name> --from https://github.com/org/spec-kit-ex
## Available Community Extensions
> [!NOTE]
> Community extensions are independently created and maintained by their respective authors. GitHub and the Spec Kit maintainers may review pull requests that add entries to the community catalog for formatting, catalog structure, or policy compliance, but they do **not review, audit, endorse, or support the extension code itself**. The Community Extensions website is also a third-party resource. Review extension source code before installation and use at your own discretion.
The following community-contributed extensions are available in [`catalog.community.json`](catalog.community.json):
🔍 **Browse and search community extensions on the [Community Extensions website](https://speckit-community.github.io/extensions/).**
See the [Community Extensions](../README.md#-community-extensions) section in the main README for the full list of available community-contributed extensions.
For the raw catalog data, see [`catalog.community.json`](catalog.community.json).
| Extension | Purpose | URL |
|-----------|---------|-----|
| Azure DevOps Integration | Sync user stories and tasks to Azure DevOps work items using OAuth authentication | [spec-kit-azure-devops](https://github.com/pragya247/spec-kit-azure-devops) |
| Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) |
| Fleet Orchestrator | Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases | [spec-kit-fleet](https://github.com/sharathsatish/spec-kit-fleet) |
| Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
| Project Health Check | Diagnose a Spec Kit project and report health issues across structure, agents, features, scripts, extensions, and git | [spec-kit-doctor](https://github.com/KhawarHabibKhan/spec-kit-doctor) |
| Ralph Loop | Autonomous implementation loop using AI agent CLI | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |
| Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
| Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
| Spec Sync | Detect and resolve drift between specs and implementation. AI-assisted resolution with human approval | [spec-kit-sync](https://github.com/bgervin/spec-kit-sync) |
| Understanding | Automated requirements quality analysis — 31 deterministic metrics against IEEE/ISO standards with experimental energy-based ambiguity detection | [understanding](https://github.com/Testimonial/understanding) |
| V-Model Extension Pack | Enforces V-Model paired generation of development specs and test specs with full traceability | [spec-kit-v-model](https://github.com/leocamello/spec-kit-v-model) |
| Verify Extension | Post-implementation quality gate that validates implemented code against specification artifacts | [spec-kit-verify](https://github.com/ismaelJimenez/spec-kit-verify) |
## Adding Your Extension
@@ -116,7 +121,7 @@ specify extension search # See what's in your catalog
specify extension add <extension-name> # Install by name
# Direct from URL (bypasses catalog)
specify extension add <extension-name> --from https://github.com/<org>/<repo>/archive/refs/tags/<version>.zip
specify extension add --from https://github.com/<org>/<repo>/archive/refs/tags/<version>.zip
# List installed extensions
specify extension list

View File

@@ -223,7 +223,7 @@ provides:
- name: "speckit.jira.specstoissues"
file: "commands/specstoissues.md"
description: "Create Jira hierarchy from spec and tasks"
aliases: ["speckit.jira.sync"] # Alternate names
aliases: ["speckit.specstoissues"] # Alternate names
- name: "speckit.jira.discover-fields"
file: "commands/discover-fields.md"
@@ -359,15 +359,12 @@ specify extension add jira
"installed_at": "2026-01-28T14:30:00Z",
"source": "catalog",
"manifest_hash": "sha256:abc123...",
"enabled": true,
"priority": 10
"enabled": true
}
}
}
```
**Priority Field**: Extensions are ordered by `priority` (lower = higher precedence). Default is 10. Used for template resolution when multiple extensions provide the same template.
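For example, with two hypothetical registry entries trimmed down to the relevant fields, templates from `jira` (priority 5) are consulted before templates from `linear` (priority 10):

```json
{
  "extensions": {
    "jira":   { "enabled": true, "priority": 5 },
    "linear": { "enabled": true, "priority": 10 }
  }
}
```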
### 3. Configuration
```bash
@@ -1087,15 +1084,11 @@ List installed extensions in current project.
$ specify extension list
Installed Extensions:
✓ Jira Integration (v1.0.0)
jira
Create Jira issues from spec-kit artifacts
Commands: 3 | Hooks: 2 | Priority: 10 | Status: Enabled
jira (v1.0.0) - Jira Integration
Commands: 3 | Hooks: 2 | Status: Enabled
✓ Linear Integration (v0.9.0)
linear
Create Linear issues from spec-kit artifacts
Commands: 1 | Hooks: 1 | Priority: 10 | Status: Enabled
linear (v0.9.0) - Linear Integration
Commands: 1 | Hooks: 1 | Status: Enabled
```
**Options:**
@@ -1203,9 +1196,10 @@ Next steps:
**Options:**
- `--from URL`: Install from a remote URL (archive). Does not accept Git repositories directly.
- `--dev`: Install from a local path in development mode (the PATH is the positional `extension` argument).
- `--priority NUMBER`: Set resolution priority (lower = higher precedence, default 10)
- `--from URL`: Install from custom URL or Git repo
- `--version VERSION`: Install specific version
- `--dev PATH`: Install from local path (development mode)
- `--no-register`: Skip command registration (manual setup)
#### `specify extension remove NAME`
@@ -1286,29 +1280,6 @@ $ specify extension disable jira
To re-enable: specify extension enable jira
```
#### `specify extension set-priority NAME PRIORITY`
Change the resolution priority of an installed extension.
```bash
$ specify extension set-priority jira 5
✓ Extension 'Jira Integration' priority changed: 10 → 5
Lower priority = higher precedence in template resolution
```
**Priority Values:**
- Lower numbers = higher precedence (checked first in resolution)
- Default priority is 10
- Must be a positive integer (1 or higher)
**Use Cases:**
- Ensure a critical extension's templates take precedence
- Override default resolution order when multiple extensions provide similar templates
---
## Compatibility & Versioning
@@ -1517,7 +1488,7 @@ specify extension add github-projects
/speckit.github.taskstoissues
```
**Migration alias** (if needed):
**Compatibility shim** (if needed):
```yaml
# extension.yml
@@ -1525,10 +1496,10 @@ provides:
commands:
- name: "speckit.github.taskstoissues"
file: "commands/taskstoissues.md"
aliases: ["speckit.github.sync-taskstoissues"] # Alternate namespaced entry point
aliases: ["speckit.taskstoissues"] # Backward compatibility
```
AI agents register both names, so callers can migrate to the alternate alias without relying on deprecated global shortcuts like `/speckit.taskstoissues`.
AI agent registers both names, so old scripts work.
---

File diff suppressed because it is too large.

View File

@@ -47,8 +47,8 @@ provides:
- name: "speckit.my-extension.example"
file: "commands/example.md"
description: "Example command that demonstrates functionality"
# Optional: Add aliases in the same namespaced format
aliases: ["speckit.my-extension.example-short"]
# Optional: Add aliases for shorter command names
aliases: ["speckit.example"]
# ADD MORE COMMANDS: Copy this block for each command
# - name: "speckit.my-extension.another-command"

View File

@@ -13,15 +13,13 @@ When Spec Kit needs a template (e.g. `spec-template`), it walks a resolution sta
If no preset is installed, core templates are used — exactly the same behavior as before presets existed.
Template resolution happens **at runtime** — although preset files are copied into `.specify/presets/<id>/` during installation, Spec Kit walks the resolution stack on every template lookup rather than merging templates into a single location.
For detailed resolution and command registration flows, see [ARCHITECTURE.md](ARCHITECTURE.md).
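To see which file wins for a given template name, you can call the same helper the workflow scripts use; paths assume a standard project layout with the bash scripts installed:

```bash
# Sketch: resolve a template the way the workflow scripts do
source .specify/scripts/bash/common.sh
REPO_ROOT=$(get_repo_root)
TEMPLATE=$(resolve_template "spec-template" "$REPO_ROOT") || true
echo "${TEMPLATE:-no template found}"
```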
## Command Overrides
Presets can also override the commands that guide the SDD workflow. Templates define *what* gets produced (specs, plans, constitutions); commands define *how* the LLM produces them (the step-by-step instructions).
Unlike templates, command overrides are applied **at install time**. When a preset includes `type: "command"` entries, the commands are registered into all detected agent directories (`.claude/commands/`, `.gemini/commands/`, etc.) in the correct format (Markdown or TOML with appropriate argument placeholders). When the preset is removed, the registered commands are cleaned up.
When a preset includes `type: "command"` entries, the commands are automatically registered into all detected agent directories (`.claude/commands/`, `.gemini/commands/`, etc.) in the correct format (Markdown or TOML with appropriate argument placeholders). When the preset is removed, the registered commands are cleaned up.
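A sketch of what such a manifest entry might look like; the preset schema is defined elsewhere in the preset docs, so treat the field names here as illustrative rather than authoritative:

```yaml
provides:
  - type: "command"            # registered into detected agent directories at install time
    name: "speckit.specify"    # overrides the core command of the same name
    file: "commands/specify.md"
```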
## Quick Start
@@ -67,9 +65,6 @@ Presets **override**, they don't merge. If two presets both provide `spec-templa
Presets are discovered through catalogs. By default, Spec Kit uses the official and community catalogs:
> [!NOTE]
> Community presets are independently created and maintained by their respective authors. GitHub and the Spec Kit maintainers may review pull requests that add entries to the community catalog for formatting, catalog structure, or policy compliance, but they do **not review, audit, endorse, or support the preset code itself**. Review preset source code before installation and use at your own discretion.
```bash
# List active catalogs
specify preset catalog list

View File

@@ -1,58 +1,6 @@
{
"schema_version": "1.0",
"updated_at": "2026-03-24T00:00:00Z",
"updated_at": "2026-03-09T00:00:00Z",
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/presets/catalog.community.json",
"presets": {
"aide-in-place": {
"name": "AIDE In-Place Migration",
"id": "aide-in-place",
"version": "1.0.0",
"description": "Adapts the AIDE workflow for in-place technology migrations (X → Y pattern). Overrides vision, roadmap, progress, and work item commands with migration-specific guidance.",
"author": "mnriem",
"repository": "https://github.com/mnriem/spec-kit-presets",
"download_url": "https://github.com/mnriem/spec-kit-presets/releases/download/aide-in-place-v1.0.0/aide-in-place.zip",
"homepage": "https://github.com/mnriem/spec-kit-presets",
"documentation": "https://github.com/mnriem/spec-kit-presets/blob/main/aide-in-place/README.md",
"license": "MIT",
"requires": {
"speckit_version": ">=0.2.0",
"extensions": ["aide"]
},
"provides": {
"templates": 2,
"commands": 8
},
"tags": [
"migration",
"in-place",
"brownfield",
"aide"
]
},
"pirate": {
"name": "Pirate Speak (Full)",
"id": "pirate",
"version": "1.0.0",
"description": "Arrr! Transforms all Spec Kit output into pirate speak. Specs, plans, and tasks be written fer scallywags.",
"author": "mnriem",
"repository": "https://github.com/mnriem/spec-kit-presets",
"download_url": "https://github.com/mnriem/spec-kit-presets/releases/download/pirate-v1.0.0/pirate.zip",
"homepage": "https://github.com/mnriem/spec-kit-presets",
"documentation": "https://github.com/mnriem/spec-kit-presets/blob/main/pirate/README.md",
"license": "MIT",
"requires": {
"speckit_version": ">=0.1.0"
},
"provides": {
"templates": 6,
"commands": 9
},
"tags": [
"pirate",
"theme",
"fun",
"experimental"
]
}
}
"presets": {}
}

View File

@@ -1,6 +1,6 @@
[project]
name = "specify-cli"
version = "0.4.6.dev0"
version = "0.3.0"
description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)."
requires-python = ">=3.11"
dependencies = [
@@ -14,7 +14,6 @@ dependencies = [
"pyyaml>=6.0",
"packaging>=23.0",
"pathspec>=0.12.0",
"json5>=0.13.0",
]
[project.scripts]
@@ -27,21 +26,6 @@ build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel]
packages = ["src/specify_cli"]
[tool.hatch.build.targets.wheel.force-include]
# Bundle core assets so `specify init` works without network access (air-gapped / enterprise)
# Page templates (exclude commands/ — bundled separately below to avoid duplication)
"templates/agent-file-template.md" = "specify_cli/core_pack/templates/agent-file-template.md"
"templates/checklist-template.md" = "specify_cli/core_pack/templates/checklist-template.md"
"templates/constitution-template.md" = "specify_cli/core_pack/templates/constitution-template.md"
"templates/plan-template.md" = "specify_cli/core_pack/templates/plan-template.md"
"templates/spec-template.md" = "specify_cli/core_pack/templates/spec-template.md"
"templates/tasks-template.md" = "specify_cli/core_pack/templates/tasks-template.md"
"templates/vscode-settings.json" = "specify_cli/core_pack/templates/vscode-settings.json"
# Command templates
"templates/commands" = "specify_cli/core_pack/commands"
"scripts/bash" = "specify_cli/core_pack/scripts/bash"
"scripts/powershell" = "specify_cli/core_pack/scripts/powershell"
[project.optional-dependencies]
test = [
"pytest>=7.0",

View File

@@ -168,7 +168,7 @@ if $JSON_MODE; then
if [[ ${#docs[@]} -eq 0 ]]; then
json_docs="[]"
else
json_docs=$(for d in "${docs[@]}"; do printf '"%s",' "$(json_escape "$d")"; done)
json_docs=$(printf '"%s",' "${docs[@]}")
json_docs="[${json_docs%,}]"
fi
printf '{"FEATURE_DIR":"%s","AVAILABLE_DOCS":%s}\n' "$(json_escape "$FEATURE_DIR")" "$json_docs"

View File

@@ -1,48 +1,15 @@
#!/usr/bin/env bash
# Common functions and variables for all scripts
# Find repository root by searching upward for .specify directory
# This is the primary marker for spec-kit projects
find_specify_root() {
local dir="${1:-$(pwd)}"
# Normalize to absolute path to prevent infinite loop with relative paths
# Use -- to handle paths starting with - (e.g., -P, -L)
dir="$(cd -- "$dir" 2>/dev/null && pwd)" || return 1
local prev_dir=""
while true; do
if [ -d "$dir/.specify" ]; then
echo "$dir"
return 0
fi
# Stop if we've reached filesystem root or dirname stops changing
if [ "$dir" = "/" ] || [ "$dir" = "$prev_dir" ]; then
break
fi
prev_dir="$dir"
dir="$(dirname "$dir")"
done
return 1
}
# Get repository root, prioritizing .specify directory over git
# This prevents using a parent git repo when spec-kit is initialized in a subdirectory
# Get repository root, with fallback for non-git repositories
get_repo_root() {
# First, look for .specify directory (spec-kit's own marker)
local specify_root
if specify_root=$(find_specify_root); then
echo "$specify_root"
return
fi
# Fallback to git if no .specify found
if git rev-parse --show-toplevel >/dev/null 2>&1; then
git rev-parse --show-toplevel
return
else
# Fall back to script location for non-git repos
local script_dir="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
(cd "$script_dir/../../.." && pwd)
fi
# Final fallback to script location for non-git repos
local script_dir="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
(cd "$script_dir/../../.." && pwd)
}
# Get current branch, with fallback for non-git repositories
@@ -53,40 +20,29 @@ get_current_branch() {
return
fi
# Then check git if available at the spec-kit root (not parent)
local repo_root=$(get_repo_root)
if has_git; then
git -C "$repo_root" rev-parse --abbrev-ref HEAD
# Then check git if available
if git rev-parse --abbrev-ref HEAD >/dev/null 2>&1; then
git rev-parse --abbrev-ref HEAD
return
fi
# For non-git repos, try to find the latest feature directory
local repo_root=$(get_repo_root)
local specs_dir="$repo_root/specs"
if [[ -d "$specs_dir" ]]; then
local latest_feature=""
local highest=0
local latest_timestamp=""
for dir in "$specs_dir"/*; do
if [[ -d "$dir" ]]; then
local dirname=$(basename "$dir")
if [[ "$dirname" =~ ^([0-9]{8}-[0-9]{6})- ]]; then
# Timestamp-based branch: compare lexicographically
local ts="${BASH_REMATCH[1]}"
if [[ "$ts" > "$latest_timestamp" ]]; then
latest_timestamp="$ts"
latest_feature=$dirname
fi
elif [[ "$dirname" =~ ^([0-9]{3,})- ]]; then
if [[ "$dirname" =~ ^([0-9]{3})- ]]; then
local number=${BASH_REMATCH[1]}
number=$((10#$number))
if [[ "$number" -gt "$highest" ]]; then
highest=$number
# Only update if no timestamp branch found yet
if [[ -z "$latest_timestamp" ]]; then
latest_feature=$dirname
fi
latest_feature=$dirname
fi
fi
fi
@@ -101,17 +57,9 @@ get_current_branch() {
echo "main" # Final fallback
}
# Check if we have git available at the spec-kit root level
# Returns true only if git is installed and the repo root is inside a git work tree
# Handles both regular repos (.git directory) and worktrees/submodules (.git file)
# Check if we have git available
has_git() {
# First check if git command is available (before calling get_repo_root which may use git)
command -v git >/dev/null 2>&1 || return 1
local repo_root=$(get_repo_root)
# Check if .git exists (directory or file for worktrees/submodules)
[ -e "$repo_root/.git" ] || return 1
# Verify it's actually a valid git work tree
git -C "$repo_root" rev-parse --is-inside-work-tree >/dev/null 2>&1
git rev-parse --show-toplevel >/dev/null 2>&1
}
check_feature_branch() {
@@ -124,15 +72,9 @@ check_feature_branch() {
return 0
fi
# Accept sequential prefix (3+ digits) but exclude malformed timestamps
# Malformed: 7-or-8 digit date + 6-digit time with no trailing slug (e.g. "2026031-143022" or "20260319-143022")
local is_sequential=false
if [[ "$branch" =~ ^[0-9]{3,}- ]] && [[ ! "$branch" =~ ^[0-9]{7}-[0-9]{6}- ]] && [[ ! "$branch" =~ ^[0-9]{7,8}-[0-9]{6}$ ]]; then
is_sequential=true
fi
if [[ "$is_sequential" != "true" ]] && [[ ! "$branch" =~ ^[0-9]{8}-[0-9]{6}- ]]; then
if [[ ! "$branch" =~ ^[0-9]{3}- ]]; then
echo "ERROR: Not on a feature branch. Current branch: $branch" >&2
echo "Feature branches should be named like: 001-feature-name, 1234-feature-name, or 20260319-143022-feature-name" >&2
echo "Feature branches should be named like: 001-feature-name" >&2
return 1
fi
@@ -148,18 +90,15 @@ find_feature_dir_by_prefix() {
local branch_name="$2"
local specs_dir="$repo_root/specs"
# Extract prefix from branch (e.g., "004" from "004-whatever" or "20260319-143022" from timestamp branches)
local prefix=""
if [[ "$branch_name" =~ ^([0-9]{8}-[0-9]{6})- ]]; then
prefix="${BASH_REMATCH[1]}"
elif [[ "$branch_name" =~ ^([0-9]{3,})- ]]; then
prefix="${BASH_REMATCH[1]}"
else
# If branch doesn't have a recognized prefix, fall back to exact match
# Extract numeric prefix from branch (e.g., "004" from "004-whatever")
if [[ ! "$branch_name" =~ ^([0-9]{3})- ]]; then
# If branch doesn't have numeric prefix, fall back to exact match
echo "$specs_dir/$branch_name"
return
fi
local prefix="${BASH_REMATCH[1]}"
# Search for directories in specs/ that start with this prefix
local matches=()
if [[ -d "$specs_dir" ]]; then
@@ -180,7 +119,7 @@ find_feature_dir_by_prefix() {
else
# Multiple matches - this shouldn't happen with proper naming convention
echo "ERROR: Multiple spec directories found with prefix '$prefix': ${matches[*]}" >&2
echo "Please ensure only one spec directory exists per prefix." >&2
echo "Please ensure only one spec directory exists per numeric prefix." >&2
return 1
fi
}
@@ -222,7 +161,7 @@ has_jq() {
}
# Escape a string for safe embedding in a JSON value (fallback when jq is unavailable).
# Handles backslash, double-quote, and JSON-required control character escapes (RFC 8259).
# Handles backslash, double-quote, and control characters (newline, tab, carriage return).
json_escape() {
local s="$1"
s="${s//\\/\\\\}"
@@ -230,23 +169,7 @@ json_escape() {
s="${s//$'\n'/\\n}"
s="${s//$'\t'/\\t}"
s="${s//$'\r'/\\r}"
s="${s//$'\b'/\\b}"
s="${s//$'\f'/\\f}"
# Escape any remaining U+0001-U+001F control characters as \uXXXX.
# (U+0000/NUL cannot appear in bash strings and is excluded.)
# LC_ALL=C ensures ${#s} counts bytes and ${s:$i:1} yields single bytes,
# so multi-byte UTF-8 sequences (first byte >= 0xC0) pass through intact.
local LC_ALL=C
local i char code
for (( i=0; i<${#s}; i++ )); do
char="${s:$i:1}"
printf -v code '%d' "'$char" 2>/dev/null || code=256
if (( code >= 1 && code <= 31 )); then
printf '\\u%04x' "$code"
else
printf '%s' "$char"
fi
done
printf '%s' "$s"
}
check_file() { [[ -f "$1" ]] && echo "✓ $2" || echo "✗ $2"; }
@@ -271,11 +194,9 @@ resolve_template() {
if [ -d "$presets_dir" ]; then
local registry_file="$presets_dir/.registry"
if [ -f "$registry_file" ] && command -v python3 >/dev/null 2>&1; then
# Read preset IDs sorted by priority (lower number = higher precedence).
# The python3 call is wrapped in an if-condition so that set -e does not
# abort the function when python3 exits non-zero (e.g. invalid JSON).
local sorted_presets=""
if sorted_presets=$(SPECKIT_REGISTRY="$registry_file" python3 -c "
# Read preset IDs sorted by priority (lower number = higher precedence)
local sorted_presets
sorted_presets=$(SPECKIT_REGISTRY="$registry_file" python3 -c "
import json, sys, os
try:
with open(os.environ['SPECKIT_REGISTRY']) as f:
@@ -285,17 +206,14 @@ try:
print(pid)
except Exception:
sys.exit(1)
" 2>/dev/null); then
if [ -n "$sorted_presets" ]; then
# python3 succeeded and returned preset IDs — search in priority order
while IFS= read -r preset_id; do
local candidate="$presets_dir/$preset_id/templates/${template_name}.md"
[ -f "$candidate" ] && echo "$candidate" && return 0
done <<< "$sorted_presets"
fi
# python3 succeeded but registry has no presets — nothing to search
" 2>/dev/null)
if [ $? -eq 0 ] && [ -n "$sorted_presets" ]; then
while IFS= read -r preset_id; do
local candidate="$presets_dir/$preset_id/templates/${template_name}.md"
[ -f "$candidate" ] && echo "$candidate" && return 0
done <<< "$sorted_presets"
else
# python3 failed (missing, or registry parse error) — fall back to unordered directory scan
# python3 returned empty list — fall through to directory scan
for preset in "$presets_dir"/*/; do
[ -d "$preset" ] || continue
local candidate="$preset/templates/${template_name}.md"
@@ -328,9 +246,8 @@ except Exception:
local core="$base/${template_name}.md"
[ -f "$core" ] && echo "$core" && return 0
# Template not found in any location.
# Return 1 so callers can distinguish "not found" from "found".
# Callers running under set -e should use: TEMPLATE=$(resolve_template ...) || true
return 1
# Return success with empty output so callers using set -e don't abort;
# callers check [ -n "$TEMPLATE" ] to detect "not found".
return 0
}

View File

@@ -3,24 +3,15 @@
set -e
JSON_MODE=false
DRY_RUN=false
ALLOW_EXISTING=false
SHORT_NAME=""
BRANCH_NUMBER=""
USE_TIMESTAMP=false
ARGS=()
i=1
while [ $i -le $# ]; do
arg="${!i}"
case "$arg" in
--json)
JSON_MODE=true
;;
--dry-run)
DRY_RUN=true
;;
--allow-existing-branch)
ALLOW_EXISTING=true
--json)
JSON_MODE=true
;;
--short-name)
if [ $((i + 1)) -gt $# ]; then
@@ -49,29 +40,22 @@ while [ $i -le $# ]; do
fi
BRANCH_NUMBER="$next_arg"
;;
--timestamp)
USE_TIMESTAMP=true
;;
--help|-h)
echo "Usage: $0 [--json] [--dry-run] [--allow-existing-branch] [--short-name <name>] [--number N] [--timestamp] <feature_description>"
--help|-h)
echo "Usage: $0 [--json] [--short-name <name>] [--number N] <feature_description>"
echo ""
echo "Options:"
echo " --json Output in JSON format"
echo " --dry-run Compute branch name and paths without creating branches, directories, or files"
echo " --allow-existing-branch Switch to branch if it already exists instead of failing"
echo " --short-name <name> Provide a custom short name (2-4 words) for the branch"
echo " --number N Specify branch number manually (overrides auto-detection)"
echo " --timestamp Use timestamp prefix (YYYYMMDD-HHMMSS) instead of sequential numbering"
echo " --help, -h Show this help message"
echo ""
echo "Examples:"
echo " $0 'Add user authentication system' --short-name 'user-auth'"
echo " $0 'Implement OAuth2 integration for API' --number 5"
echo " $0 --timestamp --short-name 'user-auth' 'Add user authentication'"
exit 0
;;
*)
ARGS+=("$arg")
*)
ARGS+=("$arg")
;;
esac
i=$((i + 1))
@@ -79,7 +63,7 @@ done
FEATURE_DESCRIPTION="${ARGS[*]}"
if [ -z "$FEATURE_DESCRIPTION" ]; then
echo "Usage: $0 [--json] [--dry-run] [--allow-existing-branch] [--short-name <name>] [--number N] [--timestamp] <feature_description>" >&2
echo "Usage: $0 [--json] [--short-name <name>] [--number N] <feature_description>" >&2
exit 1
fi
@@ -90,6 +74,19 @@ if [ -z "$FEATURE_DESCRIPTION" ]; then
exit 1
fi
# Function to find the repository root by searching for existing project markers
find_repo_root() {
local dir="$1"
while [ "$dir" != "/" ]; do
if [ -d "$dir/.git" ] || [ -d "$dir/.specify" ]; then
echo "$dir"
return 0
fi
dir="$(dirname "$dir")"
done
return 1
}
# Function to get highest number from specs directory
get_highest_from_specs() {
local specs_dir="$1"
@@ -99,13 +96,10 @@ get_highest_from_specs() {
for dir in "$specs_dir"/*; do
[ -d "$dir" ] || continue
dirname=$(basename "$dir")
# Match sequential prefixes (>=3 digits), but skip timestamp dirs.
if echo "$dirname" | grep -Eq '^[0-9]{3,}-' && ! echo "$dirname" | grep -Eq '^[0-9]{8}-[0-9]{6}-'; then
number=$(echo "$dirname" | grep -Eo '^[0-9]+')
number=$((10#$number))
if [ "$number" -gt "$highest" ]; then
highest=$number
fi
number=$(echo "$dirname" | grep -o '^[0-9]\+' || echo "0")
number=$((10#$number))
if [ "$number" -gt "$highest" ]; then
highest=$number
fi
done
fi
@@ -115,59 +109,39 @@ get_highest_from_specs() {
# Function to get highest number from git branches
get_highest_from_branches() {
git branch -a 2>/dev/null | sed 's/^[* ]*//; s|^remotes/[^/]*/||' | _extract_highest_number
}
# Extract the highest sequential feature number from a list of ref names (one per line).
# Shared by get_highest_from_branches and get_highest_from_remote_refs.
_extract_highest_number() {
local highest=0
while IFS= read -r name; do
[ -z "$name" ] && continue
if echo "$name" | grep -Eq '^[0-9]{3,}-' && ! echo "$name" | grep -Eq '^[0-9]{8}-[0-9]{6}-'; then
number=$(echo "$name" | grep -Eo '^[0-9]+' || echo "0")
number=$((10#$number))
if [ "$number" -gt "$highest" ]; then
highest=$number
# Get all branches (local and remote)
branches=$(git branch -a 2>/dev/null || echo "")
if [ -n "$branches" ]; then
while IFS= read -r branch; do
# Clean branch name: remove leading markers and remote prefixes
clean_branch=$(echo "$branch" | sed 's/^[* ]*//; s|^remotes/[^/]*/||')
# Extract feature number if branch matches pattern ###-*
if echo "$clean_branch" | grep -q '^[0-9]\{3\}-'; then
number=$(echo "$clean_branch" | grep -o '^[0-9]\{3\}' || echo "0")
number=$((10#$number))
if [ "$number" -gt "$highest" ]; then
highest=$number
fi
fi
fi
done
done <<< "$branches"
fi
echo "$highest"
}
# Function to get highest number from remote branches without fetching (side-effect-free)
get_highest_from_remote_refs() {
local highest=0
for remote in $(git remote 2>/dev/null); do
local remote_highest
remote_highest=$(GIT_TERMINAL_PROMPT=0 git ls-remote --heads "$remote" 2>/dev/null | sed 's|.*refs/heads/||' | _extract_highest_number)
if [ "$remote_highest" -gt "$highest" ]; then
highest=$remote_highest
fi
done
echo "$highest"
}
# Function to check existing branches (local and remote) and return next available number.
# When skip_fetch is true, queries remotes via ls-remote (read-only) instead of fetching.
# Function to check existing branches (local and remote) and return next available number
check_existing_branches() {
local specs_dir="$1"
local skip_fetch="${2:-false}"
if [ "$skip_fetch" = true ]; then
# Side-effect-free: query remotes via ls-remote
local highest_remote=$(get_highest_from_remote_refs)
local highest_branch=$(get_highest_from_branches)
if [ "$highest_remote" -gt "$highest_branch" ]; then
highest_branch=$highest_remote
fi
else
# Fetch all remotes to get latest branch info (suppress errors if no remotes)
git fetch --all --prune >/dev/null 2>&1 || true
local highest_branch=$(get_highest_from_branches)
fi
# Fetch all remotes to get latest branch info (suppress errors if no remotes)
git fetch --all --prune 2>/dev/null || true
# Get highest number from ALL branches (not just matching short name)
local highest_branch=$(get_highest_from_branches)
# Get highest number from ALL specs (not just matching short name)
local highest_spec=$(get_highest_from_specs "$specs_dir")
@@ -188,25 +162,39 @@ clean_branch_name() {
echo "$name" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//'
}
# Resolve repository root using common.sh functions which prioritize .specify over git
# Escape a string for safe embedding in a JSON value (fallback when jq is unavailable).
json_escape() {
local s="$1"
s="${s//\\/\\\\}"
s="${s//\"/\\\"}"
s="${s//$'\n'/\\n}"
s="${s//$'\t'/\\t}"
s="${s//$'\r'/\\r}"
printf '%s' "$s"
}
# Resolve repository root. Prefer git information when available, but fall back
# to searching for repository markers so the workflow still functions in repositories that
# were initialised with --no-git.
SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/common.sh"
REPO_ROOT=$(get_repo_root)
# Check if git is available at this repo root (not a parent)
if has_git; then
if git rev-parse --show-toplevel >/dev/null 2>&1; then
REPO_ROOT=$(git rev-parse --show-toplevel)
HAS_GIT=true
else
REPO_ROOT="$(find_repo_root "$SCRIPT_DIR")"
if [ -z "$REPO_ROOT" ]; then
echo "Error: Could not determine repository root. Please run this script from within the repository." >&2
exit 1
fi
HAS_GIT=false
fi
cd "$REPO_ROOT"
SPECS_DIR="$REPO_ROOT/specs"
if [ "$DRY_RUN" != true ]; then
mkdir -p "$SPECS_DIR"
fi
mkdir -p "$SPECS_DIR"
# Function to generate branch name with stop word filtering and length filtering
generate_branch_name() {
@@ -265,49 +253,29 @@ else
BRANCH_SUFFIX=$(generate_branch_name "$FEATURE_DESCRIPTION")
fi
# Warn if --number and --timestamp are both specified
if [ "$USE_TIMESTAMP" = true ] && [ -n "$BRANCH_NUMBER" ]; then
>&2 echo "[specify] Warning: --number is ignored when --timestamp is used"
BRANCH_NUMBER=""
fi
# Determine branch prefix
if [ "$USE_TIMESTAMP" = true ]; then
FEATURE_NUM=$(date +%Y%m%d-%H%M%S)
BRANCH_NAME="${FEATURE_NUM}-${BRANCH_SUFFIX}"
else
# Determine branch number
if [ -z "$BRANCH_NUMBER" ]; then
if [ "$DRY_RUN" = true ] && [ "$HAS_GIT" = true ]; then
# Dry-run: query remotes via ls-remote (side-effect-free, no fetch)
BRANCH_NUMBER=$(check_existing_branches "$SPECS_DIR" true)
elif [ "$DRY_RUN" = true ]; then
# Dry-run without git: local spec dirs only
HIGHEST=$(get_highest_from_specs "$SPECS_DIR")
BRANCH_NUMBER=$((HIGHEST + 1))
elif [ "$HAS_GIT" = true ]; then
# Check existing branches on remotes
BRANCH_NUMBER=$(check_existing_branches "$SPECS_DIR")
else
# Fall back to local directory check
HIGHEST=$(get_highest_from_specs "$SPECS_DIR")
BRANCH_NUMBER=$((HIGHEST + 1))
fi
# Determine branch number
if [ -z "$BRANCH_NUMBER" ]; then
if [ "$HAS_GIT" = true ]; then
# Check existing branches on remotes
BRANCH_NUMBER=$(check_existing_branches "$SPECS_DIR")
else
# Fall back to local directory check
HIGHEST=$(get_highest_from_specs "$SPECS_DIR")
BRANCH_NUMBER=$((HIGHEST + 1))
fi
# Force base-10 interpretation to prevent octal conversion (e.g., 010 → 8 in octal, but should be 10 in decimal)
FEATURE_NUM=$(printf "%03d" "$((10#$BRANCH_NUMBER))")
BRANCH_NAME="${FEATURE_NUM}-${BRANCH_SUFFIX}"
fi
# Force base-10 interpretation to prevent octal conversion (e.g., 010 → 8 in octal, but should be 10 in decimal)
FEATURE_NUM=$(printf "%03d" "$((10#$BRANCH_NUMBER))")
BRANCH_NAME="${FEATURE_NUM}-${BRANCH_SUFFIX}"
# GitHub enforces a 244-byte limit on branch names
# Validate and truncate if necessary
MAX_BRANCH_LENGTH=244
if [ ${#BRANCH_NAME} -gt $MAX_BRANCH_LENGTH ]; then
# Calculate how much we need to trim from suffix
# Account for prefix length: timestamp (15) + hyphen (1) = 16, or sequential (3) + hyphen (1) = 4
PREFIX_LENGTH=$(( ${#FEATURE_NUM} + 1 ))
MAX_SUFFIX_LENGTH=$((MAX_BRANCH_LENGTH - PREFIX_LENGTH))
# Account for: feature number (3) + hyphen (1) = 4 chars
MAX_SUFFIX_LENGTH=$((MAX_BRANCH_LENGTH - 4))
# Truncate suffix at word boundary if possible
TRUNCATED_SUFFIX=$(echo "$BRANCH_SUFFIX" | cut -c1-$MAX_SUFFIX_LENGTH)
@@ -322,79 +290,44 @@ if [ ${#BRANCH_NAME} -gt $MAX_BRANCH_LENGTH ]; then
>&2 echo "[specify] Truncated to: $BRANCH_NAME (${#BRANCH_NAME} bytes)"
fi
FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
SPEC_FILE="$FEATURE_DIR/spec.md"
if [ "$DRY_RUN" != true ]; then
if [ "$HAS_GIT" = true ]; then
if ! git checkout -b "$BRANCH_NAME" 2>/dev/null; then
# Check if branch already exists
if git branch --list "$BRANCH_NAME" | grep -q .; then
if [ "$ALLOW_EXISTING" = true ]; then
# Switch to the existing branch instead of failing
if ! git checkout "$BRANCH_NAME" 2>/dev/null; then
>&2 echo "Error: Failed to switch to existing branch '$BRANCH_NAME'. Please resolve any local changes or conflicts and try again."
exit 1
fi
elif [ "$USE_TIMESTAMP" = true ]; then
>&2 echo "Error: Branch '$BRANCH_NAME' already exists. Rerun to get a new timestamp or use a different --short-name."
exit 1
else
>&2 echo "Error: Branch '$BRANCH_NAME' already exists. Please use a different feature name or specify a different number with --number."
exit 1
fi
else
>&2 echo "Error: Failed to create git branch '$BRANCH_NAME'. Please check your git configuration and try again."
exit 1
fi
fi
else
>&2 echo "[specify] Warning: Git repository not detected; skipped branch creation for $BRANCH_NAME"
fi
mkdir -p "$FEATURE_DIR"
if [ ! -f "$SPEC_FILE" ]; then
TEMPLATE=$(resolve_template "spec-template" "$REPO_ROOT") || true
if [ -n "$TEMPLATE" ] && [ -f "$TEMPLATE" ]; then
cp "$TEMPLATE" "$SPEC_FILE"
if [ "$HAS_GIT" = true ]; then
if ! git checkout -b "$BRANCH_NAME" 2>/dev/null; then
# Check if branch already exists
if git branch --list "$BRANCH_NAME" | grep -q .; then
>&2 echo "Error: Branch '$BRANCH_NAME' already exists. Please use a different feature name or specify a different number with --number."
exit 1
else
echo "Warning: Spec template not found; created empty spec file" >&2
touch "$SPEC_FILE"
>&2 echo "Error: Failed to create git branch '$BRANCH_NAME'. Please check your git configuration and try again."
exit 1
fi
fi
# Inform the user how to persist the feature variable in their own shell
printf '# To persist: export SPECIFY_FEATURE=%q\n' "$BRANCH_NAME" >&2
else
>&2 echo "[specify] Warning: Git repository not detected; skipped branch creation for $BRANCH_NAME"
fi
FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
mkdir -p "$FEATURE_DIR"
TEMPLATE=$(resolve_template "spec-template" "$REPO_ROOT")
SPEC_FILE="$FEATURE_DIR/spec.md"
if [ -n "$TEMPLATE" ] && [ -f "$TEMPLATE" ]; then cp "$TEMPLATE" "$SPEC_FILE"; else touch "$SPEC_FILE"; fi
# Inform the user how to persist the feature variable in their own shell
printf '# To persist: export SPECIFY_FEATURE=%q\n' "$BRANCH_NAME" >&2
if $JSON_MODE; then
if command -v jq >/dev/null 2>&1; then
if [ "$DRY_RUN" = true ]; then
jq -cn \
--arg branch_name "$BRANCH_NAME" \
--arg spec_file "$SPEC_FILE" \
--arg feature_num "$FEATURE_NUM" \
'{BRANCH_NAME:$branch_name,SPEC_FILE:$spec_file,FEATURE_NUM:$feature_num,DRY_RUN:true}'
else
jq -cn \
--arg branch_name "$BRANCH_NAME" \
--arg spec_file "$SPEC_FILE" \
--arg feature_num "$FEATURE_NUM" \
'{BRANCH_NAME:$branch_name,SPEC_FILE:$spec_file,FEATURE_NUM:$feature_num}'
fi
jq -cn \
--arg branch_name "$BRANCH_NAME" \
--arg spec_file "$SPEC_FILE" \
--arg feature_num "$FEATURE_NUM" \
'{BRANCH_NAME:$branch_name,SPEC_FILE:$spec_file,FEATURE_NUM:$feature_num}'
else
if [ "$DRY_RUN" = true ]; then
printf '{"BRANCH_NAME":"%s","SPEC_FILE":"%s","FEATURE_NUM":"%s","DRY_RUN":true}\n' "$(json_escape "$BRANCH_NAME")" "$(json_escape "$SPEC_FILE")" "$(json_escape "$FEATURE_NUM")"
else
printf '{"BRANCH_NAME":"%s","SPEC_FILE":"%s","FEATURE_NUM":"%s"}\n' "$(json_escape "$BRANCH_NAME")" "$(json_escape "$SPEC_FILE")" "$(json_escape "$FEATURE_NUM")"
fi
printf '{"BRANCH_NAME":"%s","SPEC_FILE":"%s","FEATURE_NUM":"%s"}\n' "$(json_escape "$BRANCH_NAME")" "$(json_escape "$SPEC_FILE")" "$(json_escape "$FEATURE_NUM")"
fi
else
echo "BRANCH_NAME: $BRANCH_NAME"
echo "SPEC_FILE: $SPEC_FILE"
echo "FEATURE_NUM: $FEATURE_NUM"
if [ "$DRY_RUN" != true ]; then
printf '# To persist in your shell: export SPECIFY_FEATURE=%q\n' "$BRANCH_NAME"
fi
printf '# To persist in your shell: export SPECIFY_FEATURE=%q\n' "$BRANCH_NAME"
fi

View File

@@ -39,7 +39,7 @@ check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
mkdir -p "$FEATURE_DIR"
# Copy plan template if it exists
TEMPLATE=$(resolve_template "plan-template" "$REPO_ROOT") || true
TEMPLATE=$(resolve_template "plan-template" "$REPO_ROOT")
if [[ -n "$TEMPLATE" ]] && [[ -f "$TEMPLATE" ]]; then
cp "$TEMPLATE" "$IMPL_PLAN"
echo "Copied plan template to $IMPL_PLAN"

View File

@@ -30,12 +30,12 @@
#
# 5. Multi-Agent Support
# - Handles agent-specific file paths and naming conventions
# - Supports: Claude, Gemini, Copilot, Cursor, Qwen, opencode, Codex, Windsurf, Junie, Kilo Code, Auggie CLI, Roo Code, CodeBuddy CLI, Qoder CLI, Amp, SHAI, Tabnine CLI, Kiro CLI, Mistral Vibe, Kimi Code, Pi Coding Agent, iFlow CLI, Antigravity or Generic
# - Supports: Claude, Gemini, Copilot, Cursor, Qwen, opencode, Codex, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy CLI, Qoder CLI, Amp, SHAI, Tabnine CLI, Kiro CLI, Mistral Vibe, Kimi Code, Antigravity or Generic
# - Can update single agents or all existing agent files
# - Creates default Claude file if no agent files exist
#
# Usage: ./update-agent-context.sh [agent_type]
# Agent types: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|junie|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|trae|pi|iflow|generic
# Agent types: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|generic
# Leave empty to update all existing agent files
set -e
@@ -63,18 +63,17 @@ AGENT_TYPE="${1:-}"
# Agent-specific file paths
CLAUDE_FILE="$REPO_ROOT/CLAUDE.md"
GEMINI_FILE="$REPO_ROOT/GEMINI.md"
COPILOT_FILE="$REPO_ROOT/.github/copilot-instructions.md"
COPILOT_FILE="$REPO_ROOT/.github/agents/copilot-instructions.md"
CURSOR_FILE="$REPO_ROOT/.cursor/rules/specify-rules.mdc"
QWEN_FILE="$REPO_ROOT/QWEN.md"
AGENTS_FILE="$REPO_ROOT/AGENTS.md"
WINDSURF_FILE="$REPO_ROOT/.windsurf/rules/specify-rules.md"
JUNIE_FILE="$REPO_ROOT/.junie/AGENTS.md"
KILOCODE_FILE="$REPO_ROOT/.kilocode/rules/specify-rules.md"
AUGGIE_FILE="$REPO_ROOT/.augment/rules/specify-rules.md"
ROO_FILE="$REPO_ROOT/.roo/rules/specify-rules.md"
CODEBUDDY_FILE="$REPO_ROOT/CODEBUDDY.md"
QODER_FILE="$REPO_ROOT/QODER.md"
# Amp, Kiro CLI, IBM Bob, and Pi all share AGENTS.md — use AGENTS_FILE to avoid
# AMP, Kiro CLI, and IBM Bob all share AGENTS.md — use AGENTS_FILE to avoid
# updating the same file multiple times.
AMP_FILE="$AGENTS_FILE"
SHAI_FILE="$REPO_ROOT/SHAI.md"
@@ -84,8 +83,6 @@ AGY_FILE="$REPO_ROOT/.agent/rules/specify-rules.md"
BOB_FILE="$AGENTS_FILE"
VIBE_FILE="$REPO_ROOT/.vibe/agents/specify-agents.md"
KIMI_FILE="$REPO_ROOT/KIMI.md"
TRAE_FILE="$REPO_ROOT/.trae/rules/AGENTS.md"
IFLOW_FILE="$REPO_ROOT/IFLOW.md"
# Template file
TEMPLATE_FILE="$REPO_ROOT/.specify/templates/agent-file-template.md"
@@ -639,9 +636,6 @@ update_specific_agent() {
windsurf)
update_agent_file "$WINDSURF_FILE" "Windsurf" || return 1
;;
junie)
update_agent_file "$JUNIE_FILE" "Junie" || return 1
;;
kilocode)
update_agent_file "$KILOCODE_FILE" "Kilo Code" || return 1
;;
@@ -681,90 +675,67 @@ update_specific_agent() {
kimi)
update_agent_file "$KIMI_FILE" "Kimi Code" || return 1
;;
trae)
update_agent_file "$TRAE_FILE" "Trae" || return 1
;;
pi)
update_agent_file "$AGENTS_FILE" "Pi Coding Agent" || return 1
;;
iflow)
update_agent_file "$IFLOW_FILE" "iFlow CLI" || return 1
;;
generic)
log_info "Generic agent: no predefined context file. Use the agent-specific update script for your agent."
;;
*)
log_error "Unknown agent type '$agent_type'"
log_error "Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|junie|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|trae|pi|iflow|generic"
log_error "Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|generic"
exit 1
;;
esac
}
# Helper: skip non-existent files and files already updated (dedup by
# realpath so that variables pointing to the same file — e.g. AMP_FILE,
# KIRO_FILE, BOB_FILE all resolving to AGENTS_FILE — are only written once).
# Uses a linear array instead of associative array for bash 3.2 compatibility.
# Note: defined at top level because bash 3.2 does not support true
# nested/local functions. _updated_paths, _found_agent, and _all_ok are
# initialised exclusively inside update_all_existing_agents so that
# sourcing this script has no side effects on the caller's environment.
_update_if_new() {
local file="$1" name="$2"
[[ -f "$file" ]] || return 0
local real_path
real_path=$(realpath "$file" 2>/dev/null || echo "$file")
local p
if [[ ${#_updated_paths[@]} -gt 0 ]]; then
for p in "${_updated_paths[@]}"; do
[[ "$p" == "$real_path" ]] && return 0
done
fi
# Record the file as seen before attempting the update so that:
# (a) aliases pointing to the same path are not retried on failure
# (b) _found_agent reflects file existence, not update success
_updated_paths+=("$real_path")
_found_agent=true
update_agent_file "$file" "$name"
}
update_all_existing_agents() {
_found_agent=false
_updated_paths=()
local _all_ok=true
local found_agent=false
local _updated_paths=()
_update_if_new "$CLAUDE_FILE" "Claude Code" || _all_ok=false
_update_if_new "$GEMINI_FILE" "Gemini CLI" || _all_ok=false
_update_if_new "$COPILOT_FILE" "GitHub Copilot" || _all_ok=false
_update_if_new "$CURSOR_FILE" "Cursor IDE" || _all_ok=false
_update_if_new "$QWEN_FILE" "Qwen Code" || _all_ok=false
_update_if_new "$AGENTS_FILE" "Codex/opencode" || _all_ok=false
_update_if_new "$AMP_FILE" "Amp" || _all_ok=false
_update_if_new "$KIRO_FILE" "Kiro CLI" || _all_ok=false
_update_if_new "$BOB_FILE" "IBM Bob" || _all_ok=false
_update_if_new "$WINDSURF_FILE" "Windsurf" || _all_ok=false
_update_if_new "$JUNIE_FILE" "Junie" || _all_ok=false
_update_if_new "$KILOCODE_FILE" "Kilo Code" || _all_ok=false
_update_if_new "$AUGGIE_FILE" "Auggie CLI" || _all_ok=false
_update_if_new "$ROO_FILE" "Roo Code" || _all_ok=false
_update_if_new "$CODEBUDDY_FILE" "CodeBuddy CLI" || _all_ok=false
_update_if_new "$SHAI_FILE" "SHAI" || _all_ok=false
_update_if_new "$TABNINE_FILE" "Tabnine CLI" || _all_ok=false
_update_if_new "$QODER_FILE" "Qoder CLI" || _all_ok=false
_update_if_new "$AGY_FILE" "Antigravity" || _all_ok=false
_update_if_new "$VIBE_FILE" "Mistral Vibe" || _all_ok=false
_update_if_new "$KIMI_FILE" "Kimi Code" || _all_ok=false
_update_if_new "$TRAE_FILE" "Trae" || _all_ok=false
_update_if_new "$IFLOW_FILE" "iFlow CLI" || _all_ok=false
# Helper: skip non-existent files and files already updated (dedup by
# realpath so that variables pointing to the same file — e.g. AMP_FILE,
# KIRO_FILE, BOB_FILE all resolving to AGENTS_FILE — are only written once).
# Uses a linear array instead of associative array for bash 3.2 compatibility.
update_if_new() {
local file="$1" name="$2"
[[ -f "$file" ]] || return 0
local real_path
real_path=$(realpath "$file" 2>/dev/null || echo "$file")
local p
if [[ ${#_updated_paths[@]} -gt 0 ]]; then
for p in "${_updated_paths[@]}"; do
[[ "$p" == "$real_path" ]] && return 0
done
fi
update_agent_file "$file" "$name" || return 1
_updated_paths+=("$real_path")
found_agent=true
}
update_if_new "$CLAUDE_FILE" "Claude Code"
update_if_new "$GEMINI_FILE" "Gemini CLI"
update_if_new "$COPILOT_FILE" "GitHub Copilot"
update_if_new "$CURSOR_FILE" "Cursor IDE"
update_if_new "$QWEN_FILE" "Qwen Code"
update_if_new "$AGENTS_FILE" "Codex/opencode"
update_if_new "$AMP_FILE" "Amp"
update_if_new "$KIRO_FILE" "Kiro CLI"
update_if_new "$BOB_FILE" "IBM Bob"
update_if_new "$WINDSURF_FILE" "Windsurf"
update_if_new "$KILOCODE_FILE" "Kilo Code"
update_if_new "$AUGGIE_FILE" "Auggie CLI"
update_if_new "$ROO_FILE" "Roo Code"
update_if_new "$CODEBUDDY_FILE" "CodeBuddy CLI"
update_if_new "$SHAI_FILE" "SHAI"
update_if_new "$TABNINE_FILE" "Tabnine CLI"
update_if_new "$QODER_FILE" "Qoder CLI"
update_if_new "$AGY_FILE" "Antigravity"
update_if_new "$VIBE_FILE" "Mistral Vibe"
update_if_new "$KIMI_FILE" "Kimi Code"
# If no agent files exist, create a default Claude file
if [[ "$_found_agent" == false ]]; then
if [[ "$found_agent" == false ]]; then
log_info "No existing agent files found, creating default Claude file..."
update_agent_file "$CLAUDE_FILE" "Claude Code" || return 1
fi
[[ "$_all_ok" == true ]]
}
print_summary() {
echo
@@ -783,7 +754,7 @@ print_summary() {
fi
echo
log_info "Usage: $0 [claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|junie|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|trae|pi|iflow|generic]"
log_info "Usage: $0 [claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|generic]"
}
#==============================================================================

View File

@@ -1,39 +1,7 @@
#!/usr/bin/env pwsh
# Common PowerShell functions analogous to common.sh
# Find repository root by searching upward for .specify directory
# This is the primary marker for spec-kit projects
function Find-SpecifyRoot {
param([string]$StartDir = (Get-Location).Path)
# Normalize to absolute path to prevent issues with relative paths
# Use -LiteralPath to handle paths with wildcard characters ([, ], *, ?)
$resolved = Resolve-Path -LiteralPath $StartDir -ErrorAction SilentlyContinue
$current = if ($resolved) { $resolved.Path } else { $null }
if (-not $current) { return $null }
while ($true) {
if (Test-Path -LiteralPath (Join-Path $current ".specify") -PathType Container) {
return $current
}
$parent = Split-Path $current -Parent
if ([string]::IsNullOrEmpty($parent) -or $parent -eq $current) {
return $null
}
$current = $parent
}
}
# Get repository root, prioritizing .specify directory over git
# This prevents using a parent git repo when spec-kit is initialized in a subdirectory
function Get-RepoRoot {
# First, look for .specify directory (spec-kit's own marker)
$specifyRoot = Find-SpecifyRoot
if ($specifyRoot) {
return $specifyRoot
}
# Fallback to git if no .specify found
try {
$result = git rev-parse --show-toplevel 2>$null
if ($LASTEXITCODE -eq 0) {
@@ -42,10 +10,9 @@ function Get-RepoRoot {
} catch {
# Git command failed
}
# Final fallback to script location for non-git repos
# Use -LiteralPath to handle paths with wildcard characters
return (Resolve-Path -LiteralPath (Join-Path $PSScriptRoot "../../..")).Path
# Fall back to script location for non-git repos
return (Resolve-Path (Join-Path $PSScriptRoot "../../..")).Path
}
function Get-CurrentBranch {
@@ -53,48 +20,35 @@ function Get-CurrentBranch {
if ($env:SPECIFY_FEATURE) {
return $env:SPECIFY_FEATURE
}
# Then check git if available at the spec-kit root (not parent)
$repoRoot = Get-RepoRoot
if (Test-HasGit) {
try {
$result = git -C $repoRoot rev-parse --abbrev-ref HEAD 2>$null
if ($LASTEXITCODE -eq 0) {
return $result
}
} catch {
# Git command failed
# Then check git if available
try {
$result = git rev-parse --abbrev-ref HEAD 2>$null
if ($LASTEXITCODE -eq 0) {
return $result
}
} catch {
# Git command failed
}
# For non-git repos, try to find the latest feature directory
$repoRoot = Get-RepoRoot
$specsDir = Join-Path $repoRoot "specs"
if (Test-Path $specsDir) {
$latestFeature = ""
$highest = 0
$latestTimestamp = ""
Get-ChildItem -Path $specsDir -Directory | ForEach-Object {
if ($_.Name -match '^(\d{8}-\d{6})-') {
# Timestamp-based branch: compare lexicographically
$ts = $matches[1]
if ($ts -gt $latestTimestamp) {
$latestTimestamp = $ts
$latestFeature = $_.Name
}
} elseif ($_.Name -match '^(\d{3,})-') {
$num = [long]$matches[1]
if ($_.Name -match '^(\d{3})-') {
$num = [int]$matches[1]
if ($num -gt $highest) {
$highest = $num
# Only update if no timestamp branch found yet
if (-not $latestTimestamp) {
$latestFeature = $_.Name
}
$latestFeature = $_.Name
}
}
}
if ($latestFeature) {
return $latestFeature
}
@@ -104,23 +58,9 @@ function Get-CurrentBranch {
return "main"
}
# Check if we have git available at the spec-kit root level
# Returns true only if git is installed and the repo root is inside a git work tree
# Handles both regular repos (.git directory) and worktrees/submodules (.git file)
function Test-HasGit {
# First check if git command is available (before calling Get-RepoRoot which may use git)
if (-not (Get-Command git -ErrorAction SilentlyContinue)) {
return $false
}
$repoRoot = Get-RepoRoot
# Check if .git exists (directory or file for worktrees/submodules)
# Use -LiteralPath to handle paths with wildcard characters
if (-not (Test-Path -LiteralPath (Join-Path $repoRoot ".git"))) {
return $false
}
# Verify it's actually a valid git work tree
try {
$null = git -C $repoRoot rev-parse --is-inside-work-tree 2>$null
git rev-parse --show-toplevel 2>$null | Out-Null
return ($LASTEXITCODE -eq 0)
} catch {
return $false
@@ -139,13 +79,9 @@ function Test-FeatureBranch {
return $true
}
# Accept sequential prefix (3+ digits) but exclude malformed timestamps
# Malformed: 7-digit date + 6-digit time with a slug (e.g. "2026031-143022-auth"), or 7-or-8-digit date + 6-digit time with no trailing slug (e.g. "20260319-143022")
$hasMalformedTimestamp = ($Branch -match '^[0-9]{7}-[0-9]{6}-') -or ($Branch -match '^(?:\d{7}|\d{8})-\d{6}$')
$isSequential = ($Branch -match '^[0-9]{3,}-') -and (-not $hasMalformedTimestamp)
if (-not $isSequential -and $Branch -notmatch '^\d{8}-\d{6}-') {
if ($Branch -notmatch '^[0-9]{3}-') {
Write-Output "ERROR: Not on a feature branch. Current branch: $Branch"
Write-Output "Feature branches should be named like: 001-feature-name, 1234-feature-name, or 20260319-143022-feature-name"
Write-Output "Feature branches should be named like: 001-feature-name"
return $false
}
return $true
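# A small illustrative check of the branch-name patterns above; all sample
# names are made up.
$samples = '001-user-auth', '20260319-143022-user-auth', '20260319-143022', '2026031-143022-auth'
foreach ($b in $samples) {
    $malformed = ($b -match '^[0-9]{7}-[0-9]{6}-') -or ($b -match '^(?:\d{7}|\d{8})-\d{6}$')
    $valid = (($b -match '^[0-9]{3,}-') -and (-not $malformed)) -or ($b -match '^\d{8}-\d{6}-')
    "{0,-30} valid={1}" -f $b, $valid
}
# 001-user-auth and 20260319-143022-user-auth pass; both malformed timestamp
# shapes are rejected.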

View File

@@ -3,41 +3,33 @@
[CmdletBinding()]
param(
[switch]$Json,
[switch]$AllowExistingBranch,
[switch]$DryRun,
[string]$ShortName,
[Parameter()]
[long]$Number = 0,
[switch]$Timestamp,
[int]$Number = 0,
[switch]$Help,
[Parameter(Position = 0, ValueFromRemainingArguments = $true)]
[Parameter(ValueFromRemainingArguments = $true)]
[string[]]$FeatureDescription
)
$ErrorActionPreference = 'Stop'
# Show help if requested
if ($Help) {
Write-Host "Usage: ./create-new-feature.ps1 [-Json] [-DryRun] [-AllowExistingBranch] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
Write-Host "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] [-Number N] <feature description>"
Write-Host ""
Write-Host "Options:"
Write-Host " -Json Output in JSON format"
Write-Host " -DryRun Compute branch name and paths without creating branches, directories, or files"
Write-Host " -AllowExistingBranch Switch to branch if it already exists instead of failing"
Write-Host " -ShortName <name> Provide a custom short name (2-4 words) for the branch"
Write-Host " -Number N Specify branch number manually (overrides auto-detection)"
Write-Host " -Timestamp Use timestamp prefix (YYYYMMDD-HHMMSS) instead of sequential numbering"
Write-Host " -Help Show this help message"
Write-Host ""
Write-Host "Examples:"
Write-Host " ./create-new-feature.ps1 'Add user authentication system' -ShortName 'user-auth'"
Write-Host " ./create-new-feature.ps1 'Implement OAuth2 integration for API'"
Write-Host " ./create-new-feature.ps1 -Timestamp -ShortName 'user-auth' 'Add user authentication'"
exit 0
}
# Check if feature description provided
if (-not $FeatureDescription -or $FeatureDescription.Count -eq 0) {
Write-Error "Usage: ./create-new-feature.ps1 [-Json] [-DryRun] [-AllowExistingBranch] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
Write-Error "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] <feature description>"
exit 1
}
@@ -49,35 +41,39 @@ if ([string]::IsNullOrWhiteSpace($featureDesc)) {
exit 1
}
function Get-HighestNumberFromSpecs {
param([string]$SpecsDir)
[long]$highest = 0
if (Test-Path $SpecsDir) {
Get-ChildItem -Path $SpecsDir -Directory | ForEach-Object {
# Match sequential prefixes (>=3 digits), but skip timestamp dirs.
if ($_.Name -match '^(\d{3,})-' -and $_.Name -notmatch '^\d{8}-\d{6}-') {
[long]$num = 0
if ([long]::TryParse($matches[1], [ref]$num) -and $num -gt $highest) {
$highest = $num
}
# Resolve repository root. Prefer git information when available, but fall back
# to searching for repository markers so the workflow still functions in repositories that
# were initialized with --no-git.
function Find-RepositoryRoot {
param(
[string]$StartDir,
[string[]]$Markers = @('.git', '.specify')
)
$current = Resolve-Path $StartDir
while ($true) {
foreach ($marker in $Markers) {
if (Test-Path (Join-Path $current $marker)) {
return $current
}
}
$parent = Split-Path $current -Parent
if ($parent -eq $current) {
# Reached filesystem root without finding markers
return $null
}
$current = $parent
}
return $highest
}
# Extract the highest sequential feature number from a list of branch/ref names.
# Shared by Get-HighestNumberFromBranches and Get-HighestNumberFromRemoteRefs.
function Get-HighestNumberFromNames {
param([string[]]$Names)
[long]$highest = 0
foreach ($name in $Names) {
if ($name -match '^(\d{3,})-' -and $name -notmatch '^\d{8}-\d{6}-') {
[long]$num = 0
if ([long]::TryParse($matches[1], [ref]$num) -and $num -gt $highest) {
$highest = $num
function Get-HighestNumberFromSpecs {
param([string]$SpecsDir)
$highest = 0
if (Test-Path $SpecsDir) {
Get-ChildItem -Path $SpecsDir -Directory | ForEach-Object {
if ($_.Name -match '^(\d+)') {
$num = [int]$matches[1]
if ($num -gt $highest) { $highest = $num }
}
}
}
@@ -86,68 +82,44 @@ function Get-HighestNumberFromNames {
function Get-HighestNumberFromBranches {
param()
$highest = 0
try {
$branches = git branch -a 2>$null
if ($LASTEXITCODE -eq 0 -and $branches) {
$cleanNames = $branches | ForEach-Object {
$_.Trim() -replace '^\*?\s+', '' -replace '^remotes/[^/]+/', ''
}
return Get-HighestNumberFromNames -Names $cleanNames
}
} catch {
Write-Verbose "Could not check Git branches: $_"
}
return 0
}
function Get-HighestNumberFromRemoteRefs {
[long]$highest = 0
try {
$remotes = git remote 2>$null
if ($remotes) {
foreach ($remote in $remotes) {
$env:GIT_TERMINAL_PROMPT = '0'
$refs = git ls-remote --heads $remote 2>$null
$env:GIT_TERMINAL_PROMPT = $null
if ($LASTEXITCODE -eq 0 -and $refs) {
$refNames = $refs | ForEach-Object {
if ($_ -match 'refs/heads/(.+)$') { $matches[1] }
} | Where-Object { $_ }
$remoteHighest = Get-HighestNumberFromNames -Names $refNames
if ($remoteHighest -gt $highest) { $highest = $remoteHighest }
if ($LASTEXITCODE -eq 0) {
foreach ($branch in $branches) {
# Clean branch name: remove leading markers and remote prefixes
$cleanBranch = $branch.Trim() -replace '^\*?\s+', '' -replace '^remotes/[^/]+/', ''
# Extract feature number if branch matches pattern ###-*
if ($cleanBranch -match '^(\d+)-') {
$num = [int]$matches[1]
if ($num -gt $highest) { $highest = $num }
}
}
}
} catch {
Write-Verbose "Could not query remote refs: $_"
# If git command fails, return 0
Write-Verbose "Could not check Git branches: $_"
}
return $highest
}
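# Illustrative parsing of `git ls-remote --heads` output as done above; the
# ref lines are sample data, not a real remote.
$sampleRefs = @(
    "abc123`trefs/heads/001-user-auth",
    "def456`trefs/heads/0420-payments",
    "0f9e8d`trefs/heads/main"
)
$names = $sampleRefs | ForEach-Object {
    if ($_ -match 'refs/heads/(.+)$') { $matches[1] }
} | Where-Object { $_ }
$names  # -> 001-user-auth, 0420-payments, main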
# Return next available branch number. When SkipFetch is true, queries remotes
# via ls-remote (read-only) instead of fetching.
function Get-NextBranchNumber {
param(
[string]$SpecsDir,
[switch]$SkipFetch
[string]$SpecsDir
)
if ($SkipFetch) {
# Side-effect-free: query remotes via ls-remote
$highestBranch = Get-HighestNumberFromBranches
$highestRemote = Get-HighestNumberFromRemoteRefs
$highestBranch = [Math]::Max($highestBranch, $highestRemote)
} else {
# Fetch all remotes to get latest branch info (suppress errors if no remotes)
try {
git fetch --all --prune 2>$null | Out-Null
} catch {
# Ignore fetch errors
}
$highestBranch = Get-HighestNumberFromBranches
# Fetch all remotes to get latest branch info (suppress errors if no remotes)
try {
git fetch --all --prune 2>$null | Out-Null
} catch {
# Ignore fetch errors
}
# Get highest number from ALL branches (not just matching short name)
$highestBranch = Get-HighestNumberFromBranches
# Get highest number from ALL specs (not just matching short name)
$highestSpec = Get-HighestNumberFromSpecs -SpecsDir $SpecsDir
@@ -160,29 +132,39 @@ function Get-NextBranchNumber {
function ConvertTo-CleanBranchName {
param([string]$Name)
return $Name.ToLower() -replace '[^a-z0-9]', '-' -replace '-{2,}', '-' -replace '^-', '' -replace '-$', ''
}
# Load common functions (includes Get-RepoRoot, Test-HasGit, Resolve-Template)
$fallbackRoot = (Find-RepositoryRoot -StartDir $PSScriptRoot)
if (-not $fallbackRoot) {
Write-Error "Error: Could not determine repository root. Please run this script from within the repository."
exit 1
}
# Load common functions (includes Resolve-Template)
. "$PSScriptRoot/common.ps1"
# Use common.ps1 functions which prioritize .specify over git
$repoRoot = Get-RepoRoot
# Check if git is available at this repo root (not a parent)
$hasGit = Test-HasGit
try {
$repoRoot = git rev-parse --show-toplevel 2>$null
if ($LASTEXITCODE -eq 0) {
$hasGit = $true
} else {
throw "Git not available"
}
} catch {
$repoRoot = $fallbackRoot
$hasGit = $false
}
Set-Location $repoRoot
$specsDir = Join-Path $repoRoot 'specs'
if (-not $DryRun) {
New-Item -ItemType Directory -Path $specsDir -Force | Out-Null
}
New-Item -ItemType Directory -Path $specsDir -Force | Out-Null
# Function to generate branch name with stop word filtering and length filtering
function Get-BranchName {
param([string]$Description)
# Common stop words to filter out
$stopWords = @(
'i', 'a', 'an', 'the', 'to', 'for', 'of', 'in', 'on', 'at', 'by', 'with', 'from',
@@ -191,17 +173,17 @@ function Get-BranchName {
'this', 'that', 'these', 'those', 'my', 'your', 'our', 'their',
'want', 'need', 'add', 'get', 'set'
)
# Convert to lowercase and extract words (alphanumeric only)
$cleanName = $Description.ToLower() -replace '[^a-z0-9\s]', ' '
$words = $cleanName -split '\s+' | Where-Object { $_ }
# Filter words: remove stop words and words shorter than 3 chars (unless they're uppercase acronyms in original)
$meaningfulWords = @()
foreach ($word in $words) {
# Skip stop words
if ($stopWords -contains $word) { continue }
# Keep words that are length >= 3 OR appear as uppercase in original (likely acronyms)
if ($word.Length -ge 3) {
$meaningfulWords += $word
@@ -210,7 +192,7 @@ function Get-BranchName {
$meaningfulWords += $word
}
}
# If we have meaningful words, use first 3-4 of them
if ($meaningfulWords.Count -gt 0) {
$maxWords = if ($meaningfulWords.Count -eq 4) { 4 } else { 3 }
@@ -233,134 +215,94 @@ if ($ShortName) {
$branchSuffix = Get-BranchName -Description $featureDesc
}
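# A compact sketch of the stop-word filtering performed by Get-BranchName;
# the stop-word list here is abbreviated for illustration.
$demoDesc = 'Add user authentication to the API'
$demoStop = 'add', 'to', 'the', 'for', 'a', 'an'
$demoWords = ($demoDesc.ToLower() -replace '[^a-z0-9\s]', ' ') -split '\s+' | Where-Object { $_ }
$demoKept = $demoWords | Where-Object { ($demoStop -notcontains $_) -and ($_.Length -ge 3) }
($demoKept | Select-Object -First 3) -join '-'  # -> user-authentication-api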
# Warn if -Number and -Timestamp are both specified
if ($Timestamp -and $Number -ne 0) {
Write-Warning "[specify] Warning: -Number is ignored when -Timestamp is used"
$Number = 0
}
# Determine branch prefix
if ($Timestamp) {
$featureNum = Get-Date -Format 'yyyyMMdd-HHmmss'
$branchName = "$featureNum-$branchSuffix"
} else {
# Determine branch number
if ($Number -eq 0) {
if ($DryRun -and $hasGit) {
# Dry-run: query remotes via ls-remote (side-effect-free, no fetch)
$Number = Get-NextBranchNumber -SpecsDir $specsDir -SkipFetch
} elseif ($DryRun) {
# Dry-run without git: local spec dirs only
$Number = (Get-HighestNumberFromSpecs -SpecsDir $specsDir) + 1
} elseif ($hasGit) {
# Check existing branches on remotes
$Number = Get-NextBranchNumber -SpecsDir $specsDir
} else {
# Fall back to local directory check
$Number = (Get-HighestNumberFromSpecs -SpecsDir $specsDir) + 1
}
# Determine branch number
if ($Number -eq 0) {
if ($hasGit) {
# Check existing branches on remotes
$Number = Get-NextBranchNumber -SpecsDir $specsDir
} else {
# Fall back to local directory check
$Number = (Get-HighestNumberFromSpecs -SpecsDir $specsDir) + 1
}
$featureNum = ('{0:000}' -f $Number)
$branchName = "$featureNum-$branchSuffix"
}
$featureNum = ('{0:000}' -f $Number)
$branchName = "$featureNum-$branchSuffix"
# GitHub enforces a 244-byte limit on branch names
# Validate and truncate if necessary
$maxBranchLength = 244
if ($branchName.Length -gt $maxBranchLength) {
# Calculate how much we need to trim from suffix
# Account for prefix length: timestamp (15) + hyphen (1) = 16, or sequential (3) + hyphen (1) = 4
$prefixLength = $featureNum.Length + 1
$maxSuffixLength = $maxBranchLength - $prefixLength
# Account for: feature number (3) + hyphen (1) = 4 chars
$maxSuffixLength = $maxBranchLength - 4
# Truncate suffix
$truncatedSuffix = $branchSuffix.Substring(0, [Math]::Min($branchSuffix.Length, $maxSuffixLength))
# Remove trailing hyphen if truncation created one
$truncatedSuffix = $truncatedSuffix -replace '-$', ''
$originalBranchName = $branchName
$branchName = "$featureNum-$truncatedSuffix"
Write-Warning "[specify] Branch name exceeded GitHub's 244-byte limit"
Write-Warning "[specify] Original: $originalBranchName ($($originalBranchName.Length) bytes)"
Write-Warning "[specify] Truncated to: $branchName ($($branchName.Length) bytes)"
}
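# Worked example of the truncation above, with an artificially long suffix.
$demoNum = '001'
$demoSuffix = 'x' * 300
$demoName = "$demoNum-$demoSuffix"
if ($demoName.Length -gt 244) {
    $demoMax = 244 - ($demoNum.Length + 1)
    $demoTrim = $demoSuffix.Substring(0, [Math]::Min($demoSuffix.Length, $demoMax)) -replace '-$', ''
    $demoName = "$demoNum-$demoTrim"
}
$demoName.Length  # -> 244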
$featureDir = Join-Path $specsDir $branchName
$specFile = Join-Path $featureDir 'spec.md'
if (-not $DryRun) {
if ($hasGit) {
$branchCreated = $false
try {
git checkout -q -b $branchName 2>$null | Out-Null
if ($LASTEXITCODE -eq 0) {
$branchCreated = $true
}
} catch {
# Exception during git command
if ($hasGit) {
$branchCreated = $false
try {
git checkout -q -b $branchName 2>$null | Out-Null
if ($LASTEXITCODE -eq 0) {
$branchCreated = $true
}
if (-not $branchCreated) {
# Check if branch already exists
$existingBranch = git branch --list $branchName 2>$null
if ($existingBranch) {
if ($AllowExistingBranch) {
# Switch to the existing branch instead of failing
git checkout -q $branchName 2>$null | Out-Null
if ($LASTEXITCODE -ne 0) {
Write-Error "Error: Branch '$branchName' exists but could not be checked out. Resolve any uncommitted changes or conflicts and try again."
exit 1
}
} elseif ($Timestamp) {
Write-Error "Error: Branch '$branchName' already exists. Rerun to get a new timestamp or use a different -ShortName."
exit 1
} else {
Write-Error "Error: Branch '$branchName' already exists. Please use a different feature name or specify a different number with -Number."
exit 1
}
} else {
Write-Error "Error: Failed to create git branch '$branchName'. Please check your git configuration and try again."
exit 1
}
}
} else {
Write-Warning "[specify] Warning: Git repository not detected; skipped branch creation for $branchName"
} catch {
# Exception during git command
}
New-Item -ItemType Directory -Path $featureDir -Force | Out-Null
if (-not (Test-Path -PathType Leaf $specFile)) {
$template = Resolve-Template -TemplateName 'spec-template' -RepoRoot $repoRoot
if ($template -and (Test-Path $template)) {
Copy-Item $template $specFile -Force
if (-not $branchCreated) {
# Check if branch already exists
$existingBranch = git branch --list $branchName 2>$null
if ($existingBranch) {
Write-Error "Error: Branch '$branchName' already exists. Please use a different feature name or specify a different number with -Number."
exit 1
} else {
New-Item -ItemType File -Path $specFile -Force | Out-Null
Write-Error "Error: Failed to create git branch '$branchName'. Please check your git configuration and try again."
exit 1
}
}
# Set the SPECIFY_FEATURE environment variable for the current session
$env:SPECIFY_FEATURE = $branchName
} else {
Write-Warning "[specify] Warning: Git repository not detected; skipped branch creation for $branchName"
}
$featureDir = Join-Path $specsDir $branchName
New-Item -ItemType Directory -Path $featureDir -Force | Out-Null
$template = Resolve-Template -TemplateName 'spec-template' -RepoRoot $repoRoot
$specFile = Join-Path $featureDir 'spec.md'
if ($template -and (Test-Path $template)) {
Copy-Item $template $specFile -Force
} else {
New-Item -ItemType File -Path $specFile | Out-Null
}
# Set the SPECIFY_FEATURE environment variable for the current session
$env:SPECIFY_FEATURE = $branchName
if ($Json) {
$obj = [PSCustomObject]@{
$obj = [PSCustomObject]@{
BRANCH_NAME = $branchName
SPEC_FILE = $specFile
FEATURE_NUM = $featureNum
HAS_GIT = $hasGit
}
if ($DryRun) {
$obj | Add-Member -NotePropertyName 'DRY_RUN' -NotePropertyValue $true
}
$obj | ConvertTo-Json -Compress
} else {
Write-Output "BRANCH_NAME: $branchName"
Write-Output "SPEC_FILE: $specFile"
Write-Output "FEATURE_NUM: $featureNum"
Write-Output "HAS_GIT: $hasGit"
if (-not $DryRun) {
Write-Output "SPECIFY_FEATURE environment variable set to: $branchName"
}
Write-Output "SPECIFY_FEATURE environment variable set to: $branchName"
}

View File

@@ -9,7 +9,7 @@ Mirrors the behavior of scripts/bash/update-agent-context.sh:
2. Plan Data Extraction
3. Agent File Management (create from template or update existing)
4. Content Generation (technology stack, recent changes, timestamp)
5. Multi-Agent Support (claude, gemini, copilot, cursor-agent, qwen, opencode, codex, windsurf, junie, kilocode, auggie, roo, codebuddy, amp, shai, tabnine, kiro-cli, agy, bob, vibe, qodercli, kimi, trae, pi, iflow, generic)
5. Multi-Agent Support (claude, gemini, copilot, cursor-agent, qwen, opencode, codex, windsurf, kilocode, auggie, roo, codebuddy, amp, shai, tabnine, kiro-cli, agy, bob, vibe, qodercli, kimi, generic)
.PARAMETER AgentType
Optional agent key to update a single agent. If omitted, updates all existing agent files (creating a default Claude file if none exist).
@@ -25,7 +25,7 @@ Relies on common helper functions in common.ps1
#>
param(
[Parameter(Position=0)]
[ValidateSet('claude','gemini','copilot','cursor-agent','qwen','opencode','codex','windsurf','junie','kilocode','auggie','roo','codebuddy','amp','shai','tabnine','kiro-cli','agy','bob','qodercli','vibe','kimi','trae','pi','iflow','generic')]
[ValidateSet('claude','gemini','copilot','cursor-agent','qwen','opencode','codex','windsurf','kilocode','auggie','roo','codebuddy','amp','shai','tabnine','kiro-cli','agy','bob','qodercli','vibe','kimi','generic')]
[string]$AgentType
)
@@ -46,12 +46,11 @@ $NEW_PLAN = $IMPL_PLAN
# Agent file paths
$CLAUDE_FILE = Join-Path $REPO_ROOT 'CLAUDE.md'
$GEMINI_FILE = Join-Path $REPO_ROOT 'GEMINI.md'
$COPILOT_FILE = Join-Path $REPO_ROOT '.github/copilot-instructions.md'
$COPILOT_FILE = Join-Path $REPO_ROOT '.github/agents/copilot-instructions.md'
$CURSOR_FILE = Join-Path $REPO_ROOT '.cursor/rules/specify-rules.mdc'
$QWEN_FILE = Join-Path $REPO_ROOT 'QWEN.md'
$AGENTS_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
$WINDSURF_FILE = Join-Path $REPO_ROOT '.windsurf/rules/specify-rules.md'
$JUNIE_FILE = Join-Path $REPO_ROOT '.junie/AGENTS.md'
$KILOCODE_FILE = Join-Path $REPO_ROOT '.kilocode/rules/specify-rules.md'
$AUGGIE_FILE = Join-Path $REPO_ROOT '.augment/rules/specify-rules.md'
$ROO_FILE = Join-Path $REPO_ROOT '.roo/rules/specify-rules.md'
@@ -65,8 +64,6 @@ $AGY_FILE = Join-Path $REPO_ROOT '.agent/rules/specify-rules.md'
$BOB_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
$VIBE_FILE = Join-Path $REPO_ROOT '.vibe/agents/specify-agents.md'
$KIMI_FILE = Join-Path $REPO_ROOT 'KIMI.md'
$TRAE_FILE = Join-Path $REPO_ROOT '.trae/rules/AGENTS.md'
$IFLOW_FILE = Join-Path $REPO_ROOT 'IFLOW.md'
$TEMPLATE_FILE = Join-Path $REPO_ROOT '.specify/templates/agent-file-template.md'
@@ -398,7 +395,6 @@ function Update-SpecificAgent {
'opencode' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'opencode' }
'codex' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Codex CLI' }
'windsurf' { Update-AgentFile -TargetFile $WINDSURF_FILE -AgentName 'Windsurf' }
'junie' { Update-AgentFile -TargetFile $JUNIE_FILE -AgentName 'Junie' }
'kilocode' { Update-AgentFile -TargetFile $KILOCODE_FILE -AgentName 'Kilo Code' }
'auggie' { Update-AgentFile -TargetFile $AUGGIE_FILE -AgentName 'Auggie CLI' }
'roo' { Update-AgentFile -TargetFile $ROO_FILE -AgentName 'Roo Code' }
@@ -412,11 +408,8 @@ function Update-SpecificAgent {
'bob' { Update-AgentFile -TargetFile $BOB_FILE -AgentName 'IBM Bob' }
'vibe' { Update-AgentFile -TargetFile $VIBE_FILE -AgentName 'Mistral Vibe' }
'kimi' { Update-AgentFile -TargetFile $KIMI_FILE -AgentName 'Kimi Code' }
'trae' { Update-AgentFile -TargetFile $TRAE_FILE -AgentName 'Trae' }
'pi' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Pi Coding Agent' }
'iflow' { Update-AgentFile -TargetFile $IFLOW_FILE -AgentName 'iFlow CLI' }
'generic' { Write-Info 'Generic agent: no predefined context file. Use the agent-specific update script for your agent.' }
default { Write-Err "Unknown agent type '$Type'"; Write-Err 'Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|junie|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|trae|pi|iflow|generic'; return $false }
default { Write-Err "Unknown agent type '$Type'"; Write-Err 'Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|generic'; return $false }
}
}
@@ -430,7 +423,6 @@ function Update-AllExistingAgents {
if (Test-Path $QWEN_FILE) { if (-not (Update-AgentFile -TargetFile $QWEN_FILE -AgentName 'Qwen Code')) { $ok = $false }; $found = $true }
if (Test-Path $AGENTS_FILE) { if (-not (Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Codex/opencode')) { $ok = $false }; $found = $true }
if (Test-Path $WINDSURF_FILE) { if (-not (Update-AgentFile -TargetFile $WINDSURF_FILE -AgentName 'Windsurf')) { $ok = $false }; $found = $true }
if (Test-Path $JUNIE_FILE) { if (-not (Update-AgentFile -TargetFile $JUNIE_FILE -AgentName 'Junie')) { $ok = $false }; $found = $true }
if (Test-Path $KILOCODE_FILE) { if (-not (Update-AgentFile -TargetFile $KILOCODE_FILE -AgentName 'Kilo Code')) { $ok = $false }; $found = $true }
if (Test-Path $AUGGIE_FILE) { if (-not (Update-AgentFile -TargetFile $AUGGIE_FILE -AgentName 'Auggie CLI')) { $ok = $false }; $found = $true }
if (Test-Path $ROO_FILE) { if (-not (Update-AgentFile -TargetFile $ROO_FILE -AgentName 'Roo Code')) { $ok = $false }; $found = $true }
@@ -443,8 +435,6 @@ function Update-AllExistingAgents {
if (Test-Path $BOB_FILE) { if (-not (Update-AgentFile -TargetFile $BOB_FILE -AgentName 'IBM Bob')) { $ok = $false }; $found = $true }
if (Test-Path $VIBE_FILE) { if (-not (Update-AgentFile -TargetFile $VIBE_FILE -AgentName 'Mistral Vibe')) { $ok = $false }; $found = $true }
if (Test-Path $KIMI_FILE) { if (-not (Update-AgentFile -TargetFile $KIMI_FILE -AgentName 'Kimi Code')) { $ok = $false }; $found = $true }
if (Test-Path $TRAE_FILE) { if (-not (Update-AgentFile -TargetFile $TRAE_FILE -AgentName 'Trae')) { $ok = $false }; $found = $true }
if (Test-Path $IFLOW_FILE) { if (-not (Update-AgentFile -TargetFile $IFLOW_FILE -AgentName 'iFlow CLI')) { $ok = $false }; $found = $true }
if (-not $found) {
Write-Info 'No existing agent files found, creating default Claude file...'
if (-not (Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code')) { $ok = $false }
@@ -459,7 +449,7 @@ function Print-Summary {
if ($NEW_FRAMEWORK) { Write-Host " - Added framework: $NEW_FRAMEWORK" }
if ($NEW_DB -and $NEW_DB -ne 'N/A') { Write-Host " - Added database: $NEW_DB" }
Write-Host ''
Write-Info 'Usage: ./update-agent-context.ps1 [-AgentType claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|junie|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|trae|pi|iflow|generic]'
Write-Info 'Usage: ./update-agent-context.ps1 [-AgentType claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|generic]'
}
function Main {

File diff suppressed because it is too large

View File

@@ -9,24 +9,9 @@ command files into agent-specific directories in the correct format.
from pathlib import Path
from typing import Dict, List, Any
import platform
import re
from copy import deepcopy
import yaml
def _build_agent_configs() -> dict[str, Any]:
"""Derive CommandRegistrar.AGENT_CONFIGS from INTEGRATION_REGISTRY."""
from specify_cli.integrations import INTEGRATION_REGISTRY
configs: dict[str, dict[str, Any]] = {}
for key, integration in INTEGRATION_REGISTRY.items():
if key == "generic":
continue
if integration.registrar_config:
configs[key] = dict(integration.registrar_config)
return configs
class CommandRegistrar:
"""Handles registration of commands with AI agents.
@@ -35,26 +20,123 @@ class CommandRegistrar:
and companion files (e.g. Copilot .prompt.md).
"""
# Derived from INTEGRATION_REGISTRY — single source of truth.
# Populated lazily via _ensure_configs() on first use.
AGENT_CONFIGS: dict[str, dict[str, Any]] = {}
_configs_loaded: bool = False
def __init__(self) -> None:
self._ensure_configs()
def __init_subclass__(cls, **kwargs: Any) -> None:
super().__init_subclass__(**kwargs)
cls._ensure_configs()
@classmethod
def _ensure_configs(cls) -> None:
if not cls._configs_loaded:
try:
cls.AGENT_CONFIGS = _build_agent_configs()
cls._configs_loaded = True
except ImportError:
pass # Circular import during module init; retry on next access
# Agent configurations with directory, format, and argument placeholder
AGENT_CONFIGS = {
"claude": {
"dir": ".claude/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"gemini": {
"dir": ".gemini/commands",
"format": "toml",
"args": "{{args}}",
"extension": ".toml"
},
"copilot": {
"dir": ".github/agents",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".agent.md"
},
"cursor": {
"dir": ".cursor/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"qwen": {
"dir": ".qwen/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"opencode": {
"dir": ".opencode/command",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"codex": {
"dir": ".codex/prompts",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"windsurf": {
"dir": ".windsurf/workflows",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"kilocode": {
"dir": ".kilocode/workflows",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"auggie": {
"dir": ".augment/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"roo": {
"dir": ".roo/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"codebuddy": {
"dir": ".codebuddy/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"qodercli": {
"dir": ".qoder/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"kiro-cli": {
"dir": ".kiro/prompts",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"amp": {
"dir": ".agents/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"shai": {
"dir": ".shai/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"tabnine": {
"dir": ".tabnine/agent/commands",
"format": "toml",
"args": "{{args}}",
"extension": ".toml"
},
"bob": {
"dir": ".bob/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"kimi": {
"dir": ".kimi/skills",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": "/SKILL.md"
}
}
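# A minimal sketch of how one AGENT_CONFIGS entry above drives command file
# placement; the project root and command name are placeholders.
from pathlib import Path
_cfg = {"dir": ".claude/commands", "args": "$ARGUMENTS", "extension": ".md"}
_dest = Path("/tmp/demo-project") / _cfg["dir"] / f"speckit.plan{_cfg['extension']}"
print(_dest)  # /tmp/demo-project/.claude/commands/speckit.plan.md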
@staticmethod
def parse_frontmatter(content: str) -> tuple[dict, str]:
@@ -82,9 +164,6 @@ class CommandRegistrar:
except yaml.YAMLError:
frontmatter = {}
if not isinstance(frontmatter, dict):
frontmatter = {}
return frontmatter, body
@staticmethod
@@ -100,56 +179,25 @@ class CommandRegistrar:
if not fm:
return ""
yaml_str = yaml.dump(fm, default_flow_style=False, sort_keys=False, allow_unicode=True)
yaml_str = yaml.dump(fm, default_flow_style=False, sort_keys=False)
return f"---\n{yaml_str}---\n"
def _adjust_script_paths(self, frontmatter: dict) -> dict:
"""Normalize script paths in frontmatter to generated project locations.
Rewrites known repo-relative and top-level script paths under the
`scripts` and `agent_scripts` keys (for example `../../scripts/`,
`../../templates/`, `../../memory/`, `scripts/`, `templates/`, and
`memory/`) to the `.specify/...` paths used in generated projects.
"""Adjust script paths from extension-relative to repo-relative.
Args:
frontmatter: Frontmatter dictionary
Returns:
Modified frontmatter with normalized project paths
Modified frontmatter with adjusted paths
"""
frontmatter = deepcopy(frontmatter)
for script_key in ("scripts", "agent_scripts"):
scripts = frontmatter.get(script_key)
if not isinstance(scripts, dict):
continue
for key, script_path in scripts.items():
if isinstance(script_path, str):
scripts[key] = self.rewrite_project_relative_paths(script_path)
if "scripts" in frontmatter:
for key in frontmatter["scripts"]:
script_path = frontmatter["scripts"][key]
if script_path.startswith("../../scripts/"):
frontmatter["scripts"][key] = f".specify/scripts/{script_path[14:]}"
return frontmatter
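# Example of the adjustment above on a toy frontmatter; the key and the
# script path are invented for illustration.
_fm = {"scripts": {"sh": "../../scripts/bash/create-new-feature.sh --json"}}
for _key, _val in _fm["scripts"].items():
    if _val.startswith("../../scripts/"):
        _fm["scripts"][_key] = f".specify/scripts/{_val[14:]}"
print(_fm["scripts"]["sh"])  # .specify/scripts/bash/create-new-feature.sh --json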
@staticmethod
def rewrite_project_relative_paths(text: str) -> str:
"""Rewrite repo-relative paths to their generated project locations."""
if not isinstance(text, str) or not text:
return text
for old, new in (
("../../memory/", ".specify/memory/"),
("../../scripts/", ".specify/scripts/"),
("../../templates/", ".specify/templates/"),
):
text = text.replace(old, new)
# Only rewrite top-level style references so extension-local paths like
# ".specify/extensions/<ext>/scripts/..." remain intact.
text = re.sub(r'(^|[\s`"\'(])(?:\.?/)?memory/', r"\1.specify/memory/", text)
text = re.sub(r'(^|[\s`"\'(])(?:\.?/)?scripts/', r"\1.specify/scripts/", text)
text = re.sub(r'(^|[\s`"\'(])(?:\.?/)?templates/', r"\1.specify/templates/", text)
return text.replace(".specify/.specify/", ".specify/").replace(".specify.specify/", ".specify/")
def render_markdown_command(
self,
frontmatter: dict,
@@ -198,142 +246,12 @@ class CommandRegistrar:
toml_lines.append(f"# Source: {source_id}")
toml_lines.append("")
# Keep TOML output valid even when body contains triple-quote delimiters.
# Prefer multiline forms, then fall back to escaped basic string.
if '"""' not in body:
toml_lines.append('prompt = """')
toml_lines.append(body)
toml_lines.append('"""')
elif "'''" not in body:
toml_lines.append("prompt = '''")
toml_lines.append(body)
toml_lines.append("'''")
else:
escaped_body = (
body.replace("\\", "\\\\")
.replace('"', '\\"')
.replace("\n", "\\n")
.replace("\r", "\\r")
.replace("\t", "\\t")
)
toml_lines.append(f'prompt = "{escaped_body}"')
toml_lines.append('prompt = """')
toml_lines.append(body)
toml_lines.append('"""')
return "\n".join(toml_lines)
def render_skill_command(
self,
agent_name: str,
skill_name: str,
frontmatter: dict,
body: str,
source_id: str,
source_file: str,
project_root: Path,
) -> str:
"""Render a command override as a SKILL.md file.
SKILL-target agents should receive the same skills-oriented
frontmatter shape used elsewhere in the project instead of the
original command frontmatter.
Technical debt note:
Spec-kit currently has multiple SKILL.md generators (template packaging,
init-time conversion, and extension/preset overrides). Keep the skill
frontmatter keys aligned (name/description/compatibility/metadata, with
metadata.author and metadata.source subkeys) to avoid drift across agents.
"""
if not isinstance(frontmatter, dict):
frontmatter = {}
if agent_name in {"codex", "kimi"}:
body = self.resolve_skill_placeholders(agent_name, frontmatter, body, project_root)
description = frontmatter.get("description", f"Spec-kit workflow command: {skill_name}")
skill_frontmatter = self.build_skill_frontmatter(
agent_name,
skill_name,
description,
f"{source_id}:{source_file}",
)
return self.render_frontmatter(skill_frontmatter) + "\n" + body
@staticmethod
def build_skill_frontmatter(
agent_name: str,
skill_name: str,
description: str,
source: str,
) -> dict:
"""Build consistent SKILL.md frontmatter across all skill generators."""
skill_frontmatter = {
"name": skill_name,
"description": description,
"compatibility": "Requires spec-kit project structure with .specify/ directory",
"metadata": {
"author": "github-spec-kit",
"source": source,
},
}
if agent_name == "claude":
# Claude skills should only run when explicitly invoked.
skill_frontmatter["disable-model-invocation"] = True
return skill_frontmatter
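# Sample of the skill frontmatter shape produced above, serialized the same
# way the generators do; the values are illustrative.
import yaml
_skill_fm = {
    "name": "speckit-plan",
    "description": "Spec-kit workflow command: speckit-plan",
    "compatibility": "Requires spec-kit project structure with .specify/ directory",
    "metadata": {"author": "github-spec-kit", "source": "core:plan.md"},
}
print(yaml.safe_dump(_skill_fm, sort_keys=False).strip())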
@staticmethod
def resolve_skill_placeholders(agent_name: str, frontmatter: dict, body: str, project_root: Path) -> str:
"""Resolve script placeholders for skills-backed agents."""
try:
from . import load_init_options
except ImportError:
return body
if not isinstance(frontmatter, dict):
frontmatter = {}
scripts = frontmatter.get("scripts", {}) or {}
agent_scripts = frontmatter.get("agent_scripts", {}) or {}
if not isinstance(scripts, dict):
scripts = {}
if not isinstance(agent_scripts, dict):
agent_scripts = {}
init_opts = load_init_options(project_root)
if not isinstance(init_opts, dict):
init_opts = {}
script_variant = init_opts.get("script")
if script_variant not in {"sh", "ps"}:
fallback_order = []
default_variant = "ps" if platform.system().lower().startswith("win") else "sh"
secondary_variant = "sh" if default_variant == "ps" else "ps"
if default_variant in scripts or default_variant in agent_scripts:
fallback_order.append(default_variant)
if secondary_variant in scripts or secondary_variant in agent_scripts:
fallback_order.append(secondary_variant)
for key in scripts:
if key not in fallback_order:
fallback_order.append(key)
for key in agent_scripts:
if key not in fallback_order:
fallback_order.append(key)
script_variant = fallback_order[0] if fallback_order else None
script_command = scripts.get(script_variant) if script_variant else None
if script_command:
script_command = script_command.replace("{ARGS}", "$ARGUMENTS")
body = body.replace("{SCRIPT}", script_command)
agent_script_command = agent_scripts.get(script_variant) if script_variant else None
if agent_script_command:
agent_script_command = agent_script_command.replace("{ARGS}", "$ARGUMENTS")
body = body.replace("{AGENT_SCRIPT}", agent_script_command)
body = body.replace("{ARGS}", "$ARGUMENTS").replace("__AGENT__", agent_name)
return CommandRegistrar.rewrite_project_relative_paths(body)
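# Stand-in for the placeholder resolution above; the script command, agent
# name, and body text are invented.
_body = "Run {SCRIPT} for __AGENT__, then pass {ARGS} along."
_script = "scripts/bash/check-prerequisites.sh {ARGS}".replace("{ARGS}", "$ARGUMENTS")
_body = _body.replace("{SCRIPT}", _script)
_body = _body.replace("{ARGS}", "$ARGUMENTS").replace("__AGENT__", "kimi")
print(_body)
# Run scripts/bash/check-prerequisites.sh $ARGUMENTS for kimi, then pass $ARGUMENTS along.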
def _convert_argument_placeholder(self, content: str, from_placeholder: str, to_placeholder: str) -> str:
"""Convert argument placeholder format.
@@ -347,19 +265,6 @@ class CommandRegistrar:
"""
return content.replace(from_placeholder, to_placeholder)
@staticmethod
def _compute_output_name(agent_name: str, cmd_name: str, agent_config: Dict[str, Any]) -> str:
"""Compute the on-disk command or skill name for an agent."""
if agent_config["extension"] != "/SKILL.md":
return cmd_name
short_name = cmd_name
if short_name.startswith("speckit."):
short_name = short_name[len("speckit."):]
short_name = short_name.replace(".", "-")
return f"speckit-{short_name}"
def register_commands(
self,
agent_name: str,
@@ -385,7 +290,6 @@ class CommandRegistrar:
Raises:
ValueError: If agent is not supported
"""
self._ensure_configs()
if agent_name not in self.AGENT_CONFIGS:
raise ValueError(f"Unsupported agent: {agent_name}")
@@ -412,20 +316,14 @@ class CommandRegistrar:
body, "$ARGUMENTS", agent_config["args"]
)
output_name = self._compute_output_name(agent_name, cmd_name, agent_config)
if agent_config["extension"] == "/SKILL.md":
output = self.render_skill_command(
agent_name, output_name, frontmatter, body, source_id, cmd_file, project_root
)
elif agent_config["format"] == "markdown":
if agent_config["format"] == "markdown":
output = self.render_markdown_command(frontmatter, body, source_id, context_note)
elif agent_config["format"] == "toml":
output = self.render_toml_command(frontmatter, body, source_id)
else:
raise ValueError(f"Unsupported format: {agent_config['format']}")
dest_file = commands_dir / f"{output_name}{agent_config['extension']}"
dest_file = commands_dir / f"{cmd_name}{agent_config['extension']}"
dest_file.parent.mkdir(parents=True, exist_ok=True)
dest_file.write_text(output, encoding="utf-8")
@@ -435,15 +333,9 @@ class CommandRegistrar:
registered.append(cmd_name)
for alias in cmd_info.get("aliases", []):
alias_output_name = self._compute_output_name(agent_name, alias, agent_config)
alias_output = output
if agent_config["extension"] == "/SKILL.md":
alias_output = self.render_skill_command(
agent_name, alias_output_name, frontmatter, body, source_id, cmd_file, project_root
)
alias_file = commands_dir / f"{alias_output_name}{agent_config['extension']}"
alias_file = commands_dir / f"{alias}{agent_config['extension']}"
alias_file.parent.mkdir(parents=True, exist_ok=True)
alias_file.write_text(alias_output, encoding="utf-8")
alias_file.write_text(output, encoding="utf-8")
if agent_name == "copilot":
self.write_copilot_prompt(project_root, alias)
registered.append(alias)
@@ -485,9 +377,8 @@ class CommandRegistrar:
"""
results = {}
self._ensure_configs()
for agent_name, agent_config in self.AGENT_CONFIGS.items():
agent_dir = project_root / agent_config["dir"]
agent_dir = project_root / agent_config["dir"].split("/")[0]
if agent_dir.exists():
try:
@@ -513,7 +404,6 @@ class CommandRegistrar:
registered_commands: Dict mapping agent names to command name lists
project_root: Path to project root
"""
self._ensure_configs()
for agent_name, cmd_names in registered_commands.items():
if agent_name not in self.AGENT_CONFIGS:
continue
@@ -522,8 +412,7 @@ class CommandRegistrar:
commands_dir = project_root / agent_config["dir"]
for cmd_name in cmd_names:
output_name = self._compute_output_name(agent_name, cmd_name, agent_config)
cmd_file = commands_dir / f"{output_name}{agent_config['extension']}"
cmd_file = commands_dir / f"{cmd_name}{agent_config['extension']}"
if cmd_file.exists():
cmd_file.unlink()
@@ -531,13 +420,3 @@ class CommandRegistrar:
prompt_file = project_root / ".github" / "prompts" / f"{cmd_name}.prompt.md"
if prompt_file.exists():
prompt_file.unlink()
# Populate AGENT_CONFIGS after class definition.
# Catches ImportError from circular imports during module loading;
# _configs_loaded stays False so the next explicit access retries.
try:
CommandRegistrar._ensure_configs()
except ImportError:
pass

View File

@@ -25,49 +25,6 @@ import yaml
from packaging import version as pkg_version
from packaging.specifiers import SpecifierSet, InvalidSpecifier
_FALLBACK_CORE_COMMAND_NAMES = frozenset({
"analyze",
"checklist",
"clarify",
"constitution",
"implement",
"plan",
"specify",
"tasks",
"taskstoissues",
})
EXTENSION_COMMAND_NAME_PATTERN = re.compile(r"^speckit\.([a-z0-9-]+)\.([a-z0-9-]+)$")
def _load_core_command_names() -> frozenset[str]:
"""Discover bundled core command names from the packaged templates.
Prefer the wheel-time ``core_pack`` bundle when present, and fall back to
the source checkout when running from the repository. If neither is
available, use the baked-in fallback set so validation still works.
"""
candidate_dirs = [
Path(__file__).parent / "core_pack" / "commands",
Path(__file__).resolve().parent.parent.parent / "templates" / "commands",
]
for commands_dir in candidate_dirs:
if not commands_dir.is_dir():
continue
command_names = {
command_file.stem
for command_file in commands_dir.iterdir()
if command_file.is_file() and command_file.suffix == ".md"
}
if command_names:
return frozenset(command_names)
return _FALLBACK_CORE_COMMAND_NAMES
CORE_COMMAND_NAMES = _load_core_command_names()
class ExtensionError(Exception):
"""Base exception for extension-related errors."""
@@ -84,26 +41,6 @@ class CompatibilityError(ExtensionError):
pass
def normalize_priority(value: Any, default: int = 10) -> int:
"""Normalize a stored priority value for sorting and display.
Corrupted registry data may contain missing, non-numeric, or non-positive
values. In those cases, fall back to the default priority.
Args:
value: Priority value to normalize (may be int, str, None, etc.)
default: Default priority to use for invalid values (default: 10)
Returns:
Normalized priority as positive integer (>= 1)
"""
try:
priority = int(value)
except (TypeError, ValueError):
return default
return priority if priority >= 1 else default
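# Quick checks of the fallback behaviour described above, using an inline
# copy of the normalization rule.
def _norm(value, default=10):
    try:
        p = int(value)
    except (TypeError, ValueError):
        return default
    return p if p >= 1 else default
assert _norm(3) == 3 and _norm("7") == 7           # valid ints and numeric strings
assert _norm(None) == 10 and _norm("high") == 10   # missing/non-numeric -> default
assert _norm(0) == 10 and _norm(-5) == 10          # non-positive -> default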
@dataclass
class CatalogEntry:
"""Represents a single catalog entry in the catalog stack."""
@@ -192,7 +129,7 @@ class ExtensionManifest:
raise ValidationError("Command missing 'name' or 'file'")
# Validate command name format
if EXTENSION_COMMAND_NAME_PATTERN.match(cmd["name"]) is None:
if not re.match(r'^speckit\.[a-z0-9-]+\.[a-z0-9-]+$', cmd["name"]):
raise ValidationError(
f"Invalid command name '{cmd['name']}': "
"must follow pattern 'speckit.{extension}.{command}'"
@@ -265,17 +202,7 @@ class ExtensionRegistry:
try:
with open(self.registry_path, 'r') as f:
data = json.load(f)
# Validate loaded data is a dict (handles corrupted registry files)
if not isinstance(data, dict):
return {
"schema_version": self.SCHEMA_VERSION,
"extensions": {}
}
# Normalize extensions field (handles corrupted extensions value)
if not isinstance(data.get("extensions"), dict):
data["extensions"] = {}
return data
return json.load(f)
except (json.JSONDecodeError, FileNotFoundError):
# Corrupted or missing registry, start fresh
return {
@@ -297,7 +224,7 @@ class ExtensionRegistry:
metadata: Extension metadata (version, source, etc.)
"""
self.data["extensions"][extension_id] = {
**copy.deepcopy(metadata),
**metadata,
"installed_at": datetime.now(timezone.utc).isoformat()
}
self._save()
@@ -320,16 +247,12 @@ class ExtensionRegistry:
Raises:
KeyError: If extension is not installed
"""
extensions = self.data.get("extensions")
if not isinstance(extensions, dict) or extension_id not in extensions:
if extension_id not in self.data["extensions"]:
raise KeyError(f"Extension '{extension_id}' is not installed")
# Merge new metadata with existing, preserving original installed_at
existing = extensions[extension_id]
# Handle corrupted registry entries (e.g., string/list instead of dict)
if not isinstance(existing, dict):
existing = {}
# Merge: existing fields preserved, new fields override (deep copy to prevent caller mutation)
merged = {**existing, **copy.deepcopy(metadata)}
existing = self.data["extensions"][extension_id]
# Merge: existing fields preserved, new fields override
merged = {**existing, **metadata}
# Always preserve original installed_at based on key existence, not truthiness,
# to handle cases where the field exists but may be falsy (legacy/corruption)
if "installed_at" in existing:
@@ -337,7 +260,7 @@ class ExtensionRegistry:
else:
# If not present in existing, explicitly remove from merged if caller provided it
merged.pop("installed_at", None)
extensions[extension_id] = merged
self.data["extensions"][extension_id] = merged
self._save()
def restore(self, extension_id: str, metadata: dict):
@@ -350,16 +273,8 @@ class ExtensionRegistry:
Args:
extension_id: Extension ID
metadata: Complete extension metadata including installed_at
Raises:
ValueError: If metadata is None or not a dict
"""
if metadata is None or not isinstance(metadata, dict):
raise ValueError(f"Cannot restore '{extension_id}': metadata must be a dict")
# Ensure extensions dict exists (handle corrupted registry)
if not isinstance(self.data.get("extensions"), dict):
self.data["extensions"] = {}
self.data["extensions"][extension_id] = copy.deepcopy(metadata)
self.data["extensions"][extension_id] = dict(metadata)
self._save()
def remove(self, extension_id: str):
@@ -368,11 +283,8 @@ class ExtensionRegistry:
Args:
extension_id: Extension ID
"""
extensions = self.data.get("extensions")
if not isinstance(extensions, dict):
return
if extension_id in extensions:
del extensions[extension_id]
if extension_id in self.data["extensions"]:
del self.data["extensions"][extension_id]
self._save()
def get(self, extension_id: str) -> Optional[dict]:
@@ -385,49 +297,21 @@ class ExtensionRegistry:
extension_id: Extension ID
Returns:
Deep copy of extension metadata, or None if not found or corrupted
Deep copy of extension metadata, or None if not found
"""
extensions = self.data.get("extensions")
if not isinstance(extensions, dict):
return None
entry = extensions.get(extension_id)
# Return None for missing or corrupted (non-dict) entries
if entry is None or not isinstance(entry, dict):
return None
return copy.deepcopy(entry)
entry = self.data["extensions"].get(extension_id)
return copy.deepcopy(entry) if entry is not None else None
def list(self) -> Dict[str, dict]:
"""Get all installed extensions with valid metadata.
"""Get all installed extensions.
Returns a deep copy of extensions with dict metadata only.
Corrupted entries (non-dict values) are filtered out.
Returns a deep copy of the extensions mapping to prevent callers
from accidentally mutating nested internal registry state.
Returns:
Dictionary of extension_id -> metadata (deep copies), empty dict if corrupted
Dictionary of extension_id -> metadata (deep copies)
"""
extensions = self.data.get("extensions", {}) or {}
if not isinstance(extensions, dict):
return {}
# Filter to only valid dict entries to match type contract
return {
ext_id: copy.deepcopy(meta)
for ext_id, meta in extensions.items()
if isinstance(meta, dict)
}
def keys(self) -> set:
"""Get all extension IDs including corrupted entries.
Lightweight method that returns IDs without deep-copying metadata.
Use this when you only need to check which extensions are tracked.
Returns:
Set of extension IDs (includes corrupted entries)
"""
extensions = self.data.get("extensions", {}) or {}
if not isinstance(extensions, dict):
return set()
return set(extensions.keys())
return copy.deepcopy(self.data["extensions"])
def is_installed(self, extension_id: str) -> bool:
"""Check if extension is installed.
@@ -436,44 +320,9 @@ class ExtensionRegistry:
extension_id: Extension ID
Returns:
True if extension is installed, False if not or registry corrupted
True if extension is installed
"""
extensions = self.data.get("extensions")
if not isinstance(extensions, dict):
return False
return extension_id in extensions
def list_by_priority(self, include_disabled: bool = False) -> List[tuple]:
"""Get all installed extensions sorted by priority.
Lower priority number = higher precedence (checked first).
Extensions with equal priority are sorted alphabetically by ID
for deterministic ordering.
Args:
include_disabled: If True, include disabled extensions. Default False.
Returns:
List of (extension_id, metadata_copy) tuples sorted by priority.
Metadata is deep-copied to prevent accidental mutation.
"""
extensions = self.data.get("extensions", {}) or {}
if not isinstance(extensions, dict):
extensions = {}
sortable_extensions = []
for ext_id, meta in extensions.items():
if not isinstance(meta, dict):
continue
# Skip disabled extensions unless explicitly requested
if not include_disabled and not meta.get("enabled", True):
continue
metadata_copy = copy.deepcopy(meta)
metadata_copy["priority"] = normalize_priority(metadata_copy.get("priority", 10))
sortable_extensions.append((ext_id, metadata_copy))
return sorted(
sortable_extensions,
key=lambda item: (item[1]["priority"], item[0]),
)
return extension_id in self.data["extensions"]
class ExtensionManager:
@@ -489,126 +338,6 @@ class ExtensionManager:
self.extensions_dir = project_root / ".specify" / "extensions"
self.registry = ExtensionRegistry(self.extensions_dir)
@staticmethod
def _collect_manifest_command_names(manifest: ExtensionManifest) -> Dict[str, str]:
"""Collect command and alias names declared by a manifest.
Performs install-time validation for extension-specific constraints:
- commands and aliases must use the canonical `speckit.{extension}.{command}` shape
- commands and aliases must use this extension's namespace
- command namespaces must not shadow core commands
- duplicate command/alias names inside one manifest are rejected
Args:
manifest: Parsed extension manifest
Returns:
Mapping of declared command/alias name -> kind ("command"/"alias")
Raises:
ValidationError: If any declared name is invalid
"""
if manifest.id in CORE_COMMAND_NAMES:
raise ValidationError(
f"Extension ID '{manifest.id}' conflicts with core command namespace '{manifest.id}'"
)
declared_names: Dict[str, str] = {}
for cmd in manifest.commands:
primary_name = cmd["name"]
aliases = cmd.get("aliases", [])
if aliases is None:
aliases = []
if not isinstance(aliases, list):
raise ValidationError(
f"Aliases for command '{primary_name}' must be a list"
)
for kind, name in [("command", primary_name)] + [
("alias", alias) for alias in aliases
]:
if not isinstance(name, str):
raise ValidationError(
f"{kind.capitalize()} for command '{primary_name}' must be a string"
)
match = EXTENSION_COMMAND_NAME_PATTERN.match(name)
if match is None:
raise ValidationError(
f"Invalid {kind} '{name}': "
"must follow pattern 'speckit.{extension}.{command}'"
)
namespace = match.group(1)
if namespace != manifest.id:
raise ValidationError(
f"{kind.capitalize()} '{name}' must use extension namespace '{manifest.id}'"
)
if namespace in CORE_COMMAND_NAMES:
raise ValidationError(
f"{kind.capitalize()} '{name}' conflicts with core command namespace '{namespace}'"
)
if name in declared_names:
raise ValidationError(
f"Duplicate command or alias '{name}' in extension manifest"
)
declared_names[name] = kind
return declared_names
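# Names accepted and rejected by the canonical pattern enforced above; the
# extension and command segments are invented.
import re
_pat = re.compile(r"^speckit\.([a-z0-9-]+)\.([a-z0-9-]+)$")
print(bool(_pat.match("speckit.gitflow.release")))  # True
print(bool(_pat.match("speckit.plan")))             # False: missing command segment
print(bool(_pat.match("gitflow.release")))          # False: missing speckit prefix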
def _get_installed_command_name_map(
self,
exclude_extension_id: Optional[str] = None,
) -> Dict[str, str]:
"""Return registered command and alias names for installed extensions."""
installed_names: Dict[str, str] = {}
for ext_id in self.registry.keys():
if ext_id == exclude_extension_id:
continue
manifest = self.get_extension(ext_id)
if manifest is None:
continue
for cmd in manifest.commands:
cmd_name = cmd.get("name")
if isinstance(cmd_name, str):
installed_names.setdefault(cmd_name, ext_id)
aliases = cmd.get("aliases", [])
if not isinstance(aliases, list):
continue
for alias in aliases:
if isinstance(alias, str):
installed_names.setdefault(alias, ext_id)
return installed_names
def _validate_install_conflicts(self, manifest: ExtensionManifest) -> None:
"""Reject installs that would shadow core or installed extension commands."""
declared_names = self._collect_manifest_command_names(manifest)
installed_names = self._get_installed_command_name_map(
exclude_extension_id=manifest.id
)
collisions = [
f"{name} (already provided by extension '{installed_names[name]}')"
for name in sorted(declared_names)
if name in installed_names
]
if collisions:
raise ValidationError(
"Extension commands conflict with installed extensions:\n- "
+ "\n- ".join(collisions)
)
@staticmethod
def _load_extensionignore(source_dir: Path) -> Optional[Callable[[str, List[str]], Set[str]]]:
"""Load .extensionignore and return an ignore function for shutil.copytree.
@@ -673,280 +402,6 @@ class ExtensionManager:
return _ignore
def _get_skills_dir(self) -> Optional[Path]:
"""Return the active skills directory for extension skill registration.
Reads ``.specify/init-options.json`` to determine whether skills
are enabled and which agent was selected, then delegates to
the module-level ``_get_skills_dir()`` helper for the concrete path.
Kimi is treated as a native-skills agent: if ``ai == "kimi"`` and
``.kimi/skills`` exists, extension installs should still propagate
command skills even when ``ai_skills`` is false.
Returns:
The skills directory ``Path``, or ``None`` if skills were not
enabled and no native-skills fallback applies.
"""
from . import load_init_options, _get_skills_dir as resolve_skills_dir
opts = load_init_options(self.project_root)
if not isinstance(opts, dict):
opts = {}
agent = opts.get("ai")
if not isinstance(agent, str) or not agent:
return None
ai_skills_enabled = bool(opts.get("ai_skills"))
if not ai_skills_enabled and agent != "kimi":
return None
skills_dir = resolve_skills_dir(self.project_root, agent)
if not skills_dir.is_dir():
return None
return skills_dir
def _register_extension_skills(
self,
manifest: ExtensionManifest,
extension_dir: Path,
) -> List[str]:
"""Generate SKILL.md files for extension commands as agent skills.
For every command in the extension manifest, creates a SKILL.md
file in the agent's skills directory following the agentskills.io
specification. This is only done when ``--ai-skills`` was used
during project initialisation.
Args:
manifest: Extension manifest.
extension_dir: Installed extension directory.
Returns:
List of skill names that were created (for registry storage).
"""
skills_dir = self._get_skills_dir()
if not skills_dir:
return []
from . import load_init_options
from .agents import CommandRegistrar
import yaml
written: List[str] = []
opts = load_init_options(self.project_root)
if not isinstance(opts, dict):
opts = {}
selected_ai = opts.get("ai")
if not isinstance(selected_ai, str) or not selected_ai:
return []
registrar = CommandRegistrar()
for cmd_info in manifest.commands:
cmd_name = cmd_info["name"]
cmd_file_rel = cmd_info["file"]
# Guard against path traversal: reject absolute paths and ensure
# the resolved file stays within the extension directory.
cmd_path = Path(cmd_file_rel)
if cmd_path.is_absolute():
continue
try:
ext_root = extension_dir.resolve()
source_file = (ext_root / cmd_path).resolve()
source_file.relative_to(ext_root) # raises ValueError if outside
except (OSError, ValueError):
continue
if not source_file.is_file():
continue
# Derive skill name from command name using the same hyphenated
# convention as hook rendering and preset skill registration.
short_name_raw = cmd_name
if short_name_raw.startswith("speckit."):
short_name_raw = short_name_raw[len("speckit."):]
skill_name = f"speckit-{short_name_raw.replace('.', '-')}"
# Check if skill already exists before creating the directory
skill_subdir = skills_dir / skill_name
skill_file = skill_subdir / "SKILL.md"
if skill_file.exists():
# Do not overwrite user-customized skills
continue
# Create skill directory; track whether we created it so we can clean
# up safely if reading the source file subsequently fails.
created_now = not skill_subdir.exists()
skill_subdir.mkdir(parents=True, exist_ok=True)
# Parse the command file — guard against IsADirectoryError / decode errors
try:
content = source_file.read_text(encoding="utf-8")
except (OSError, UnicodeDecodeError):
if created_now:
try:
skill_subdir.rmdir() # undo the mkdir; dir is empty at this point
except OSError:
pass # best-effort cleanup
continue
frontmatter, body = registrar.parse_frontmatter(content)
frontmatter = registrar._adjust_script_paths(frontmatter)
body = registrar.resolve_skill_placeholders(
selected_ai, frontmatter, body, self.project_root
)
original_desc = frontmatter.get("description", "")
description = original_desc or f"Extension command: {cmd_name}"
frontmatter_data = registrar.build_skill_frontmatter(
selected_ai,
skill_name,
description,
f"extension:{manifest.id}",
)
frontmatter_text = yaml.safe_dump(frontmatter_data, sort_keys=False).strip()
# Derive a human-friendly title from the command name
short_name = cmd_name
if short_name.startswith("speckit."):
short_name = short_name[len("speckit."):]
title_name = short_name.replace(".", " ").replace("-", " ").title()
skill_content = (
f"---\n"
f"{frontmatter_text}\n"
f"---\n\n"
f"# {title_name} Skill\n\n"
f"{body}\n"
)
skill_file.write_text(skill_content, encoding="utf-8")
written.append(skill_name)
return written
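# Sketch of the name derivation used above (command names hypothetical):
#
#     "speckit.review.checklist"  ->  skill "speckit-review-checklist"
#     "custom.cmd"                ->  skill "speckit-custom-cmd"
#
# i.e. strip a leading "speckit.", replace remaining dots with hyphens, and
# prefix "speckit-", matching hook rendering and preset skill registration.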
def _unregister_extension_skills(self, skill_names: List[str], extension_id: str) -> None:
"""Remove SKILL.md directories for extension skills.
Called during extension removal to clean up skill files that
were created by ``_register_extension_skills()``.
If ``_get_skills_dir()`` returns ``None`` (e.g. the user removed
init-options.json or toggled ai_skills after installation), we
fall back to scanning all known agent skills directories so that
orphaned skill directories are still cleaned up. In that case
each candidate directory is verified against the SKILL.md
``metadata.source`` field before removal to avoid accidentally
deleting user-created skills with the same name.
Args:
skill_names: List of skill names to remove.
extension_id: Extension ID used to verify ownership during
fallback candidate scanning.
"""
if not skill_names:
return
skills_dir = self._get_skills_dir()
if skills_dir:
# Fast path: we know the exact skills directory
for skill_name in skill_names:
# Guard against path traversal from a corrupted registry entry:
# reject names that are absolute, contain path separators, or
# resolve to a path outside the skills directory.
sn_path = Path(skill_name)
if sn_path.is_absolute() or len(sn_path.parts) != 1:
continue
try:
skill_subdir = (skills_dir / skill_name).resolve()
skill_subdir.relative_to(skills_dir.resolve()) # raises if outside
except (OSError, ValueError):
continue
if not skill_subdir.is_dir():
continue
# Safety check: only delete if SKILL.md exists and its
# metadata.source matches exactly this extension — mirroring
# the fallback branch — so a corrupted registry entry cannot
# delete an unrelated user skill.
skill_md = skill_subdir / "SKILL.md"
if not skill_md.is_file():
continue
try:
import yaml as _yaml
raw = skill_md.read_text(encoding="utf-8")
source = ""
if raw.startswith("---"):
parts = raw.split("---", 2)
if len(parts) >= 3:
fm = _yaml.safe_load(parts[1]) or {}
source = (
fm.get("metadata", {}).get("source", "")
if isinstance(fm, dict)
else ""
)
if source != f"extension:{extension_id}":
continue
except Exception:  # covers OSError, UnicodeDecodeError, and YAML parse errors
continue
shutil.rmtree(skill_subdir)
else:
# Fallback: scan all possible agent skills directories
from . import AGENT_CONFIG, DEFAULT_SKILLS_DIR
candidate_dirs: set[Path] = set()
for cfg in AGENT_CONFIG.values():
folder = cfg.get("folder", "")
if folder:
candidate_dirs.add(self.project_root / folder.rstrip("/") / "skills")
candidate_dirs.add(self.project_root / DEFAULT_SKILLS_DIR)
for skills_candidate in candidate_dirs:
if not skills_candidate.is_dir():
continue
for skill_name in skill_names:
# Same path-traversal guard as the fast path above
sn_path = Path(skill_name)
if sn_path.is_absolute() or len(sn_path.parts) != 1:
continue
try:
skill_subdir = (skills_candidate / skill_name).resolve()
skill_subdir.relative_to(skills_candidate.resolve()) # raises if outside
except (OSError, ValueError):
continue
if not skill_subdir.is_dir():
continue
# Safety check: only delete if SKILL.md exists and its
# metadata.source matches exactly this extension. If the
# file is missing or unreadable we skip to avoid deleting
# unrelated user-created directories.
skill_md = skill_subdir / "SKILL.md"
if not skill_md.is_file():
continue
try:
import yaml as _yaml
raw = skill_md.read_text(encoding="utf-8")
source = ""
if raw.startswith("---"):
parts = raw.split("---", 2)
if len(parts) >= 3:
fm = _yaml.safe_load(parts[1]) or {}
source = (
fm.get("metadata", {}).get("source", "")
if isinstance(fm, dict)
else ""
)
# Only remove skills explicitly created by this extension
if source != f"extension:{extension_id}":
continue
except Exception:  # covers OSError, UnicodeDecodeError, and YAML parse errors
# If we can't verify, skip to avoid accidental deletion
continue
shutil.rmtree(skill_subdir)
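# A SKILL.md that passes the ownership check above carries frontmatter like
# the following (names illustrative; only metadata.source is inspected):
#
#     ---
#     name: speckit-review-checklist
#     metadata:
#       source: "extension:review"
#     ---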
def check_compatibility(
self,
manifest: ExtensionManifest,
@@ -985,8 +440,7 @@ class ExtensionManager:
self,
source_dir: Path,
speckit_version: str,
register_commands: bool = True,
priority: int = 10,
register_commands: bool = True
) -> ExtensionManifest:
"""Install extension from a local directory.
@@ -994,19 +448,14 @@ class ExtensionManager:
source_dir: Path to extension directory
speckit_version: Current spec-kit version
register_commands: If True, register commands with AI agents
priority: Resolution priority (lower = higher precedence, default 10)
Returns:
Installed extension manifest
Raises:
ValidationError: If manifest is invalid or priority is invalid
ValidationError: If manifest is invalid
CompatibilityError: If extension is incompatible
"""
# Validate priority
if priority < 1:
raise ValidationError("Priority must be a positive integer (1 or higher)")
# Load and validate manifest
manifest_path = source_dir / "extension.yml"
manifest = ExtensionManifest(manifest_path)
@@ -1021,9 +470,6 @@ class ExtensionManager:
f"Use 'specify extension remove {manifest.id}' first."
)
# Reject manifests that would shadow core commands or installed extensions.
self._validate_install_conflicts(manifest)
# Install extension
dest_dir = self.extensions_dir / manifest.id
if dest_dir.exists():
@@ -1041,10 +487,6 @@ class ExtensionManager:
manifest, dest_dir, self.project_root
)
# Auto-register extension commands as agent skills when --ai-skills
# was used during project initialisation (feature parity).
registered_skills = self._register_extension_skills(manifest, dest_dir)
# Register hooks
hook_executor = HookExecutor(self.project_root)
hook_executor.register_hooks(manifest)
@@ -1055,9 +497,7 @@ class ExtensionManager:
"source": "local",
"manifest_hash": manifest.get_hash(),
"enabled": True,
"priority": priority,
"registered_commands": registered_commands,
"registered_skills": registered_skills,
"registered_commands": registered_commands
})
return manifest
@@ -1065,27 +505,21 @@ class ExtensionManager:
def install_from_zip(
self,
zip_path: Path,
speckit_version: str,
priority: int = 10,
speckit_version: str
) -> ExtensionManifest:
"""Install extension from ZIP file.
Args:
zip_path: Path to extension ZIP file
speckit_version: Current spec-kit version
priority: Resolution priority (lower = higher precedence, default 10)
Returns:
Installed extension manifest
Raises:
ValidationError: If manifest is invalid or priority is invalid
ValidationError: If manifest is invalid
CompatibilityError: If extension is incompatible
"""
# Validate priority early
if priority < 1:
raise ValidationError("Priority must be a positive integer (1 or higher)")
with tempfile.TemporaryDirectory() as tmpdir:
temp_path = Path(tmpdir)
@@ -1120,7 +554,7 @@ class ExtensionManager:
raise ValidationError("No extension.yml found in ZIP file")
# Install from extracted directory
return self.install_from_directory(extension_dir, speckit_version, priority=priority)
return self.install_from_directory(extension_dir, speckit_version)
def remove(self, extension_id: str, keep_config: bool = False) -> bool:
"""Remove an installed extension.
@@ -1135,15 +569,9 @@ class ExtensionManager:
if not self.registry.is_installed(extension_id):
return False
# Get registered commands and skills before removal
# Get registered commands before removal
metadata = self.registry.get(extension_id)
registered_commands = metadata.get("registered_commands", {}) if metadata else {}
raw_skills = metadata.get("registered_skills", []) if metadata else []
# Normalize: must be a list of plain strings to avoid corrupted-registry errors
if isinstance(raw_skills, list):
registered_skills = [s for s in raw_skills if isinstance(s, str)]
else:
registered_skills = []
registered_commands = metadata.get("registered_commands", {})
extension_dir = self.extensions_dir / extension_id
@@ -1152,9 +580,6 @@ class ExtensionManager:
registrar = CommandRegistrar()
registrar.unregister_commands(registered_commands, self.project_root)
# Unregister agent skills
self._unregister_extension_skills(registered_skills, extension_id)
if keep_config:
# Preserve config files, only remove non-config files
if extension_dir.exists():
@@ -1207,9 +632,6 @@ class ExtensionManager:
result = []
for ext_id, metadata in self.registry.list().items():
# Ensure metadata is a dictionary to avoid AttributeError when using .get()
if not isinstance(metadata, dict):
metadata = {}
ext_dir = self.extensions_dir / ext_id
manifest_path = ext_dir / "extension.yml"
@@ -1221,7 +643,6 @@ class ExtensionManager:
"version": metadata.get("version", "unknown"),
"description": manifest.description,
"enabled": metadata.get("enabled", True),
"priority": normalize_priority(metadata.get("priority")),
"installed_at": metadata.get("installed_at"),
"command_count": len(manifest.commands),
"hook_count": len(manifest.hooks)
@@ -1234,7 +655,6 @@ class ExtensionManager:
"version": metadata.get("version", "unknown"),
"description": "⚠️ Corrupted extension",
"enabled": False,
"priority": normalize_priority(metadata.get("priority")),
"installed_at": metadata.get("installed_at"),
"command_count": 0,
"hook_count": 0
@@ -1429,8 +849,8 @@ class ExtensionCatalog:
if not config_path.exists():
return None
try:
data = yaml.safe_load(config_path.read_text(encoding="utf-8")) or {}
except (yaml.YAMLError, OSError, UnicodeError) as e:
data = yaml.safe_load(config_path.read_text()) or {}
except (yaml.YAMLError, OSError) as e:
raise ValidationError(
f"Failed to read catalog config {config_path}: {e}"
)
@@ -1921,8 +1341,8 @@ class ConfigManager:
return {}
try:
return yaml.safe_load(file_path.read_text(encoding="utf-8")) or {}
except (yaml.YAMLError, OSError, UnicodeError):
return yaml.safe_load(file_path.read_text()) or {}
except (yaml.YAMLError, OSError):
return {}
def _get_extension_defaults(self) -> Dict[str, Any]:
@@ -2098,55 +1518,6 @@ class HookExecutor:
self.project_root = project_root
self.extensions_dir = project_root / ".specify" / "extensions"
self.config_file = project_root / ".specify" / "extensions.yml"
self._init_options_cache: Optional[Dict[str, Any]] = None
def _load_init_options(self) -> Dict[str, Any]:
"""Load persisted init options used to determine invocation style.
Uses the shared helper from specify_cli and caches values per executor
instance to avoid repeated filesystem reads during hook rendering.
"""
if self._init_options_cache is None:
from . import load_init_options
payload = load_init_options(self.project_root)
self._init_options_cache = payload if isinstance(payload, dict) else {}
return self._init_options_cache
@staticmethod
def _skill_name_from_command(command: Any) -> str:
"""Map a command id like speckit.plan to speckit-plan skill name."""
if not isinstance(command, str):
return ""
command_id = command.strip()
if not command_id.startswith("speckit."):
return ""
return f"speckit-{command_id[len('speckit.'):].replace('.', '-')}"
def _render_hook_invocation(self, command: Any) -> str:
"""Render an agent-specific invocation string for a hook command."""
if not isinstance(command, str):
return ""
command_id = command.strip()
if not command_id:
return ""
init_options = self._load_init_options()
selected_ai = init_options.get("ai")
codex_skill_mode = selected_ai == "codex" and bool(init_options.get("ai_skills"))
claude_skill_mode = selected_ai == "claude" and bool(init_options.get("ai_skills"))
kimi_skill_mode = selected_ai == "kimi"
skill_name = self._skill_name_from_command(command_id)
if codex_skill_mode and skill_name:
return f"${skill_name}"
if claude_skill_mode and skill_name:
return f"/{skill_name}"
if kimi_skill_mode and skill_name:
return f"/skill:{skill_name}"
return f"/{command_id}"
def get_project_config(self) -> Dict[str, Any]:
"""Load project-level extension configuration.
@@ -2162,8 +1533,8 @@ class HookExecutor:
}
try:
return yaml.safe_load(self.config_file.read_text(encoding="utf-8")) or {}
except (yaml.YAMLError, OSError, UnicodeError):
return yaml.safe_load(self.config_file.read_text()) or {}
except (yaml.YAMLError, OSError):
return {
"installed": [],
"settings": {"auto_execute_hooks": True},
@@ -2178,8 +1549,7 @@ class HookExecutor:
"""
self.config_file.parent.mkdir(parents=True, exist_ok=True)
self.config_file.write_text(
yaml.dump(config, default_flow_style=False, sort_keys=False, allow_unicode=True),
encoding="utf-8",
yaml.dump(config, default_flow_style=False, sort_keys=False)
)
def register_hooks(self, manifest: ExtensionManifest):
@@ -2390,27 +1760,21 @@ class HookExecutor:
for hook in hooks:
extension = hook.get("extension")
command = hook.get("command")
invocation = self._render_hook_invocation(command)
command_text = command if isinstance(command, str) and command.strip() else "<missing command>"
display_invocation = invocation or f"/{command_text}"
optional = hook.get("optional", True)
prompt = hook.get("prompt", "")
description = hook.get("description", "")
if optional:
lines.append(f"\n**Optional Hook**: {extension}")
lines.append(f"Command: `{display_invocation}`")
lines.append(f"Command: `/{command}`")
if description:
lines.append(f"Description: {description}")
lines.append(f"\nPrompt: {prompt}")
lines.append(f"To execute: `{display_invocation}`")
lines.append(f"To execute: `/{command}`")
else:
lines.append(f"\n**Automatic Hook**: {extension}")
lines.append(f"Executing: `{display_invocation}`")
lines.append(f"EXECUTE_COMMAND: {command_text}")
lines.append(f"EXECUTE_COMMAND_INVOCATION: {display_invocation}")
lines.append(f"Executing: `/{command}`")
lines.append(f"EXECUTE_COMMAND: {command}")
return "\n".join(lines)
@@ -2474,7 +1838,6 @@ class HookExecutor:
"""
return {
"command": hook.get("command"),
"invocation": self._render_hook_invocation(hook.get("command")),
"extension": hook.get("extension"),
"optional": hook.get("optional", True),
"description": hook.get("description", ""),
@@ -2518,3 +1881,4 @@ class HookExecutor:
hook["enabled"] = False
self.save_project_config(config)

View File

@@ -1,105 +0,0 @@
"""Integration registry for AI coding assistants.
Each integration is a self-contained subpackage that handles setup/teardown
for a specific AI assistant (Copilot, Claude, Gemini, etc.).
"""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .base import IntegrationBase
# Maps integration key → IntegrationBase instance.
# Populated by later stages as integrations are migrated.
INTEGRATION_REGISTRY: dict[str, IntegrationBase] = {}
def _register(integration: IntegrationBase) -> None:
"""Register an integration instance in the global registry.
Raises ``ValueError`` for falsy keys and ``KeyError`` for duplicates.
"""
key = integration.key
if not key:
raise ValueError("Cannot register integration with an empty key.")
if key in INTEGRATION_REGISTRY:
raise KeyError(f"Integration with key {key!r} is already registered.")
INTEGRATION_REGISTRY[key] = integration
def get_integration(key: str) -> IntegrationBase | None:
"""Return the integration for *key*, or ``None`` if not registered."""
return INTEGRATION_REGISTRY.get(key)
# -- Register built-in integrations --------------------------------------
def _register_builtins() -> None:
"""Register all built-in integrations.
Package directories use Python-safe identifiers (e.g. ``kiro_cli``,
``cursor_agent``). The user-facing integration key stored in
``IntegrationBase.key`` stays hyphenated (``"kiro-cli"``,
``"cursor-agent"``) to match the actual CLI tool / binary name that
users install and invoke.
"""
# -- Imports (alphabetical) -------------------------------------------
from .agy import AgyIntegration
from .amp import AmpIntegration
from .auggie import AuggieIntegration
from .bob import BobIntegration
from .claude import ClaudeIntegration
from .codex import CodexIntegration
from .codebuddy import CodebuddyIntegration
from .copilot import CopilotIntegration
from .cursor_agent import CursorAgentIntegration
from .gemini import GeminiIntegration
from .generic import GenericIntegration
from .iflow import IflowIntegration
from .junie import JunieIntegration
from .kilocode import KilocodeIntegration
from .kimi import KimiIntegration
from .kiro_cli import KiroCliIntegration
from .opencode import OpencodeIntegration
from .pi import PiIntegration
from .qodercli import QodercliIntegration
from .qwen import QwenIntegration
from .roo import RooIntegration
from .shai import ShaiIntegration
from .tabnine import TabnineIntegration
from .trae import TraeIntegration
from .vibe import VibeIntegration
from .windsurf import WindsurfIntegration
# -- Registration (alphabetical) --------------------------------------
_register(AgyIntegration())
_register(AmpIntegration())
_register(AuggieIntegration())
_register(BobIntegration())
_register(ClaudeIntegration())
_register(CodexIntegration())
_register(CodebuddyIntegration())
_register(CopilotIntegration())
_register(CursorAgentIntegration())
_register(GeminiIntegration())
_register(GenericIntegration())
_register(IflowIntegration())
_register(JunieIntegration())
_register(KilocodeIntegration())
_register(KimiIntegration())
_register(KiroCliIntegration())
_register(OpencodeIntegration())
_register(PiIntegration())
_register(QodercliIntegration())
_register(QwenIntegration())
_register(RooIntegration())
_register(ShaiIntegration())
_register(TabnineIntegration())
_register(TraeIntegration())
_register(VibeIntegration())
_register(WindsurfIntegration())
_register_builtins()
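# Illustrative lookup against the registry populated above ("claude" is one
# of the keys registered in _register_builtins):
#
#     integration = get_integration("claude")
#     if integration is not None:
#         print(integration.config["name"])  # "Claude Code"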

View File

@@ -1,41 +0,0 @@
"""Antigravity (agy) integration — skills-based agent.
Antigravity uses ``.agent/skills/speckit-<name>/SKILL.md`` layout.
Explicit command support was deprecated in version 1.20.5;
``--skills`` defaults to ``True``.
"""
from __future__ import annotations
from ..base import IntegrationOption, SkillsIntegration
class AgyIntegration(SkillsIntegration):
"""Integration for Antigravity IDE."""
key = "agy"
config = {
"name": "Antigravity",
"folder": ".agent/",
"commands_subdir": "skills",
"install_url": None,
"requires_cli": False,
}
registrar_config = {
"dir": ".agent/skills",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": "/SKILL.md",
}
context_file = "AGENTS.md"
@classmethod
def options(cls) -> list[IntegrationOption]:
return [
IntegrationOption(
"--skills",
is_flag=True,
default=True,
help="Install as agent skills (default for Antigravity since v1.20.5)",
),
]

View File

@@ -1,17 +0,0 @@
# update-context.ps1 — Antigravity (agy) integration: create/update AGENTS.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
$ErrorActionPreference = 'Stop'
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null }
if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = $scriptDir
$fsRoot = [System.IO.Path]::GetPathRoot($repoRoot)
while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = Split-Path -Parent $repoRoot
}
}
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType agy

View File

@@ -1,24 +0,0 @@
#!/usr/bin/env bash
# update-context.sh — Antigravity (agy) integration: create/update AGENTS.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
set -euo pipefail
_script_dir="$(cd "$(dirname "$0")" && pwd)"
_root="$_script_dir"
while [ "$_root" != "/" ] && [ ! -d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done
if [ -z "${REPO_ROOT:-}" ]; then
if [ -d "$_root/.specify" ]; then
REPO_ROOT="$_root"
else
git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then
REPO_ROOT="$git_root"
else
REPO_ROOT="$_root"
fi
fi
fi
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" agy

View File

@@ -1,21 +0,0 @@
"""Amp CLI integration."""
from ..base import MarkdownIntegration
class AmpIntegration(MarkdownIntegration):
key = "amp"
config = {
"name": "Amp",
"folder": ".agents/",
"commands_subdir": "commands",
"install_url": "https://ampcode.com/manual#install",
"requires_cli": True,
}
registrar_config = {
"dir": ".agents/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md",
}
context_file = "AGENTS.md"

View File

@@ -1,23 +0,0 @@
# update-context.ps1 — Amp integration: create/update AGENTS.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
$ErrorActionPreference = 'Stop'
# Derive repo root from script location (walks up to find .specify/)
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null }
# If git did not return a repo root, or the git root does not contain .specify,
# fall back to walking up from the script directory to find the initialized project root.
if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = $scriptDir
$fsRoot = [System.IO.Path]::GetPathRoot($repoRoot)
while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = Split-Path -Parent $repoRoot
}
}
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType amp

View File

@@ -1,28 +0,0 @@
#!/usr/bin/env bash
# update-context.sh — Amp integration: create/update AGENTS.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
set -euo pipefail
# Derive repo root from script location (walks up to find .specify/)
_script_dir="$(cd "$(dirname "$0")" && pwd)"
_root="$_script_dir"
while [ "$_root" != "/" ] && [ ! -d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done
if [ -z "${REPO_ROOT:-}" ]; then
if [ -d "$_root/.specify" ]; then
REPO_ROOT="$_root"
else
git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then
REPO_ROOT="$git_root"
else
REPO_ROOT="$_root"
fi
fi
fi
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" amp

View File

@@ -1,21 +0,0 @@
"""Auggie CLI integration."""
from ..base import MarkdownIntegration
class AuggieIntegration(MarkdownIntegration):
key = "auggie"
config = {
"name": "Auggie CLI",
"folder": ".augment/",
"commands_subdir": "commands",
"install_url": "https://docs.augmentcode.com/cli/setup-auggie/install-auggie-cli",
"requires_cli": True,
}
registrar_config = {
"dir": ".augment/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md",
}
context_file = ".augment/rules/specify-rules.md"

View File

@@ -1,23 +0,0 @@
# update-context.ps1 — Auggie CLI integration: create/update .augment/rules/specify-rules.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
$ErrorActionPreference = 'Stop'
# Derive repo root from script location (walks up to find .specify/)
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null }
# If git did not return a repo root, or the git root does not contain .specify,
# fall back to walking up from the script directory to find the initialized project root.
if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = $scriptDir
$fsRoot = [System.IO.Path]::GetPathRoot($repoRoot)
while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = Split-Path -Parent $repoRoot
}
}
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType auggie

View File

@@ -1,28 +0,0 @@
#!/usr/bin/env bash
# update-context.sh — Auggie CLI integration: create/update .augment/rules/specify-rules.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
set -euo pipefail
# Derive repo root from script location (walks up to find .specify/)
_script_dir="$(cd "$(dirname "$0")" && pwd)"
_root="$_script_dir"
while [ "$_root" != "/" ] && [ ! -d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done
if [ -z "${REPO_ROOT:-}" ]; then
if [ -d "$_root/.specify" ]; then
REPO_ROOT="$_root"
else
git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then
REPO_ROOT="$git_root"
else
REPO_ROOT="$_root"
fi
fi
fi
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" auggie

View File

@@ -1,793 +0,0 @@
"""Base classes for AI-assistant integrations.
Provides:
- ``IntegrationOption`` — declares a CLI option an integration accepts.
- ``IntegrationBase`` — abstract base every integration must implement.
- ``MarkdownIntegration`` — concrete base for standard Markdown-format
integrations (the common case — subclass, set three class attrs, done).
- ``TomlIntegration`` — concrete base for TOML-format integrations
(Gemini, Tabnine — subclass, set three class attrs, done).
- ``SkillsIntegration`` — concrete base for integrations that install
commands as agent skills (``speckit-<name>/SKILL.md`` layout).
"""
from __future__ import annotations
import re
import shutil
from abc import ABC
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from .manifest import IntegrationManifest
# ---------------------------------------------------------------------------
# IntegrationOption
# ---------------------------------------------------------------------------
@dataclass(frozen=True)
class IntegrationOption:
"""Declares an option that an integration accepts via ``--integration-options``.
Attributes:
name: The flag name (e.g. ``"--commands-dir"``).
is_flag: ``True`` for boolean flags (``--skills``).
required: ``True`` if the option must be supplied.
default: Default value when not supplied (``None`` → no default).
help: One-line description shown in ``specify integrate info``.
"""
name: str
is_flag: bool = False
required: bool = False
default: Any = None
help: str = ""
# ---------------------------------------------------------------------------
# IntegrationBase — abstract base class
# ---------------------------------------------------------------------------
class IntegrationBase(ABC):
"""Abstract base class every integration must implement.
Subclasses must set the following class-level attributes:
* ``key`` — unique identifier, matches actual CLI tool name
* ``config`` — dict compatible with ``AGENT_CONFIG`` entries
* ``registrar_config`` — dict compatible with ``CommandRegistrar.AGENT_CONFIGS``
And may optionally set:
* ``context_file`` — path (relative to project root) of the agent
context/instructions file (e.g. ``"CLAUDE.md"``)
"""
# -- Must be set by every subclass ------------------------------------
key: str = ""
"""Unique integration key — should match the actual CLI tool name."""
config: dict[str, Any] | None = None
"""Metadata dict matching the ``AGENT_CONFIG`` shape."""
registrar_config: dict[str, Any] | None = None
"""Registration dict matching ``CommandRegistrar.AGENT_CONFIGS`` shape."""
# -- Optional ---------------------------------------------------------
context_file: str | None = None
"""Relative path to the agent context file (e.g. ``CLAUDE.md``)."""
# -- Public API -------------------------------------------------------
@classmethod
def options(cls) -> list[IntegrationOption]:
"""Return options this integration accepts. Default: none."""
return []
# -- Primitives — building blocks for setup() -------------------------
def shared_commands_dir(self) -> Path | None:
"""Return path to the shared command templates directory.
Checks ``core_pack/commands/`` (wheel install) first, then
``templates/commands/`` (source checkout). Returns ``None``
if neither exists.
"""
import inspect
pkg_dir = Path(inspect.getfile(IntegrationBase)).resolve().parent.parent
for candidate in [
pkg_dir / "core_pack" / "commands",
pkg_dir.parent.parent / "templates" / "commands",
]:
if candidate.is_dir():
return candidate
return None
def shared_templates_dir(self) -> Path | None:
"""Return path to the shared page templates directory.
Contains ``vscode-settings.json``, ``spec-template.md``, etc.
Checks ``core_pack/templates/`` then ``templates/``.
"""
import inspect
pkg_dir = Path(inspect.getfile(IntegrationBase)).resolve().parent.parent
for candidate in [
pkg_dir / "core_pack" / "templates",
pkg_dir.parent.parent / "templates",
]:
if candidate.is_dir():
return candidate
return None
def list_command_templates(self) -> list[Path]:
"""Return sorted list of command template files from the shared directory."""
cmd_dir = self.shared_commands_dir()
if not cmd_dir or not cmd_dir.is_dir():
return []
return sorted(f for f in cmd_dir.iterdir() if f.is_file() and f.suffix == ".md")
def command_filename(self, template_name: str) -> str:
"""Return the destination filename for a command template.
*template_name* is the stem of the source file (e.g. ``"plan"``).
Default: ``speckit.{template_name}.md``. Subclasses override
to change the extension or naming convention.
"""
return f"speckit.{template_name}.md"
def commands_dest(self, project_root: Path) -> Path:
"""Return the absolute path to the commands output directory.
Derived from ``config["folder"]`` and ``config["commands_subdir"]``.
Raises ``ValueError`` if ``config`` or ``folder`` is missing.
"""
if not self.config:
raise ValueError(
f"{type(self).__name__}.config is not set; integration "
"subclasses must define a non-empty 'config' mapping."
)
folder = self.config.get("folder")
if not folder:
raise ValueError(
f"{type(self).__name__}.config is missing required 'folder' entry."
)
subdir = self.config.get("commands_subdir", "commands")
return project_root / folder / subdir
# -- File operations — granular primitives for setup() ----------------
@staticmethod
def copy_command_to_directory(
src: Path,
dest_dir: Path,
filename: str,
) -> Path:
"""Copy a command template to *dest_dir* with the given *filename*.
Creates *dest_dir* if needed. Returns the absolute path of the
written file. The caller can post-process the file before
recording it in the manifest.
"""
dest_dir.mkdir(parents=True, exist_ok=True)
dst = dest_dir / filename
shutil.copy2(src, dst)
return dst
@staticmethod
def record_file_in_manifest(
file_path: Path,
project_root: Path,
manifest: IntegrationManifest,
) -> None:
"""Hash *file_path* and record it in *manifest*.
*file_path* must be inside *project_root*.
"""
rel = file_path.resolve().relative_to(project_root.resolve())
manifest.record_existing(rel)
@staticmethod
def write_file_and_record(
content: str,
dest: Path,
project_root: Path,
manifest: IntegrationManifest,
) -> Path:
"""Write *content* to *dest*, hash it, and record in *manifest*.
Creates parent directories as needed. Writes bytes directly to
avoid platform newline translation (CRLF on Windows). Any
``\r\n`` sequences in *content* are normalised to ``\n`` before
writing. Returns *dest*.
"""
dest.parent.mkdir(parents=True, exist_ok=True)
normalized = content.replace("\r\n", "\n")
dest.write_bytes(normalized.encode("utf-8"))
rel = dest.resolve().relative_to(project_root.resolve())
manifest.record_existing(rel)
return dest
def integration_scripts_dir(self) -> Path | None:
"""Return path to this integration's bundled ``scripts/`` directory.
Looks for a ``scripts/`` sibling of the module that defines the
concrete subclass (not ``IntegrationBase`` itself).
Returns ``None`` if the directory doesn't exist.
"""
import inspect
cls_file = inspect.getfile(type(self))
scripts = Path(cls_file).resolve().parent / "scripts"
return scripts if scripts.is_dir() else None
def install_scripts(
self,
project_root: Path,
manifest: IntegrationManifest,
) -> list[Path]:
"""Copy integration-specific scripts into the project.
Copies files from this integration's ``scripts/`` directory to
``.specify/integrations/<key>/scripts/`` in the project. Shell
scripts are made executable. All copied files are recorded in
*manifest*.
Returns the list of files created.
"""
scripts_src = self.integration_scripts_dir()
if not scripts_src:
return []
created: list[Path] = []
scripts_dest = project_root / ".specify" / "integrations" / self.key / "scripts"
scripts_dest.mkdir(parents=True, exist_ok=True)
for src_script in sorted(scripts_src.iterdir()):
if not src_script.is_file():
continue
dst_script = scripts_dest / src_script.name
shutil.copy2(src_script, dst_script)
if dst_script.suffix == ".sh":
dst_script.chmod(dst_script.stat().st_mode | 0o111)
self.record_file_in_manifest(dst_script, project_root, manifest)
created.append(dst_script)
return created
@staticmethod
def process_template(
content: str,
agent_name: str,
script_type: str,
arg_placeholder: str = "$ARGUMENTS",
) -> str:
"""Process a raw command template into agent-ready content.
Performs the same transformations as the release script:
1. Extract ``scripts.<script_type>`` value from YAML frontmatter
2. Replace ``{SCRIPT}`` with the extracted script command
3. Extract ``agent_scripts.<script_type>`` and replace ``{AGENT_SCRIPT}``
4. Strip ``scripts:`` and ``agent_scripts:`` sections from frontmatter
5. Replace ``{ARGS}`` with *arg_placeholder*
6. Replace ``__AGENT__`` with *agent_name*
7. Rewrite paths: ``scripts/`` → ``.specify/scripts/`` etc.
"""
# 1. Extract script command from frontmatter
script_command = ""
script_pattern = re.compile(
rf"^\s*{re.escape(script_type)}:\s*(.+)$", re.MULTILINE
)
# Find the scripts: block
in_scripts = False
for line in content.splitlines():
if line.strip() == "scripts:":
in_scripts = True
continue
if in_scripts and line and not line[0].isspace():
in_scripts = False
if in_scripts:
m = script_pattern.match(line)
if m:
script_command = m.group(1).strip()
break
# 2. Replace {SCRIPT}
if script_command:
content = content.replace("{SCRIPT}", script_command)
# 3. Extract agent_script command
agent_script_command = ""
in_agent_scripts = False
for line in content.splitlines():
if line.strip() == "agent_scripts:":
in_agent_scripts = True
continue
if in_agent_scripts and line and not line[0].isspace():
in_agent_scripts = False
if in_agent_scripts:
m = script_pattern.match(line)
if m:
agent_script_command = m.group(1).strip()
break
if agent_script_command:
content = content.replace("{AGENT_SCRIPT}", agent_script_command)
# 4. Strip scripts: and agent_scripts: sections from frontmatter
lines = content.splitlines(keepends=True)
output_lines: list[str] = []
in_frontmatter = False
skip_section = False
dash_count = 0
for line in lines:
stripped = line.rstrip("\n\r")
if stripped == "---":
dash_count += 1
if dash_count == 1:
in_frontmatter = True
else:
in_frontmatter = False
skip_section = False
output_lines.append(line)
continue
if in_frontmatter:
if stripped in ("scripts:", "agent_scripts:"):
skip_section = True
continue
if skip_section:
if line[0:1].isspace():
continue # skip indented content under scripts/agent_scripts
skip_section = False
output_lines.append(line)
content = "".join(output_lines)
# 5. Replace {ARGS}
content = content.replace("{ARGS}", arg_placeholder)
# 6. Replace __AGENT__
content = content.replace("__AGENT__", agent_name)
# 7. Rewrite paths — delegate to the shared implementation in
# CommandRegistrar so extension-local paths are preserved and
# boundary rules stay consistent across the codebase.
from specify_cli.agents import CommandRegistrar
content = CommandRegistrar.rewrite_project_relative_paths(content)
return content
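# Minimal before/after sketch of the pipeline above (template content is
# hypothetical; the keys mirror the documented steps). Input:
#
#     ---
#     description: Plan a feature
#     scripts:
#       sh: scripts/bash/setup-plan.sh
#     ---
#     Run {SCRIPT} with {ARGS} for __AGENT__.
#
# For agent_name="amp", script_type="sh" this becomes:
#
#     ---
#     description: Plan a feature
#     ---
#     Run .specify/scripts/bash/setup-plan.sh with $ARGUMENTS for amp.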
def setup(
self,
project_root: Path,
manifest: IntegrationManifest,
parsed_options: dict[str, Any] | None = None,
**opts: Any,
) -> list[Path]:
"""Install integration command files into *project_root*.
Returns the list of files created. Copies raw templates without
processing. Integrations that need placeholder replacement
(e.g. ``{SCRIPT}``, ``__AGENT__``) should override ``setup()``
and call ``process_template()`` in their own loop — see
``CopilotIntegration`` for an example.
"""
templates = self.list_command_templates()
if not templates:
return []
project_root_resolved = project_root.resolve()
if manifest.project_root != project_root_resolved:
raise ValueError(
f"manifest.project_root ({manifest.project_root}) does not match "
f"project_root ({project_root_resolved})"
)
dest = self.commands_dest(project_root).resolve()
try:
dest.relative_to(project_root_resolved)
except ValueError as exc:
raise ValueError(
f"Integration destination {dest} escapes "
f"project root {project_root_resolved}"
) from exc
created: list[Path] = []
for src_file in templates:
dst_name = self.command_filename(src_file.stem)
dst_file = self.copy_command_to_directory(src_file, dest, dst_name)
self.record_file_in_manifest(dst_file, project_root, manifest)
created.append(dst_file)
return created
def teardown(
self,
project_root: Path,
manifest: IntegrationManifest,
*,
force: bool = False,
) -> tuple[list[Path], list[Path]]:
"""Uninstall integration files from *project_root*.
Delegates to ``manifest.uninstall()`` which only removes files
whose hash still matches the recorded value (unless *force*).
Returns ``(removed, skipped)`` file lists.
"""
return manifest.uninstall(project_root, force=force)
# -- Convenience helpers for subclasses -------------------------------
def install(
self,
project_root: Path,
manifest: IntegrationManifest,
parsed_options: dict[str, Any] | None = None,
**opts: Any,
) -> list[Path]:
"""High-level install — calls ``setup()`` and returns created files."""
return self.setup(
project_root, manifest, parsed_options=parsed_options, **opts
)
def uninstall(
self,
project_root: Path,
manifest: IntegrationManifest,
*,
force: bool = False,
) -> tuple[list[Path], list[Path]]:
"""High-level uninstall — calls ``teardown()``."""
return self.teardown(project_root, manifest, force=force)
# ---------------------------------------------------------------------------
# MarkdownIntegration — covers ~20 standard agents
# ---------------------------------------------------------------------------
class MarkdownIntegration(IntegrationBase):
"""Concrete base for integrations that use standard Markdown commands.
Subclasses only need to set ``key``, ``config``, ``registrar_config``
(and optionally ``context_file``). Everything else is inherited.
``setup()`` processes command templates (replacing ``{SCRIPT}``,
``{ARGS}``, ``__AGENT__``, rewriting paths) and installs
integration-specific scripts (``update-context.sh`` / ``.ps1``).
"""
def setup(
self,
project_root: Path,
manifest: IntegrationManifest,
parsed_options: dict[str, Any] | None = None,
**opts: Any,
) -> list[Path]:
templates = self.list_command_templates()
if not templates:
return []
project_root_resolved = project_root.resolve()
if manifest.project_root != project_root_resolved:
raise ValueError(
f"manifest.project_root ({manifest.project_root}) does not match "
f"project_root ({project_root_resolved})"
)
dest = self.commands_dest(project_root).resolve()
try:
dest.relative_to(project_root_resolved)
except ValueError as exc:
raise ValueError(
f"Integration destination {dest} escapes "
f"project root {project_root_resolved}"
) from exc
dest.mkdir(parents=True, exist_ok=True)
script_type = opts.get("script_type", "sh")
arg_placeholder = self.registrar_config.get("args", "$ARGUMENTS") if self.registrar_config else "$ARGUMENTS"
created: list[Path] = []
for src_file in templates:
raw = src_file.read_text(encoding="utf-8")
processed = self.process_template(raw, self.key, script_type, arg_placeholder)
dst_name = self.command_filename(src_file.stem)
dst_file = self.write_file_and_record(
processed, dest / dst_name, project_root, manifest
)
created.append(dst_file)
created.extend(self.install_scripts(project_root, manifest))
return created
# ---------------------------------------------------------------------------
# TomlIntegration — TOML-format agents (Gemini, Tabnine)
# ---------------------------------------------------------------------------
class TomlIntegration(IntegrationBase):
"""Concrete base for integrations that use TOML command format.
Mirrors ``MarkdownIntegration`` closely: subclasses only need to set
``key``, ``config``, ``registrar_config`` (and optionally
``context_file``). Everything else is inherited.
``setup()`` processes command templates through the same placeholder
pipeline as ``MarkdownIntegration``, then converts the result to
TOML format (``description`` key + ``prompt`` multiline string).
"""
def command_filename(self, template_name: str) -> str:
"""TOML commands use ``.toml`` extension."""
return f"speckit.{template_name}.toml"
@staticmethod
def _extract_description(content: str) -> str:
"""Extract the ``description`` value from YAML frontmatter.
Scans lines between the first pair of ``---`` delimiters for a
top-level ``description:`` key. Returns the value (with
surrounding quotes stripped) or an empty string if not found.
"""
in_frontmatter = False
for line in content.splitlines():
stripped = line.rstrip("\n\r")
if stripped == "---":
if not in_frontmatter:
in_frontmatter = True
continue
break # second ---
if in_frontmatter and stripped.startswith("description:"):
_, _, value = stripped.partition(":")
return value.strip().strip('"').strip("'")
return ""
@staticmethod
def _render_toml(description: str, body: str) -> str:
"""Render a TOML command file from description and body.
Uses multiline basic strings (``\"\"\"``) with backslashes
escaped, matching the output of the release script. Falls back
to multiline literal strings (``'''``) if the body contains
``\"\"\"``, then to an escaped basic string as a last resort.
The body is rstrip'd so the closing delimiter appears on the line
immediately after the last content line — matching the release
script's ``echo "$body"; echo '\"\"\"'`` pattern.
"""
toml_lines: list[str] = []
if description:
desc = description.replace("\\", "\\\\").replace('"', '\\"')  # escape backslashes too, for valid TOML
toml_lines.append(f'description = "{desc}"')
toml_lines.append("")
body = body.rstrip("\n")
# Escape backslashes for basic multiline strings.
escaped = body.replace("\\", "\\\\")
if '"""' not in escaped:
toml_lines.append('prompt = """')
toml_lines.append(escaped)
toml_lines.append('"""')
elif "'''" not in body:
toml_lines.append("prompt = '''")
toml_lines.append(body)
toml_lines.append("'''")
else:
escaped_body = (
body.replace("\\", "\\\\")
.replace('"', '\\"')
.replace("\n", "\\n")
.replace("\r", "\\r")
.replace("\t", "\\t")
)
toml_lines.append(f'prompt = "{escaped_body}"')
return "\n".join(toml_lines) + "\n"
def setup(
self,
project_root: Path,
manifest: IntegrationManifest,
parsed_options: dict[str, Any] | None = None,
**opts: Any,
) -> list[Path]:
templates = self.list_command_templates()
if not templates:
return []
project_root_resolved = project_root.resolve()
if manifest.project_root != project_root_resolved:
raise ValueError(
f"manifest.project_root ({manifest.project_root}) does not match "
f"project_root ({project_root_resolved})"
)
dest = self.commands_dest(project_root).resolve()
try:
dest.relative_to(project_root_resolved)
except ValueError as exc:
raise ValueError(
f"Integration destination {dest} escapes "
f"project root {project_root_resolved}"
) from exc
dest.mkdir(parents=True, exist_ok=True)
script_type = opts.get("script_type", "sh")
arg_placeholder = self.registrar_config.get("args", "{{args}}") if self.registrar_config else "{{args}}"
created: list[Path] = []
for src_file in templates:
raw = src_file.read_text(encoding="utf-8")
description = self._extract_description(raw)
processed = self.process_template(raw, self.key, script_type, arg_placeholder)
toml_content = self._render_toml(description, processed)
dst_name = self.command_filename(src_file.stem)
dst_file = self.write_file_and_record(
toml_content, dest / dst_name, project_root, manifest
)
created.append(dst_file)
created.extend(self.install_scripts(project_root, manifest))
return created
# ---------------------------------------------------------------------------
# SkillsIntegration — skills-format agents (Codex, Kimi, Agy)
# ---------------------------------------------------------------------------
class SkillsIntegration(IntegrationBase):
"""Concrete base for integrations that install commands as agent skills.
Skills use the ``speckit-<name>/SKILL.md`` directory layout following
the `agentskills.io <https://agentskills.io/specification>`_ spec.
Subclasses set ``key``, ``config``, ``registrar_config`` (and
optionally ``context_file``) like any integration. They may also
override ``options()`` to declare additional CLI flags (e.g.
``--skills``, ``--migrate-legacy``).
``setup()`` processes each shared command template into a
``speckit-<name>/SKILL.md`` file with skills-oriented frontmatter.
"""
def skills_dest(self, project_root: Path) -> Path:
"""Return the absolute path to the skills output directory.
Derived from ``config["folder"]`` and the configured
``commands_subdir`` (defaults to ``"skills"``).
Raises ``ValueError`` when ``config`` or ``folder`` is missing.
"""
if not self.config:
raise ValueError(
f"{type(self).__name__}.config is not set."
)
folder = self.config.get("folder")
if not folder:
raise ValueError(
f"{type(self).__name__}.config is missing required 'folder' entry."
)
subdir = self.config.get("commands_subdir", "skills")
return project_root / folder / subdir
def setup(
self,
project_root: Path,
manifest: IntegrationManifest,
parsed_options: dict[str, Any] | None = None,
**opts: Any,
) -> list[Path]:
"""Install command templates as agent skills.
Creates ``speckit-<name>/SKILL.md`` for each shared command
template. Each SKILL.md has normalised frontmatter containing
``name``, ``description``, ``compatibility``, and ``metadata``.
"""
import yaml
templates = self.list_command_templates()
if not templates:
return []
project_root_resolved = project_root.resolve()
if manifest.project_root != project_root_resolved:
raise ValueError(
f"manifest.project_root ({manifest.project_root}) does not match "
f"project_root ({project_root_resolved})"
)
skills_dir = self.skills_dest(project_root).resolve()
try:
skills_dir.relative_to(project_root_resolved)
except ValueError as exc:
raise ValueError(
f"Skills destination {skills_dir} escapes "
f"project root {project_root_resolved}"
) from exc
script_type = opts.get("script_type", "sh")
arg_placeholder = (
self.registrar_config.get("args", "$ARGUMENTS")
if self.registrar_config
else "$ARGUMENTS"
)
created: list[Path] = []
for src_file in templates:
raw = src_file.read_text(encoding="utf-8")
# Derive the skill name from the template stem
command_name = src_file.stem # e.g. "plan"
skill_name = f"speckit-{command_name.replace('.', '-')}"
# Parse frontmatter for description
frontmatter: dict[str, Any] = {}
if raw.startswith("---"):
parts = raw.split("---", 2)
if len(parts) >= 3:
try:
fm = yaml.safe_load(parts[1])
if isinstance(fm, dict):
frontmatter = fm
except yaml.YAMLError:
pass
# Process body through the standard template pipeline
processed_body = self.process_template(
raw, self.key, script_type, arg_placeholder
)
# Strip the processed frontmatter — we rebuild it for skills.
# Preserve leading whitespace in the body to match release ZIP
# output byte-for-byte (the template body starts with \n after
# the closing ---).
if processed_body.startswith("---"):
parts = processed_body.split("---", 2)
if len(parts) >= 3:
processed_body = parts[2]
# Select description — use the original template description
# to stay byte-for-byte identical with release ZIP output.
description = frontmatter.get("description", "")
if not description:
description = f"Spec Kit: {command_name} workflow"
# Build SKILL.md with manually formatted frontmatter to match
# the release packaging script output exactly (double-quoted
# values, no yaml.safe_dump quoting differences).
def _quote(v: str) -> str:
escaped = v.replace("\\", "\\\\").replace('"', '\\"')
return f'"{escaped}"'
skill_content = (
f"---\n"
f"name: {_quote(skill_name)}\n"
f"description: {_quote(description)}\n"
f"compatibility: {_quote('Requires spec-kit project structure with .specify/ directory')}\n"
f"metadata:\n"
f" author: {_quote('github-spec-kit')}\n"
f" source: {_quote('templates/commands/' + src_file.name)}\n"
f"---\n"
f"{processed_body}"
)
# Write speckit-<name>/SKILL.md
skill_dir = skills_dir / skill_name
skill_file = skill_dir / "SKILL.md"
dst = self.write_file_and_record(
skill_content, skill_file, project_root, manifest
)
created.append(dst)
created.extend(self.install_scripts(project_root, manifest))
return created
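# The SKILL.md frontmatter generated above for a "plan" template looks like
# (description value hypothetical):
#
#     ---
#     name: "speckit-plan"
#     description: "Plan a feature"
#     compatibility: "Requires spec-kit project structure with .specify/ directory"
#     metadata:
#       author: "github-spec-kit"
#       source: "templates/commands/plan.md"
#     ---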

View File

@@ -1,21 +0,0 @@
"""IBM Bob integration."""
from ..base import MarkdownIntegration
class BobIntegration(MarkdownIntegration):
key = "bob"
config = {
"name": "IBM Bob",
"folder": ".bob/",
"commands_subdir": "commands",
"install_url": None,
"requires_cli": False,
}
registrar_config = {
"dir": ".bob/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md",
}
context_file = "AGENTS.md"

View File

@@ -1,23 +0,0 @@
# update-context.ps1 — IBM Bob integration: create/update AGENTS.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
$ErrorActionPreference = 'Stop'
# Derive repo root from script location (walks up to find .specify/)
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null }
# If git did not return a repo root, or the git root does not contain .specify,
# fall back to walking up from the script directory to find the initialized project root.
if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = $scriptDir
$fsRoot = [System.IO.Path]::GetPathRoot($repoRoot)
while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = Split-Path -Parent $repoRoot
}
}
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType bob

View File

@@ -1,28 +0,0 @@
#!/usr/bin/env bash
# update-context.sh — IBM Bob integration: create/update AGENTS.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
set -euo pipefail
# Derive repo root from script location (walks up to find .specify/)
_script_dir="$(cd "$(dirname "$0")" && pwd)"
_root="$_script_dir"
while [ "$_root" != "/" ] && [ ! -d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done
if [ -z "${REPO_ROOT:-}" ]; then
if [ -d "$_root/.specify" ]; then
REPO_ROOT="$_root"
else
git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then
REPO_ROOT="$git_root"
else
REPO_ROOT="$_root"
fi
fi
fi
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" bob

View File

@@ -1,109 +0,0 @@
"""Claude Code integration."""
from __future__ import annotations
from pathlib import Path
from typing import Any
import yaml
from ..base import SkillsIntegration
from ..manifest import IntegrationManifest
class ClaudeIntegration(SkillsIntegration):
"""Integration for Claude Code skills."""
key = "claude"
config = {
"name": "Claude Code",
"folder": ".claude/",
"commands_subdir": "skills",
"install_url": "https://docs.anthropic.com/en/docs/claude-code/setup",
"requires_cli": True,
}
registrar_config = {
"dir": ".claude/skills",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": "/SKILL.md",
}
context_file = "CLAUDE.md"
def command_filename(self, template_name: str) -> str:
"""Claude skills live at .claude/skills/<name>/SKILL.md."""
skill_name = f"speckit-{template_name.replace('.', '-')}"
return f"{skill_name}/SKILL.md"
def _render_skill(self, template_name: str, frontmatter: dict[str, Any], body: str) -> str:
"""Render a processed command template as a Claude skill."""
skill_name = f"speckit-{template_name.replace('.', '-')}"
description = frontmatter.get(
"description",
f"Spec-kit workflow command: {template_name}",
)
skill_frontmatter = self._build_skill_fm(
skill_name, description, f"templates/commands/{template_name}.md"
)
frontmatter_text = yaml.safe_dump(skill_frontmatter, sort_keys=False).strip()
return f"---\n{frontmatter_text}\n---\n\n{body.strip()}\n"
def _build_skill_fm(self, name: str, description: str, source: str) -> dict:
from specify_cli.agents import CommandRegistrar
return CommandRegistrar.build_skill_frontmatter(
self.key, name, description, source
)
def setup(
self,
project_root: Path,
manifest: IntegrationManifest,
parsed_options: dict[str, Any] | None = None,
**opts: Any,
) -> list[Path]:
"""Install Claude skills into .claude/skills."""
templates = self.list_command_templates()
if not templates:
return []
project_root_resolved = project_root.resolve()
if manifest.project_root != project_root_resolved:
raise ValueError(
f"manifest.project_root ({manifest.project_root}) does not match "
f"project_root ({project_root_resolved})"
)
dest = self.skills_dest(project_root).resolve()
try:
dest.relative_to(project_root_resolved)
except ValueError as exc:
raise ValueError(
f"Integration destination {dest} escapes "
f"project root {project_root_resolved}"
) from exc
dest.mkdir(parents=True, exist_ok=True)
script_type = opts.get("script_type", "sh")
arg_placeholder = self.registrar_config.get("args", "$ARGUMENTS")
from specify_cli.agents import CommandRegistrar
registrar = CommandRegistrar()
created: list[Path] = []
for src_file in templates:
raw = src_file.read_text(encoding="utf-8")
processed = self.process_template(raw, self.key, script_type, arg_placeholder)
frontmatter, body = registrar.parse_frontmatter(processed)
if not isinstance(frontmatter, dict):
frontmatter = {}
rendered = self._render_skill(src_file.stem, frontmatter, body)
dst_file = self.write_file_and_record(
rendered,
dest / self.command_filename(src_file.stem),
project_root,
manifest,
)
created.append(dst_file)
created.extend(self.install_scripts(project_root, manifest))
return created
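
For illustration, the hyphenation rule in `command_filename` above maps dotted template names onto per-skill directories; a standalone sketch of the same mapping (the helper name is hypothetical, not part of the module):

```python
def claude_skill_path(template_name: str) -> str:
    # Mirrors ClaudeIntegration.command_filename: dots become hyphens,
    # and each skill gets its own directory containing a SKILL.md file.
    skill_name = f"speckit-{template_name.replace('.', '-')}"
    return f"{skill_name}/SKILL.md"

assert claude_skill_path("plan") == "speckit-plan/SKILL.md"
assert claude_skill_path("tasks.verify") == "speckit-tasks-verify/SKILL.md"
```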

View File

@@ -1,23 +0,0 @@
# update-context.ps1 — Claude Code integration: create/update CLAUDE.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
$ErrorActionPreference = 'Stop'
# Derive repo root from script location (walks up to find .specify/)
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null }
# If git did not return a repo root, or the git root does not contain .specify,
# fall back to walking up from the script directory to find the initialized project root.
if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = $scriptDir
$fsRoot = [System.IO.Path]::GetPathRoot($repoRoot)
while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = Split-Path -Parent $repoRoot
}
}
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType claude

View File

@@ -1,28 +0,0 @@
#!/usr/bin/env bash
# update-context.sh — Claude Code integration: create/update CLAUDE.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
set -euo pipefail
# Derive repo root from script location (walks up to find .specify/)
_script_dir="$(cd "$(dirname "$0")" && pwd)"
_root="$_script_dir"
while [ "$_root" != "/" ] && [ ! -d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done
if [ -z "${REPO_ROOT:-}" ]; then
if [ -d "$_root/.specify" ]; then
REPO_ROOT="$_root"
else
git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then
REPO_ROOT="$git_root"
else
REPO_ROOT="$_root"
fi
fi
fi
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" claude

View File

@@ -1,21 +0,0 @@
"""CodeBuddy CLI integration."""
from ..base import MarkdownIntegration
class CodebuddyIntegration(MarkdownIntegration):
key = "codebuddy"
config = {
"name": "CodeBuddy",
"folder": ".codebuddy/",
"commands_subdir": "commands",
"install_url": "https://www.codebuddy.ai/cli",
"requires_cli": True,
}
registrar_config = {
"dir": ".codebuddy/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md",
}
context_file = "CODEBUDDY.md"

View File

@@ -1,23 +0,0 @@
# update-context.ps1 — CodeBuddy integration: create/update CODEBUDDY.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
$ErrorActionPreference = 'Stop'
# Derive repo root from script location (walks up to find .specify/)
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null }
# If git did not return a repo root, or the git root does not contain .specify,
# fall back to walking up from the script directory to find the initialized project root.
if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = $scriptDir
$fsRoot = [System.IO.Path]::GetPathRoot($repoRoot)
while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = Split-Path -Parent $repoRoot
}
}
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType codebuddy

View File

@@ -1,28 +0,0 @@
#!/usr/bin/env bash
# update-context.sh — CodeBuddy integration: create/update CODEBUDDY.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
set -euo pipefail
# Derive repo root from script location (walks up to find .specify/)
_script_dir="$(cd "$(dirname "$0")" && pwd)"
_root="$_script_dir"
while [ "$_root" != "/" ] && [ ! -d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done
if [ -z "${REPO_ROOT:-}" ]; then
if [ -d "$_root/.specify" ]; then
REPO_ROOT="$_root"
else
git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then
REPO_ROOT="$git_root"
else
REPO_ROOT="$_root"
fi
fi
fi
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" codebuddy

View File

@@ -1,40 +0,0 @@
"""Codex CLI integration — skills-based agent.
Codex uses the ``.agents/skills/speckit-<name>/SKILL.md`` layout.
Commands are deprecated; ``--skills`` defaults to ``True``.
"""
from __future__ import annotations
from ..base import IntegrationOption, SkillsIntegration
class CodexIntegration(SkillsIntegration):
"""Integration for OpenAI Codex CLI."""
key = "codex"
config = {
"name": "Codex CLI",
"folder": ".agents/",
"commands_subdir": "skills",
"install_url": "https://github.com/openai/codex",
"requires_cli": True,
}
registrar_config = {
"dir": ".agents/skills",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": "/SKILL.md",
}
context_file = "AGENTS.md"
@classmethod
def options(cls) -> list[IntegrationOption]:
return [
IntegrationOption(
"--skills",
is_flag=True,
default=True,
help="Install as agent skills (default for Codex)",
),
]
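
The `options()` hook is how an integration declares its own CLI flags. The constructor arguments below are inferred from the usages visible in this diff (`is_flag`/`default` from Codex and Kimi, `required` from the generic integration further down), and the import path is an assumption:

```python
from specify_cli.integrations.base import IntegrationOption  # assumed path

def example_options() -> list[IntegrationOption]:
    # A boolean flag with a default, plus a required value option —
    # the two shapes this diff actually exercises.
    return [
        IntegrationOption("--skills", is_flag=True, default=True,
                          help="Install as agent skills"),
        IntegrationOption("--commands-dir", required=True,
                          help="Directory for command files"),
    ]
```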

View File

@@ -1,17 +0,0 @@
# update-context.ps1 — Codex CLI integration: create/update AGENTS.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
$ErrorActionPreference = 'Stop'
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null }
if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = $scriptDir
$fsRoot = [System.IO.Path]::GetPathRoot($repoRoot)
while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = Split-Path -Parent $repoRoot
}
}
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType codex

View File

@@ -1,24 +0,0 @@
#!/usr/bin/env bash
# update-context.sh — Codex CLI integration: create/update AGENTS.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
set -euo pipefail
_script_dir="$(cd "$(dirname "$0")" && pwd)"
_root="$_script_dir"
while [ "$_root" != "/" ] && [ ! -d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done
if [ -z "${REPO_ROOT:-}" ]; then
if [ -d "$_root/.specify" ]; then
REPO_ROOT="$_root"
else
git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then
REPO_ROOT="$git_root"
else
REPO_ROOT="$_root"
fi
fi
fi
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" codex

View File

@@ -1,185 +0,0 @@
"""Copilot integration — GitHub Copilot in VS Code.
Copilot has several unique behaviors compared to standard markdown agents:
- Commands use ``.agent.md`` extension (not ``.md``)
- Each command gets a companion ``.prompt.md`` file in ``.github/prompts/``
- Installs ``.vscode/settings.json`` with prompt file recommendations
- Context file lives at ``.github/copilot-instructions.md``
"""
from __future__ import annotations
import json
import shutil
from pathlib import Path
from typing import Any
from ..base import IntegrationBase
from ..manifest import IntegrationManifest
class CopilotIntegration(IntegrationBase):
"""Integration for GitHub Copilot in VS Code."""
key = "copilot"
config = {
"name": "GitHub Copilot",
"folder": ".github/",
"commands_subdir": "agents",
"install_url": None,
"requires_cli": False,
}
registrar_config = {
"dir": ".github/agents",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".agent.md",
}
context_file = ".github/copilot-instructions.md"
def command_filename(self, template_name: str) -> str:
"""Copilot commands use ``.agent.md`` extension."""
return f"speckit.{template_name}.agent.md"
def setup(
self,
project_root: Path,
manifest: IntegrationManifest,
parsed_options: dict[str, Any] | None = None,
**opts: Any,
) -> list[Path]:
"""Install copilot commands, companion prompts, and VS Code settings.
Uses base class primitives to: read templates, process them
(replace placeholders, strip script blocks, rewrite paths),
write as ``.agent.md``, then add companion prompts and VS Code settings.
"""
project_root_resolved = project_root.resolve()
if manifest.project_root != project_root_resolved:
raise ValueError(
f"manifest.project_root ({manifest.project_root}) does not match "
f"project_root ({project_root_resolved})"
)
templates = self.list_command_templates()
if not templates:
return []
dest = self.commands_dest(project_root)
dest_resolved = dest.resolve()
try:
dest_resolved.relative_to(project_root_resolved)
except ValueError as exc:
raise ValueError(
f"Integration destination {dest_resolved} escapes "
f"project root {project_root_resolved}"
) from exc
dest.mkdir(parents=True, exist_ok=True)
created: list[Path] = []
script_type = opts.get("script_type", "sh")
arg_placeholder = self.registrar_config.get("args", "$ARGUMENTS")
# 1. Process and write command files as .agent.md
for src_file in templates:
raw = src_file.read_text(encoding="utf-8")
processed = self.process_template(raw, self.key, script_type, arg_placeholder)
dst_name = self.command_filename(src_file.stem)
dst_file = self.write_file_and_record(
processed, dest / dst_name, project_root, manifest
)
created.append(dst_file)
# 2. Generate companion .prompt.md files from the templates we just wrote
prompts_dir = project_root / ".github" / "prompts"
for src_file in templates:
cmd_name = f"speckit.{src_file.stem}"
prompt_content = f"---\nagent: {cmd_name}\n---\n"
prompt_file = self.write_file_and_record(
prompt_content,
prompts_dir / f"{cmd_name}.prompt.md",
project_root,
manifest,
)
created.append(prompt_file)
# Write .vscode/settings.json
settings_src = self._vscode_settings_path()
if settings_src and settings_src.is_file():
dst_settings = project_root / ".vscode" / "settings.json"
dst_settings.parent.mkdir(parents=True, exist_ok=True)
if dst_settings.exists():
# Merge into existing — don't track since we can't safely
# remove the user's settings file on uninstall.
self._merge_vscode_settings(settings_src, dst_settings)
else:
shutil.copy2(settings_src, dst_settings)
self.record_file_in_manifest(dst_settings, project_root, manifest)
created.append(dst_settings)
# 4. Install integration-specific update-context scripts
created.extend(self.install_scripts(project_root, manifest))
return created
def _vscode_settings_path(self) -> Path | None:
"""Return path to the bundled vscode-settings.json template."""
tpl_dir = self.shared_templates_dir()
if tpl_dir:
candidate = tpl_dir / "vscode-settings.json"
if candidate.is_file():
return candidate
return None
@staticmethod
def _merge_vscode_settings(src: Path, dst: Path) -> None:
"""Merge settings from *src* into existing *dst* JSON file.
Top-level keys from *src* are added only if missing in *dst*.
For dict-valued keys, sub-keys are merged the same way.
If *dst* cannot be parsed (e.g. JSONC with comments), the merge
is skipped to avoid overwriting user settings.
"""
try:
existing = json.loads(dst.read_text(encoding="utf-8"))
except (json.JSONDecodeError, OSError):
# Cannot parse existing file (likely JSONC with comments).
# Skip merge to preserve the user's settings, but show
# what they should add manually.
import logging
template_content = src.read_text(encoding="utf-8")
logging.getLogger(__name__).warning(
"Could not parse %s (may contain JSONC comments). "
"Skipping settings merge to preserve existing file.\n"
"Please add the following settings manually:\n%s",
dst, template_content,
)
return
new_settings = json.loads(src.read_text(encoding="utf-8"))
if not isinstance(existing, dict) or not isinstance(new_settings, dict):
import logging
logging.getLogger(__name__).warning(
"Skipping settings merge: %s or template is not a JSON object.", dst
)
return
changed = False
for key, value in new_settings.items():
if key not in existing:
existing[key] = value
changed = True
elif isinstance(existing[key], dict) and isinstance(value, dict):
for sub_key, sub_value in value.items():
if sub_key not in existing[key]:
existing[key][sub_key] = sub_value
changed = True
if not changed:
return
dst.write_text(
json.dumps(existing, indent=4) + "\n", encoding="utf-8"
)
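
A worked example of the one-level-deep merge semantics above, as a standalone re-implementation (the setting keys are illustrative, not the bundled template's actual contents):

```python
existing = {"files.exclude": {"**/.git": True}, "editor.tabSize": 2}
template = {"files.exclude": {"**/.specify": True}, "chat.promptFiles": True}

for key, value in template.items():
    if key not in existing:
        existing[key] = value
    elif isinstance(existing[key], dict) and isinstance(value, dict):
        # Sub-keys are added only where missing; user values always win.
        for sub_key, sub_value in value.items():
            existing[key].setdefault(sub_key, sub_value)

assert existing["editor.tabSize"] == 2                # user value preserved
assert existing["files.exclude"]["**/.specify"] is True  # sub-key added
assert existing["chat.promptFiles"] is True              # new top-level key added
```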

View File

@@ -1,32 +0,0 @@
# update-context.ps1 — Copilot integration: create/update .github/copilot-instructions.md
#
# This is the copilot-specific implementation that produces the GitHub
# Copilot instructions file. The shared dispatcher reads
# .specify/integration.json and calls this script.
#
# NOTE: This script is not yet active. It will be activated in Stage 7
# when the shared update-agent-context.ps1 replaces its switch statement
# with integration.json-based dispatch. The shared script must also be
# refactored to support SPECKIT_SOURCE_ONLY (guard the Main call) before
# dot-sourcing will work.
#
# Until then, this delegates to the shared script as a subprocess.
$ErrorActionPreference = 'Stop'
# Derive repo root from script location (walks up to find .specify/)
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null }
# If git did not return a repo root, or the git root does not contain .specify,
# fall back to walking up from the script directory to find the initialized project root.
if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = $scriptDir
$fsRoot = [System.IO.Path]::GetPathRoot($repoRoot)
while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = Split-Path -Parent $repoRoot
}
}
# Invoke shared update-agent-context script as a separate process.
# Dot-sourcing is unsafe until that script guards its Main call.
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType copilot

View File

@@ -1,37 +0,0 @@
#!/usr/bin/env bash
# update-context.sh — Copilot integration: create/update .github/copilot-instructions.md
#
# This is the copilot-specific implementation that produces the GitHub
# Copilot instructions file. The shared dispatcher reads
# .specify/integration.json and calls this script.
#
# NOTE: This script is not yet active. It will be activated in Stage 7
# when the shared update-agent-context.sh replaces its case statement
# with integration.json-based dispatch. The shared script must also be
# refactored to support SPECKIT_SOURCE_ONLY (guard the main logic)
# before sourcing will work.
#
# Until then, this delegates to the shared script as a subprocess.
set -euo pipefail
# Derive repo root from script location (walks up to find .specify/)
_script_dir="$(cd "$(dirname "$0")" && pwd)"
_root="$_script_dir"
while [ "$_root" != "/" ] && [ ! -d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done
if [ -z "${REPO_ROOT:-}" ]; then
if [ -d "$_root/.specify" ]; then
REPO_ROOT="$_root"
else
git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then
REPO_ROOT="$git_root"
else
REPO_ROOT="$_root"
fi
fi
fi
# Invoke shared update-agent-context script as a separate process.
# Sourcing is unsafe until that script guards its main logic.
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" copilot

View File

@@ -1,21 +0,0 @@
"""Cursor IDE integration."""
from ..base import MarkdownIntegration
class CursorAgentIntegration(MarkdownIntegration):
key = "cursor-agent"
config = {
"name": "Cursor",
"folder": ".cursor/",
"commands_subdir": "commands",
"install_url": None,
"requires_cli": False,
}
registrar_config = {
"dir": ".cursor/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md",
}
context_file = ".cursor/rules/specify-rules.mdc"

View File

@@ -1,23 +0,0 @@
# update-context.ps1 — Cursor integration: create/update .cursor/rules/specify-rules.mdc
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
$ErrorActionPreference = 'Stop'
# Derive repo root from script location (walks up to find .specify/)
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null }
# If git did not return a repo root, or the git root does not contain .specify,
# fall back to walking up from the script directory to find the initialized project root.
if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = $scriptDir
$fsRoot = [System.IO.Path]::GetPathRoot($repoRoot)
while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = Split-Path -Parent $repoRoot
}
}
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType cursor-agent

View File

@@ -1,28 +0,0 @@
#!/usr/bin/env bash
# update-context.sh — Cursor integration: create/update .cursor/rules/specify-rules.mdc
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
set -euo pipefail
# Derive repo root from script location (walks up to find .specify/)
_script_dir="$(cd "$(dirname "$0")" && pwd)"
_root="$_script_dir"
while [ "$_root" != "/" ] && [ ! -d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done
if [ -z "${REPO_ROOT:-}" ]; then
if [ -d "$_root/.specify" ]; then
REPO_ROOT="$_root"
else
git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then
REPO_ROOT="$git_root"
else
REPO_ROOT="$_root"
fi
fi
fi
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" cursor-agent

View File

@@ -1,21 +0,0 @@
"""Gemini CLI integration."""
from ..base import TomlIntegration
class GeminiIntegration(TomlIntegration):
key = "gemini"
config = {
"name": "Gemini CLI",
"folder": ".gemini/",
"commands_subdir": "commands",
"install_url": "https://github.com/google-gemini/gemini-cli",
"requires_cli": True,
}
registrar_config = {
"dir": ".gemini/commands",
"format": "toml",
"args": "{{args}}",
"extension": ".toml",
}
context_file = "GEMINI.md"

View File

@@ -1,23 +0,0 @@
# update-context.ps1 — Gemini CLI integration: create/update GEMINI.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
$ErrorActionPreference = 'Stop'
# Derive repo root from script location (walks up to find .specify/)
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null }
# If git did not return a repo root, or the git root does not contain .specify,
# fall back to walking up from the script directory to find the initialized project root.
if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = $scriptDir
$fsRoot = [System.IO.Path]::GetPathRoot($repoRoot)
while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = Split-Path -Parent $repoRoot
}
}
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType gemini

View File

@@ -1,28 +0,0 @@
#!/usr/bin/env bash
# update-context.sh — Gemini CLI integration: create/update GEMINI.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
set -euo pipefail
# Derive repo root from script location (walks up to find .specify/)
_script_dir="$(cd "$(dirname "$0")" && pwd)"
_root="$_script_dir"
while [ "$_root" != "/" ] && [ ! -d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done
if [ -z "${REPO_ROOT:-}" ]; then
if [ -d "$_root/.specify" ]; then
REPO_ROOT="$_root"
else
git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then
REPO_ROOT="$git_root"
else
REPO_ROOT="$_root"
fi
fi
fi
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" gemini

View File

@@ -1,133 +0,0 @@
"""Generic integration — bring your own agent.
Requires ``--commands-dir`` to specify the output directory for command
files. No longer special-cased in the core CLI — just another
integration with its own required option.
"""
from __future__ import annotations
from pathlib import Path
from typing import Any
from ..base import IntegrationOption, MarkdownIntegration
from ..manifest import IntegrationManifest
class GenericIntegration(MarkdownIntegration):
"""Integration for user-specified (generic) agents."""
key = "generic"
config = {
"name": "Generic (bring your own agent)",
"folder": None, # Set dynamically from --commands-dir
"commands_subdir": "commands",
"install_url": None,
"requires_cli": False,
}
registrar_config = {
"dir": "", # Set dynamically from --commands-dir
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md",
}
context_file = None
@classmethod
def options(cls) -> list[IntegrationOption]:
return [
IntegrationOption(
"--commands-dir",
required=True,
help="Directory for command files (e.g. .myagent/commands/)",
),
]
@staticmethod
def _resolve_commands_dir(
parsed_options: dict[str, Any] | None,
opts: dict[str, Any],
) -> str:
"""Extract ``--commands-dir`` from parsed options or raw_options.
Returns the directory string or raises ``ValueError``.
"""
parsed_options = parsed_options or {}
commands_dir = parsed_options.get("commands_dir")
if commands_dir:
return commands_dir
# Fall back to raw_options (--integration-options="--commands-dir ...")
raw = opts.get("raw_options")
if raw:
import shlex
tokens = shlex.split(raw)
for i, token in enumerate(tokens):
if token == "--commands-dir" and i + 1 < len(tokens):
return tokens[i + 1]
if token.startswith("--commands-dir="):
return token.split("=", 1)[1]
raise ValueError(
"--commands-dir is required for the generic integration"
)
def commands_dest(self, project_root: Path) -> Path:
"""Not supported for GenericIntegration — use setup() directly.
GenericIntegration is stateless; the output directory comes from
``parsed_options`` or ``raw_options`` at call time, not from
instance state.
"""
raise ValueError(
"GenericIntegration.commands_dest() cannot be called directly; "
"the output directory is resolved from parsed_options in setup()"
)
def setup(
self,
project_root: Path,
manifest: IntegrationManifest,
parsed_options: dict[str, Any] | None = None,
**opts: Any,
) -> list[Path]:
"""Install commands to the user-provided commands directory."""
commands_dir = self._resolve_commands_dir(parsed_options, opts)
templates = self.list_command_templates()
if not templates:
return []
project_root_resolved = project_root.resolve()
if manifest.project_root != project_root_resolved:
raise ValueError(
f"manifest.project_root ({manifest.project_root}) does not match "
f"project_root ({project_root_resolved})"
)
dest = (project_root / commands_dir).resolve()
try:
dest.relative_to(project_root_resolved)
except ValueError as exc:
raise ValueError(
f"Integration destination {dest} escapes "
f"project root {project_root_resolved}"
) from exc
dest.mkdir(parents=True, exist_ok=True)
script_type = opts.get("script_type", "sh")
arg_placeholder = "$ARGUMENTS"
created: list[Path] = []
for src_file in templates:
raw = src_file.read_text(encoding="utf-8")
processed = self.process_template(raw, self.key, script_type, arg_placeholder)
dst_name = self.command_filename(src_file.stem)
dst_file = self.write_file_and_record(
processed, dest / dst_name, project_root, manifest
)
created.append(dst_file)
created.extend(self.install_scripts(project_root, manifest))
return created
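
The raw_options fallback in `_resolve_commands_dir` handles both space-separated and `=`-joined forms. A standalone demo of that shlex-based recovery (the function name is illustrative):

```python
import shlex

def find_commands_dir(raw: str) -> str | None:
    # Mirrors the raw_options fallback above: tokenize with shell rules,
    # then accept either "--commands-dir VALUE" or "--commands-dir=VALUE".
    tokens = shlex.split(raw)
    for i, token in enumerate(tokens):
        if token == "--commands-dir" and i + 1 < len(tokens):
            return tokens[i + 1]
        if token.startswith("--commands-dir="):
            return token.split("=", 1)[1]
    return None

assert find_commands_dir("--commands-dir .myagent/commands") == ".myagent/commands"
assert find_commands_dir("--commands-dir=.myagent/commands") == ".myagent/commands"
assert find_commands_dir("--other-flag") is None
```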

View File

@@ -1,17 +0,0 @@
# update-context.ps1 — Generic integration: create/update context file
#
# Thin wrapper that delegates to the shared update-agent-context script.
$ErrorActionPreference = 'Stop'
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null }
if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = $scriptDir
$fsRoot = [System.IO.Path]::GetPathRoot($repoRoot)
while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = Split-Path -Parent $repoRoot
}
}
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType generic

View File

@@ -1,24 +0,0 @@
#!/usr/bin/env bash
# update-context.sh — Generic integration: create/update context file
#
# Thin wrapper that delegates to the shared update-agent-context script.
set -euo pipefail
_script_dir="$(cd "$(dirname "$0")" && pwd)"
_root="$_script_dir"
while [ "$_root" != "/" ] && [ ! -d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done
if [ -z "${REPO_ROOT:-}" ]; then
if [ -d "$_root/.specify" ]; then
REPO_ROOT="$_root"
else
git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then
REPO_ROOT="$git_root"
else
REPO_ROOT="$_root"
fi
fi
fi
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" generic

View File

@@ -1,21 +0,0 @@
"""iFlow CLI integration."""
from ..base import MarkdownIntegration
class IflowIntegration(MarkdownIntegration):
key = "iflow"
config = {
"name": "iFlow CLI",
"folder": ".iflow/",
"commands_subdir": "commands",
"install_url": "https://docs.iflow.cn/en/cli/quickstart",
"requires_cli": True,
}
registrar_config = {
"dir": ".iflow/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md",
}
context_file = "IFLOW.md"

View File

@@ -1,23 +0,0 @@
# update-context.ps1 — iFlow CLI integration: create/update IFLOW.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
$ErrorActionPreference = 'Stop'
# Derive repo root from script location (walks up to find .specify/)
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null }
# If git did not return a repo root, or the git root does not contain .specify,
# fall back to walking up from the script directory to find the initialized project root.
if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = $scriptDir
$fsRoot = [System.IO.Path]::GetPathRoot($repoRoot)
while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = Split-Path -Parent $repoRoot
}
}
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType iflow

View File

@@ -1,28 +0,0 @@
#!/usr/bin/env bash
# update-context.sh — iFlow CLI integration: create/update IFLOW.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
set -euo pipefail
# Derive repo root from script location (walks up to find .specify/)
_script_dir="$(cd "$(dirname "$0")" && pwd)"
_root="$_script_dir"
while [ "$_root" != "/" ] && [ ! -d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done
if [ -z "${REPO_ROOT:-}" ]; then
if [ -d "$_root/.specify" ]; then
REPO_ROOT="$_root"
else
git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then
REPO_ROOT="$git_root"
else
REPO_ROOT="$_root"
fi
fi
fi
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" iflow

View File

@@ -1,21 +0,0 @@
"""Junie integration (JetBrains)."""
from ..base import MarkdownIntegration
class JunieIntegration(MarkdownIntegration):
key = "junie"
config = {
"name": "Junie",
"folder": ".junie/",
"commands_subdir": "commands",
"install_url": "https://junie.jetbrains.com/",
"requires_cli": True,
}
registrar_config = {
"dir": ".junie/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md",
}
context_file = ".junie/AGENTS.md"

View File

@@ -1,23 +0,0 @@
# update-context.ps1 — Junie integration: create/update .junie/AGENTS.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
$ErrorActionPreference = 'Stop'
# Derive repo root from script location (walks up to find .specify/)
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null }
# If git did not return a repo root, or the git root does not contain .specify,
# fall back to walking up from the script directory to find the initialized project root.
if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = $scriptDir
$fsRoot = [System.IO.Path]::GetPathRoot($repoRoot)
while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = Split-Path -Parent $repoRoot
}
}
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType junie

View File

@@ -1,28 +0,0 @@
#!/usr/bin/env bash
# update-context.sh — Junie integration: create/update .junie/AGENTS.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
set -euo pipefail
# Derive repo root from script location (walks up to find .specify/)
_script_dir="$(cd "$(dirname "$0")" && pwd)"
_root="$_script_dir"
while [ "$_root" != "/" ] && [ ! -d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done
if [ -z "${REPO_ROOT:-}" ]; then
if [ -d "$_root/.specify" ]; then
REPO_ROOT="$_root"
else
git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then
REPO_ROOT="$git_root"
else
REPO_ROOT="$_root"
fi
fi
fi
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" junie

View File

@@ -1,21 +0,0 @@
"""Kilo Code integration."""
from ..base import MarkdownIntegration
class KilocodeIntegration(MarkdownIntegration):
key = "kilocode"
config = {
"name": "Kilo Code",
"folder": ".kilocode/",
"commands_subdir": "workflows",
"install_url": None,
"requires_cli": False,
}
registrar_config = {
"dir": ".kilocode/workflows",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md",
}
context_file = ".kilocode/rules/specify-rules.md"

View File

@@ -1,23 +0,0 @@
# update-context.ps1 — Kilo Code integration: create/update .kilocode/rules/specify-rules.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
$ErrorActionPreference = 'Stop'
# Derive repo root from script location (walks up to find .specify/)
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null }
# If git did not return a repo root, or the git root does not contain .specify,
# fall back to walking up from the script directory to find the initialized project root.
if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = $scriptDir
$fsRoot = [System.IO.Path]::GetPathRoot($repoRoot)
while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = Split-Path -Parent $repoRoot
}
}
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType kilocode

View File

@@ -1,28 +0,0 @@
#!/usr/bin/env bash
# update-context.sh — Kilo Code integration: create/update .kilocode/rules/specify-rules.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
set -euo pipefail
# Derive repo root from script location (walks up to find .specify/)
_script_dir="$(cd "$(dirname "$0")" && pwd)"
_root="$_script_dir"
while [ "$_root" != "/" ] && [ ! -d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done
if [ -z "${REPO_ROOT:-}" ]; then
if [ -d "$_root/.specify" ]; then
REPO_ROOT="$_root"
else
git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then
REPO_ROOT="$git_root"
else
REPO_ROOT="$_root"
fi
fi
fi
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" kilocode

View File

@@ -1,124 +0,0 @@
"""Kimi Code integration — skills-based agent (Moonshot AI).
Kimi uses the ``.kimi/skills/speckit-<name>/SKILL.md`` layout with
``/skill:speckit-<name>`` invocation syntax.
Includes legacy migration logic for projects initialised before Kimi
moved from dotted skill directories (``speckit.xxx``) to hyphenated
(``speckit-xxx``).
"""
from __future__ import annotations
import shutil
from pathlib import Path
from typing import Any
from ..base import IntegrationOption, SkillsIntegration
from ..manifest import IntegrationManifest
class KimiIntegration(SkillsIntegration):
"""Integration for Kimi Code CLI (Moonshot AI)."""
key = "kimi"
config = {
"name": "Kimi Code",
"folder": ".kimi/",
"commands_subdir": "skills",
"install_url": "https://code.kimi.com/",
"requires_cli": True,
}
registrar_config = {
"dir": ".kimi/skills",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": "/SKILL.md",
}
context_file = "KIMI.md"
@classmethod
def options(cls) -> list[IntegrationOption]:
return [
IntegrationOption(
"--skills",
is_flag=True,
default=True,
help="Install as agent skills (default for Kimi)",
),
IntegrationOption(
"--migrate-legacy",
is_flag=True,
default=False,
help="Migrate legacy dotted skill dirs (speckit.xxx → speckit-xxx)",
),
]
def setup(
self,
project_root: Path,
manifest: IntegrationManifest,
parsed_options: dict[str, Any] | None = None,
**opts: Any,
) -> list[Path]:
"""Install skills with optional legacy dotted-name migration."""
parsed_options = parsed_options or {}
# Run base setup first so hyphenated targets (speckit-*) exist,
# then migrate/clean legacy dotted dirs without risking user content loss.
created = super().setup(
project_root, manifest, parsed_options=parsed_options, **opts
)
if parsed_options.get("migrate_legacy", False):
skills_dir = self.skills_dest(project_root)
if skills_dir.is_dir():
_migrate_legacy_kimi_dotted_skills(skills_dir)
return created
def _migrate_legacy_kimi_dotted_skills(skills_dir: Path) -> tuple[int, int]:
"""Migrate legacy Kimi dotted skill dirs (speckit.xxx) to hyphenated format.
Returns ``(migrated_count, removed_count)``.
"""
if not skills_dir.is_dir():
return (0, 0)
migrated_count = 0
removed_count = 0
for legacy_dir in sorted(skills_dir.glob("speckit.*")):
if not legacy_dir.is_dir():
continue
if not (legacy_dir / "SKILL.md").exists():
continue
suffix = legacy_dir.name[len("speckit."):]
if not suffix:
continue
target_dir = skills_dir / f"speckit-{suffix.replace('.', '-')}"
if not target_dir.exists():
shutil.move(str(legacy_dir), str(target_dir))
migrated_count += 1
continue
# Target exists — only remove legacy if SKILL.md is identical
target_skill = target_dir / "SKILL.md"
legacy_skill = legacy_dir / "SKILL.md"
if target_skill.is_file():
try:
if target_skill.read_bytes() == legacy_skill.read_bytes():
has_extra = any(
child.name != "SKILL.md" for child in legacy_dir.iterdir()
)
if not has_extra:
shutil.rmtree(legacy_dir)
removed_count += 1
except OSError:
pass
return (migrated_count, removed_count)
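
A standalone illustration of the happy-path rename the migration performs: a dotted legacy directory with a SKILL.md moves to the hyphenated name. This sketch re-implements just that step in a throwaway temp tree:

```python
import shutil
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    skills = Path(tmp)
    legacy = skills / "speckit.plan"
    legacy.mkdir()
    (legacy / "SKILL.md").write_text("# plan skill\n", encoding="utf-8")

    # Same rename rule as _migrate_legacy_kimi_dotted_skills above.
    suffix = legacy.name[len("speckit."):]
    target = skills / f"speckit-{suffix.replace('.', '-')}"
    if not target.exists():
        shutil.move(str(legacy), str(target))

    assert (skills / "speckit-plan" / "SKILL.md").is_file()
```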

View File

@@ -1,17 +0,0 @@
# update-context.ps1 — Kimi Code integration: create/update KIMI.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
$ErrorActionPreference = 'Stop'
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null }
if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = $scriptDir
$fsRoot = [System.IO.Path]::GetPathRoot($repoRoot)
while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = Split-Path -Parent $repoRoot
}
}
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType kimi

View File

@@ -1,24 +0,0 @@
#!/usr/bin/env bash
# update-context.sh — Kimi Code integration: create/update KIMI.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
set -euo pipefail
_script_dir="$(cd "$(dirname "$0")" && pwd)"
_root="$_script_dir"
while [ "$_root" != "/" ] && [ ! -d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done
if [ -z "${REPO_ROOT:-}" ]; then
if [ -d "$_root/.specify" ]; then
REPO_ROOT="$_root"
else
git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then
REPO_ROOT="$git_root"
else
REPO_ROOT="$_root"
fi
fi
fi
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" kimi

View File

@@ -1,21 +0,0 @@
"""Kiro CLI integration."""
from ..base import MarkdownIntegration
class KiroCliIntegration(MarkdownIntegration):
key = "kiro-cli"
config = {
"name": "Kiro CLI",
"folder": ".kiro/",
"commands_subdir": "prompts",
"install_url": "https://kiro.dev/docs/cli/",
"requires_cli": True,
}
registrar_config = {
"dir": ".kiro/prompts",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md",
}
context_file = "AGENTS.md"

View File

@@ -1,23 +0,0 @@
# update-context.ps1 — Kiro CLI integration: create/update AGENTS.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
$ErrorActionPreference = 'Stop'
# Derive repo root from script location (walks up to find .specify/)
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null }
# If git did not return a repo root, or the git root does not contain .specify,
# fall back to walking up from the script directory to find the initialized project root.
if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = $scriptDir
$fsRoot = [System.IO.Path]::GetPathRoot($repoRoot)
while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) {
$repoRoot = Split-Path -Parent $repoRoot
}
}
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType kiro-cli

View File

@@ -1,28 +0,0 @@
#!/usr/bin/env bash
# update-context.sh — Kiro CLI integration: create/update AGENTS.md
#
# Thin wrapper that delegates to the shared update-agent-context script.
# Activated in Stage 7 when the shared script uses integration.json dispatch.
#
# Until then, this delegates to the shared script as a subprocess.
set -euo pipefail
# Derive repo root from script location (walks up to find .specify/)
_script_dir="$(cd "$(dirname "$0")" && pwd)"
_root="$_script_dir"
while [ "$_root" != "/" ] && [ ! -d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done
if [ -z "${REPO_ROOT:-}" ]; then
if [ -d "$_root/.specify" ]; then
REPO_ROOT="$_root"
else
git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then
REPO_ROOT="$git_root"
else
REPO_ROOT="$_root"
fi
fi
fi
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" kiro-cli

View File

@@ -1,265 +0,0 @@
"""Hash-tracked installation manifest for integrations.
Each installed integration records the files it created together with
their SHA-256 hashes. On uninstall only files whose hash still matches
the recorded value are removed — modified files are left in place and
reported to the caller.
"""
from __future__ import annotations
import hashlib
import json
import os
from datetime import datetime, timezone
from pathlib import Path
from typing import Any
def _sha256(path: Path) -> str:
"""Return the hex SHA-256 digest of *path*."""
h = hashlib.sha256()
with open(path, "rb") as fh:
for chunk in iter(lambda: fh.read(8192), b""):
h.update(chunk)
return h.hexdigest()
def _validate_rel_path(rel: Path, root: Path) -> Path:
"""Resolve *rel* against *root* and verify it stays within *root*.
Raises ``ValueError`` if *rel* is absolute, contains ``..`` segments
that escape *root*, or otherwise resolves outside the project root.
"""
if rel.is_absolute():
raise ValueError(
f"Absolute paths are not allowed in manifests: {rel}"
)
resolved = (root / rel).resolve()
root_resolved = root.resolve()
try:
resolved.relative_to(root_resolved)
except ValueError:
raise ValueError(
f"Path {rel} resolves to {resolved} which is outside "
f"the project root {root_resolved}"
) from None
return resolved
class IntegrationManifest:
"""Tracks files installed by a single integration.
Parameters:
key: Integration identifier (e.g. ``"copilot"``).
project_root: Absolute path to the project directory.
version: CLI version string recorded in the manifest.
"""
def __init__(self, key: str, project_root: Path, version: str = "") -> None:
self.key = key
self.project_root = project_root.resolve()
self.version = version
self._files: dict[str, str] = {} # rel_path → sha256 hex
self._installed_at: str = ""
# -- Manifest file location -------------------------------------------
@property
def manifest_path(self) -> Path:
"""Path to the on-disk manifest JSON."""
return self.project_root / ".specify" / "integrations" / f"{self.key}.manifest.json"
# -- Recording files --------------------------------------------------
def record_file(self, rel_path: str | Path, content: bytes | str) -> Path:
"""Write *content* to *rel_path* (relative to project root) and record its hash.
Creates parent directories as needed. Returns the absolute path
of the written file.
Raises ``ValueError`` if *rel_path* resolves outside the project root.
"""
rel = Path(rel_path)
abs_path = _validate_rel_path(rel, self.project_root)
abs_path.parent.mkdir(parents=True, exist_ok=True)
if isinstance(content, str):
content = content.encode("utf-8")
abs_path.write_bytes(content)
normalized = abs_path.relative_to(self.project_root).as_posix()
self._files[normalized] = hashlib.sha256(content).hexdigest()
return abs_path
def record_existing(self, rel_path: str | Path) -> None:
"""Record the hash of an already-existing file at *rel_path*.
Raises ``ValueError`` if *rel_path* resolves outside the project root.
"""
rel = Path(rel_path)
abs_path = _validate_rel_path(rel, self.project_root)
normalized = abs_path.relative_to(self.project_root).as_posix()
self._files[normalized] = _sha256(abs_path)
# -- Querying ---------------------------------------------------------
@property
def files(self) -> dict[str, str]:
"""Return a copy of the ``{rel_path: sha256}`` mapping."""
return dict(self._files)
def check_modified(self) -> list[str]:
"""Return relative paths of tracked files whose content changed on disk."""
modified: list[str] = []
for rel, expected_hash in self._files.items():
rel_path = Path(rel)
# Skip paths that are absolute or attempt to escape the project root
if rel_path.is_absolute() or ".." in rel_path.parts:
continue
abs_path = self.project_root / rel_path
if not abs_path.exists() and not abs_path.is_symlink():
continue
# Treat symlinks and non-regular-files as modified
if abs_path.is_symlink() or not abs_path.is_file():
modified.append(rel)
continue
if _sha256(abs_path) != expected_hash:
modified.append(rel)
return modified
# -- Uninstall --------------------------------------------------------
def uninstall(
self,
project_root: Path | None = None,
*,
force: bool = False,
) -> tuple[list[Path], list[Path]]:
"""Remove tracked files whose hash still matches.
Parameters:
project_root: Override for the project root.
force: If ``True``, remove files even if modified.
Returns:
``(removed, skipped)`` — absolute paths.
"""
root = (project_root or self.project_root).resolve()
removed: list[Path] = []
skipped: list[Path] = []
for rel, expected_hash in self._files.items():
# Use non-resolved path for deletion so symlinks themselves
# are removed, not their targets.
path = root / rel
# Validate containment lexically (without following symlinks)
# by collapsing .. segments via Path resolution on the string parts.
try:
normed = Path(os.path.normpath(path))
normed.relative_to(root)
except (ValueError, OSError):
continue
if not path.exists() and not path.is_symlink():
continue
# Skip directories — manifest only tracks files
if not path.is_file() and not path.is_symlink():
skipped.append(path)
continue
# Never follow symlinks when comparing hashes. Only remove
# symlinks when forced, to avoid acting on tampered entries.
if path.is_symlink():
if not force:
skipped.append(path)
continue
else:
if not force and _sha256(path) != expected_hash:
skipped.append(path)
continue
try:
path.unlink()
except OSError:
skipped.append(path)
continue
removed.append(path)
# Clean up empty parent directories up to project root
parent = path.parent
while parent != root:
try:
parent.rmdir() # only succeeds if empty
except OSError:
break
parent = parent.parent
# Remove the manifest file itself
manifest = root / ".specify" / "integrations" / f"{self.key}.manifest.json"
if manifest.exists():
manifest.unlink()
parent = manifest.parent
while parent != root:
try:
parent.rmdir()
except OSError:
break
parent = parent.parent
return removed, skipped
# -- Persistence ------------------------------------------------------
def save(self) -> Path:
"""Write the manifest to disk. Returns the manifest path."""
self._installed_at = self._installed_at or datetime.now(timezone.utc).isoformat()
data: dict[str, Any] = {
"integration": self.key,
"version": self.version,
"installed_at": self._installed_at,
"files": self._files,
}
path = self.manifest_path
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(json.dumps(data, indent=2) + "\n", encoding="utf-8")
return path
@classmethod
def load(cls, key: str, project_root: Path) -> IntegrationManifest:
"""Load an existing manifest from disk.
Raises ``FileNotFoundError`` if the manifest does not exist.
"""
inst = cls(key, project_root)
path = inst.manifest_path
try:
data = json.loads(path.read_text(encoding="utf-8"))
except json.JSONDecodeError as exc:
raise ValueError(
f"Integration manifest at {path} contains invalid JSON"
) from exc
if not isinstance(data, dict):
raise ValueError(
f"Integration manifest at {path} must be a JSON object, "
f"got {type(data).__name__}"
)
files = data.get("files", {})
if not isinstance(files, dict) or not all(
isinstance(k, str) and isinstance(v, str) for k, v in files.items()
):
raise ValueError(
f"Integration manifest 'files' at {path} must be a "
"mapping of string paths to string hashes"
)
inst.version = data.get("version", "")
inst._installed_at = data.get("installed_at", "")
inst._files = files
stored_key = data.get("integration", "")
if stored_key and stored_key != key:
raise ValueError(
f"Manifest at {path} belongs to integration {stored_key!r}, "
f"not {key!r}"
)
return inst
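
A hedged lifecycle sketch tying the manifest pieces together — record, save, load, verify, uninstall. The import path is an assumption (only the relative `..manifest` imports are visible in this diff), and the paths are examples:

```python
import tempfile
from pathlib import Path

from specify_cli.integrations.manifest import IntegrationManifest  # assumed path

root = Path(tempfile.mkdtemp())
m = IntegrationManifest("copilot", root, version="0.3.0")
m.record_file(".github/agents/speckit.plan.agent.md", "# plan\n")
m.save()  # writes .specify/integrations/copilot.manifest.json

loaded = IntegrationManifest.load("copilot", root)
assert loaded.check_modified() == []   # nothing edited since install

removed, skipped = loaded.uninstall()
assert skipped == []                   # hashes still match, so all removed
```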

Some files were not shown because too many files have changed in this diff.