Mirror of https://github.com/github/spec-kit.git
Synced 2026-03-20 20:33:08 +00:00

Compare commits: main...f5f8311415 (13 commits)

Commits in this comparison:
f5f8311415
6da1375396
1c143e64b1
da6e7d2283
3ffef55954
52f137ce84
445eefe5ba
35ced30747
914a06a89f
abf4aebdb3
6003a232d8
2e8a4d6432
65ecaa9fe4

@@ -8,15 +8,15 @@ run_command() {
 local command_to_run="$*"
 local output
 local exit_code

 # Capture all output (stdout and stderr)
 output=$(eval "$command_to_run" 2>&1) || exit_code=$?
 exit_code=${exit_code:-0}

 if [ $exit_code -ne 0 ]; then
 echo -e "\033[0;31m[ERROR] Command failed (Exit Code $exit_code): $command_to_run\033[0m" >&2
 echo -e "\033[0;31m$output\033[0m" >&2

 exit $exit_code
 fi
 }
@@ -51,17 +51,9 @@ echo -e "\n🤖 Installing OpenCode CLI..."
 run_command "npm install -g opencode-ai@latest"
 echo "✅ Done"

-echo -e "\n🤖 Installing Junie CLI..."
-run_command "npm install -g @jetbrains/junie-cli@latest"
-echo "✅ Done"
-
-echo -e "\n🤖 Installing Pi Coding Agent..."
-run_command "npm install -g @mariozechner/pi-coding-agent@latest"
-echo "✅ Done"
-
 echo -e "\n🤖 Installing Kiro CLI..."
 # https://kiro.dev/docs/cli/
-KIRO_INSTALLER_URL="https://kiro.dev/install.sh"
+KIRO_INSTALLER_URL="https://cli.kiro.dev/install"
 KIRO_INSTALLER_SHA256="7487a65cf310b7fb59b357c4b5e6e3f3259d383f4394ecedb39acf70f307cffb"
 KIRO_INSTALLER_PATH="$(mktemp)"

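The hunk above swaps the installer URL but keeps the same pinned SHA256 and temp-file path; the download-and-verify step itself sits outside the visible context. A minimal sketch of how a pinned-checksum install typically proceeds, reusing the variable names from the diff (the verification logic is an assumption, not the script's visible code):

# Assumed flow: download to the temp path, compare the pinned digest, then run the installer.
curl -fsSL "$KIRO_INSTALLER_URL" -o "$KIRO_INSTALLER_PATH"
actual_sha256="$(sha256sum "$KIRO_INSTALLER_PATH" | awk '{print $1}')"
if [ "$actual_sha256" != "$KIRO_INSTALLER_SHA256" ]; then
    echo "Checksum mismatch for Kiro installer" >&2
    exit 1
fi
bash "$KIRO_INSTALLER_PATH"

Note that the new URL keeps the old pinned hash, so a check like this only passes if both endpoints serve byte-identical installer content.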
@@ -88,11 +80,6 @@ fi
 run_command "$kiro_binary --help > /dev/null"
 echo "✅ Done"

-echo -e "\n🤖 Installing Kimi CLI..."
-# https://code.kimi.com
-run_command "pipx install kimi-cli"
-echo "✅ Done"
-
 echo -e "\n🤖 Installing CodeBuddy CLI..."
 run_command "npm install -g @tencent-ai/codebuddy-code@latest"
 echo "✅ Done"

.github/ISSUE_TEMPLATE/agent_request.yml (vendored, 2 changed lines)

@@ -8,7 +8,7 @@ body:
 value: |
 Thanks for requesting a new agent! Before submitting, please check if the agent is already supported.

-**Currently supported agents**: Claude Code, Gemini CLI, GitHub Copilot, Cursor, Qwen Code, opencode, Codex CLI, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy, Qoder CLI, Kiro CLI, Amp, SHAI, Tabnine CLI, Antigravity, IBM Bob, Mistral Vibe, Kimi Code, Trae, Pi Coding Agent, iFlow CLI
+**Currently supported agents**: Claude Code, Gemini CLI, GitHub Copilot, Cursor, Qwen Code, opencode, Codex CLI, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy, Qoder CLI, Kiro CLI, Amp, SHAI, IBM Bob, Antigravity

 - type: input
 id: agent-name

.github/ISSUE_TEMPLATE/config.yml (vendored, 2 changed lines)

@@ -7,7 +7,7 @@ contact_links:
 url: https://github.com/github/spec-kit/blob/main/README.md
 about: Read the Spec Kit documentation and guides
 - name: 🛠️ Extension Development Guide
-url: https://github.com/github/spec-kit/blob/main/extensions/EXTENSION-DEVELOPMENT-GUIDE.md
+url: https://github.com/manfredseee/spec-kit/blob/main/extensions/EXTENSION-DEVELOPMENT-GUIDE.md
 about: Learn how to develop and publish Spec Kit extensions
 - name: 🤝 Contributing Guide
 url: https://github.com/github/spec-kit/blob/main/CONTRIBUTING.md

.github/ISSUE_TEMPLATE/preset_submission.yml (vendored, 169 changed lines; file deleted)

@@ -1,169 +0,0 @@
-name: Preset Submission
-description: Submit your preset to the Spec Kit preset catalog
-title: "[Preset]: Add "
-labels: ["preset-submission", "enhancement", "needs-triage"]
-body:
-- type: markdown
-attributes:
-value: |
-Thanks for contributing a preset! This template helps you submit your preset to the community catalog.
-
-**Before submitting:**
-- Review the [Preset Publishing Guide](https://github.com/github/spec-kit/blob/main/presets/PUBLISHING.md)
-- Ensure your preset has a valid `preset.yml` manifest
-- Create a GitHub release with a version tag (e.g., v1.0.0)
-- Test installation from the release archive: `specify preset add --from <download-url>`
-
-- type: input
-id: preset-id
-attributes:
-label: Preset ID
-description: Unique preset identifier (lowercase with hyphens only)
-placeholder: "e.g., healthcare-compliance"
-validations:
-required: true
-
-- type: input
-id: preset-name
-attributes:
-label: Preset Name
-description: Human-readable preset name
-placeholder: "e.g., Healthcare Compliance"
-validations:
-required: true
-
-- type: input
-id: version
-attributes:
-label: Version
-description: Semantic version number
-placeholder: "e.g., 1.0.0"
-validations:
-required: true
-
-- type: textarea
-id: description
-attributes:
-label: Description
-description: Brief description of what your preset does (under 200 characters)
-placeholder: Enforces HIPAA-compliant spec workflows with audit templates and compliance checklists
-validations:
-required: true
-
-- type: input
-id: author
-attributes:
-label: Author
-description: Your name or organization
-placeholder: "e.g., John Doe or Acme Corp"
-validations:
-required: true
-
-- type: input
-id: repository
-attributes:
-label: Repository URL
-description: GitHub repository URL for your preset
-placeholder: "https://github.com/your-org/spec-kit-your-preset"
-validations:
-required: true
-
-- type: input
-id: download-url
-attributes:
-label: Download URL
-description: URL to the GitHub release archive for your preset (e.g., https://github.com/your-org/spec-kit-preset-your-preset/archive/refs/tags/v1.0.0.zip)
-placeholder: "https://github.com/your-org/spec-kit-preset-your-preset/archive/refs/tags/v1.0.0.zip"
-validations:
-required: true
-
-- type: input
-id: license
-attributes:
-label: License
-description: Open source license type
-placeholder: "e.g., MIT, Apache-2.0"
-validations:
-required: true
-
-- type: input
-id: speckit-version
-attributes:
-label: Required Spec Kit Version
-description: Minimum Spec Kit version required
-placeholder: "e.g., >=0.3.0"
-validations:
-required: true
-
-- type: textarea
-id: templates-provided
-attributes:
-label: Templates Provided
-description: List the template overrides your preset provides
-placeholder: |
-- spec-template.md — adds compliance section
-- plan-template.md — includes audit checkpoints
-- checklist-template.md — HIPAA compliance checklist
-validations:
-required: true
-
-- type: textarea
-id: commands-provided
-attributes:
-label: Commands Provided (optional)
-description: List any command overrides your preset provides
-placeholder: |
-- speckit.specify.md — customized for compliance workflows
-
-- type: textarea
-id: tags
-attributes:
-label: Tags
-description: 2-5 relevant tags (lowercase, separated by commas)
-placeholder: "compliance, healthcare, hipaa, audit"
-validations:
-required: true
-
-- type: textarea
-id: features
-attributes:
-label: Key Features
-description: List the main features and capabilities of your preset
-placeholder: |
-- HIPAA-compliant spec templates
-- Audit trail checklists
-- Compliance review workflow
-validations:
-required: true
-
-- type: checkboxes
-id: testing
-attributes:
-label: Testing Checklist
-description: Confirm that your preset has been tested
-options:
-- label: Preset installs successfully via `specify preset add`
-required: true
-- label: Template resolution works correctly after installation
-required: true
-- label: Documentation is complete and accurate
-required: true
-- label: Tested on at least one real project
-required: true
-
-- type: checkboxes
-id: requirements
-attributes:
-label: Submission Requirements
-description: Verify your preset meets all requirements
-options:
-- label: Valid `preset.yml` manifest included
-required: true
-- label: README.md with description and usage instructions
-required: true
-- label: LICENSE file included
-required: true
-- label: GitHub release created with version tag
-required: true
-- label: Preset ID follows naming conventions (lowercase-with-hyphens)
-required: true

.github/workflows/release-trigger.yml (vendored, 8 changed lines)

@@ -86,10 +86,8 @@ jobs:
 if [ -f "CHANGELOG.md" ]; then
 DATE=$(date +%Y-%m-%d)

-# Get the previous tag by sorting all version tags numerically
-# (git describe --tags only finds tags reachable from HEAD,
-# which misses tags on unmerged release branches)
-PREVIOUS_TAG=$(git tag -l 'v*' --sort=-version:refname | head -n 1)
+# Get the previous tag to compare commits
+PREVIOUS_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "")

 echo "Generating changelog from commits..."
 if [[ -n "$PREVIOUS_TAG" ]]; then
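The two sides resolve the previous tag differently: git describe only finds tags reachable from HEAD, while the tag-sort variant considers every version tag regardless of branch. Both commands below are taken verbatim from the hunk; the surrounding echoes are illustrative:

# Reachable-from-HEAD lookup; misses tags that only exist on unmerged release branches.
PREVIOUS_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "")
echo "describe-based previous tag: ${PREVIOUS_TAG:-<none>}"

# Version-sorted lookup over all 'v*' tags; picks the numerically highest one,
# so v0.10.2 sorts above v0.9.9 (a plain string sort would get this wrong).
PREVIOUS_TAG=$(git tag -l 'v*' --sort=-version:refname | head -n 1)
echo "sort-based previous tag: ${PREVIOUS_TAG:-<none>}"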
@@ -106,7 +104,7 @@ jobs:
 echo ""
 echo "## [${{ steps.version.outputs.version }}] - $DATE"
 echo ""
-echo "### Changes"
+echo "### Changed"
 echo ""
 echo "$COMMITS"
 echo ""

.github/workflows/scripts/create-github-release.sh (vendored, 10 changed lines; mode changed from executable file to normal file)

@@ -30,8 +30,6 @@ gh release create "$VERSION" \
 .genreleases/spec-kit-template-qwen-ps-"$VERSION".zip \
 .genreleases/spec-kit-template-windsurf-sh-"$VERSION".zip \
 .genreleases/spec-kit-template-windsurf-ps-"$VERSION".zip \
-.genreleases/spec-kit-template-junie-sh-"$VERSION".zip \
-.genreleases/spec-kit-template-junie-ps-"$VERSION".zip \
 .genreleases/spec-kit-template-codex-sh-"$VERSION".zip \
 .genreleases/spec-kit-template-codex-ps-"$VERSION".zip \
 .genreleases/spec-kit-template-kilocode-sh-"$VERSION".zip \
@@ -58,14 +56,6 @@ gh release create "$VERSION" \
 .genreleases/spec-kit-template-bob-ps-"$VERSION".zip \
 .genreleases/spec-kit-template-vibe-sh-"$VERSION".zip \
 .genreleases/spec-kit-template-vibe-ps-"$VERSION".zip \
-.genreleases/spec-kit-template-kimi-sh-"$VERSION".zip \
-.genreleases/spec-kit-template-kimi-ps-"$VERSION".zip \
-.genreleases/spec-kit-template-trae-sh-"$VERSION".zip \
-.genreleases/spec-kit-template-trae-ps-"$VERSION".zip \
-.genreleases/spec-kit-template-pi-sh-"$VERSION".zip \
-.genreleases/spec-kit-template-pi-ps-"$VERSION".zip \
-.genreleases/spec-kit-template-iflow-sh-"$VERSION".zip \
-.genreleases/spec-kit-template-iflow-ps-"$VERSION".zip \
 .genreleases/spec-kit-template-generic-sh-"$VERSION".zip \
 .genreleases/spec-kit-template-generic-ps-"$VERSION".zip \
 --title "Spec Kit Templates - $VERSION_NO_V" \
@@ -8,13 +8,13 @@
 .DESCRIPTION
 create-release-packages.ps1 (workflow-local)
 Build Spec Kit template release archives for each supported AI assistant and script type.

 .PARAMETER Version
 Version string with leading 'v' (e.g., v0.2.0)

 .PARAMETER Agents
 Comma or space separated subset of agents to build (default: all)
-Valid agents: claude, gemini, copilot, cursor-agent, qwen, opencode, windsurf, junie, codex, kilocode, auggie, roo, codebuddy, amp, kiro-cli, bob, qodercli, shai, tabnine, agy, vibe, kimi, trae, pi, iflow, generic
+Valid agents: claude, gemini, copilot, cursor-agent, qwen, opencode, windsurf, codex, kilocode, auggie, roo, codebuddy, amp, kiro-cli, bob, qodercli, shai, tabnine, agy, vibe, generic

 .PARAMETER Scripts
 Comma or space separated subset of script types to build (default: both)
@@ -33,10 +33,10 @@
 param(
 [Parameter(Mandatory=$true, Position=0)]
 [string]$Version,

 [Parameter(Mandatory=$false)]
 [string]$Agents = "",

 [Parameter(Mandatory=$false)]
 [string]$Scripts = ""
 )
@@ -60,7 +60,7 @@ New-Item -ItemType Directory -Path $GenReleasesDir -Force | Out-Null

 function Rewrite-Paths {
 param([string]$Content)

 $Content = $Content -replace '(/?)\bmemory/', '.specify/memory/'
 $Content = $Content -replace '(/?)\bscripts/', '.specify/scripts/'
 $Content = $Content -replace '(/?)\btemplates/', '.specify/templates/'
@@ -75,55 +75,55 @@ function Generate-Commands {
 [string]$OutputDir,
 [string]$ScriptVariant
 )

 New-Item -ItemType Directory -Path $OutputDir -Force | Out-Null

 $templates = Get-ChildItem -Path "templates/commands/*.md" -File -ErrorAction SilentlyContinue

 foreach ($template in $templates) {
 $name = [System.IO.Path]::GetFileNameWithoutExtension($template.Name)

 # Read file content and normalize line endings
 $fileContent = (Get-Content -Path $template.FullName -Raw) -replace "`r`n", "`n"

 # Extract description from YAML frontmatter
 $description = ""
 if ($fileContent -match '(?m)^description:\s*(.+)$') {
 $description = $matches[1]
 }

 # Extract script command from YAML frontmatter
 $scriptCommand = ""
 if ($fileContent -match "(?m)^\s*${ScriptVariant}:\s*(.+)$") {
 $scriptCommand = $matches[1]
 }

 if ([string]::IsNullOrEmpty($scriptCommand)) {
 Write-Warning "No script command found for $ScriptVariant in $($template.Name)"
 $scriptCommand = "(Missing script command for $ScriptVariant)"
 }

 # Extract agent_script command from YAML frontmatter if present
 $agentScriptCommand = ""
 if ($fileContent -match "(?ms)agent_scripts:.*?^\s*${ScriptVariant}:\s*(.+?)$") {
 $agentScriptCommand = $matches[1].Trim()
 }

 # Replace {SCRIPT} placeholder with the script command
 $body = $fileContent -replace '\{SCRIPT\}', $scriptCommand

 # Replace {AGENT_SCRIPT} placeholder with the agent script command if found
 if (-not [string]::IsNullOrEmpty($agentScriptCommand)) {
 $body = $body -replace '\{AGENT_SCRIPT\}', $agentScriptCommand
 }

 # Remove the scripts: and agent_scripts: sections from frontmatter
 $lines = $body -split "`n"
 $outputLines = @()
 $inFrontmatter = $false
 $skipScripts = $false
 $dashCount = 0

 foreach ($line in $lines) {
 if ($line -match '^---$') {
 $outputLines += $line
@@ -135,7 +135,7 @@ function Generate-Commands {
 }
 continue
 }

 if ($inFrontmatter) {
 if ($line -match '^(scripts|agent_scripts):$') {
 $skipScripts = $true
@@ -148,20 +148,20 @@ function Generate-Commands {
 continue
 }
 }

 $outputLines += $line
 }

 $body = $outputLines -join "`n"

 # Apply other substitutions
 $body = $body -replace '\{ARGS\}', $ArgFormat
 $body = $body -replace '__AGENT__', $Agent
 $body = Rewrite-Paths -Content $body

 # Generate output file based on extension
 $outputFile = Join-Path $OutputDir "speckit.$name.$Extension"

 switch ($Extension) {
 'toml' {
 $body = $body -replace '\\', '\\'
@@ -183,15 +183,15 @@ function Generate-CopilotPrompts {
 [string]$AgentsDir,
 [string]$PromptsDir
 )

 New-Item -ItemType Directory -Path $PromptsDir -Force | Out-Null

 $agentFiles = Get-ChildItem -Path "$AgentsDir/speckit.*.agent.md" -File -ErrorAction SilentlyContinue

 foreach ($agentFile in $agentFiles) {
 $basename = $agentFile.Name -replace '\.agent\.md$', ''
 $promptFile = Join-Path $PromptsDir "$basename.prompt.md"

 $content = @"
 ---
 agent: $basename
@@ -201,124 +201,31 @@ agent: $basename
 }
 }

-# Create skills in <skills_dir>\<name>\SKILL.md format.
-# Most agents use hyphenated names (e.g. speckit-plan); Kimi is the
-# current dotted-name exception (e.g. speckit.plan).
-#
-# Technical debt note:
-# Keep SKILL.md frontmatter aligned with `install_ai_skills()` and extension
-# overrides (at minimum: name/description/compatibility/metadata.{author,source}).
-function New-Skills {
-param(
-[string]$SkillsDir,
-[string]$ScriptVariant,
-[string]$AgentName,
-[string]$Separator = '-'
-)
-
-$templates = Get-ChildItem -Path "templates/commands/*.md" -File -ErrorAction SilentlyContinue
-
-foreach ($template in $templates) {
-$name = [System.IO.Path]::GetFileNameWithoutExtension($template.Name)
-$skillName = "speckit${Separator}$name"
-$skillDir = Join-Path $SkillsDir $skillName
-New-Item -ItemType Directory -Force -Path $skillDir | Out-Null
-
-$fileContent = (Get-Content -Path $template.FullName -Raw) -replace "`r`n", "`n"
-
-# Extract description
-$description = "Spec Kit: $name workflow"
-if ($fileContent -match '(?m)^description:\s*(.+)$') {
-$description = $matches[1]
-}
-
-# Extract script command
-$scriptCommand = "(Missing script command for $ScriptVariant)"
-if ($fileContent -match "(?m)^\s*${ScriptVariant}:\s*(.+)$") {
-$scriptCommand = $matches[1]
-}
-
-# Extract agent_script command from frontmatter if present
-$agentScriptCommand = ""
-if ($fileContent -match "(?ms)agent_scripts:.*?^\s*${ScriptVariant}:\s*(.+?)$") {
-$agentScriptCommand = $matches[1].Trim()
-}
-
-# Replace {SCRIPT}, strip scripts sections, rewrite paths
-$body = $fileContent -replace '\{SCRIPT\}', $scriptCommand
-if (-not [string]::IsNullOrEmpty($agentScriptCommand)) {
-$body = $body -replace '\{AGENT_SCRIPT\}', $agentScriptCommand
-}
-
-$lines = $body -split "`n"
-$outputLines = @()
-$inFrontmatter = $false
-$skipScripts = $false
-$dashCount = 0
-
-foreach ($line in $lines) {
-if ($line -match '^---$') {
-$outputLines += $line
-$dashCount++
-$inFrontmatter = ($dashCount -eq 1)
-continue
-}
-if ($inFrontmatter) {
-if ($line -match '^(scripts|agent_scripts):$') { $skipScripts = $true; continue }
-if ($line -match '^[a-zA-Z].*:' -and $skipScripts) { $skipScripts = $false }
-if ($skipScripts -and $line -match '^\s+') { continue }
-}
-$outputLines += $line
-}
-
-$body = $outputLines -join "`n"
-$body = $body -replace '\{ARGS\}', '$ARGUMENTS'
-$body = $body -replace '__AGENT__', $AgentName
-$body = Rewrite-Paths -Content $body
-
-# Strip existing frontmatter, keep only body
-$templateBody = ""
-$fmCount = 0
-$inBody = $false
-foreach ($line in ($body -split "`n")) {
-if ($line -match '^---$') {
-$fmCount++
-if ($fmCount -eq 2) { $inBody = $true }
-continue
-}
-if ($inBody) { $templateBody += "$line`n" }
-}
-
-$skillContent = "---`nname: `"$skillName`"`ndescription: `"$description`"`ncompatibility: `"Requires spec-kit project structure with .specify/ directory`"`nmetadata:`n author: `"github-spec-kit`"`n source: `"templates/commands/$name.md`"`n---`n`n$templateBody"
-Set-Content -Path (Join-Path $skillDir "SKILL.md") -Value $skillContent -NoNewline
-}
-}
-
 function Build-Variant {
 param(
 [string]$Agent,
 [string]$Script
 )

 $baseDir = Join-Path $GenReleasesDir "sdd-${Agent}-package-${Script}"
 Write-Host "Building $Agent ($Script) package..."
 New-Item -ItemType Directory -Path $baseDir -Force | Out-Null

 # Copy base structure but filter scripts by variant
 $specDir = Join-Path $baseDir ".specify"
 New-Item -ItemType Directory -Path $specDir -Force | Out-Null

 # Copy memory directory
 if (Test-Path "memory") {
 Copy-Item -Path "memory" -Destination $specDir -Recurse -Force
 Write-Host "Copied memory -> .specify"
 }

 # Only copy the relevant script variant directory
 if (Test-Path "scripts") {
 $scriptsDestDir = Join-Path $specDir "scripts"
 New-Item -ItemType Directory -Path $scriptsDestDir -Force | Out-Null

 switch ($Script) {
 'sh' {
 if (Test-Path "scripts/bash") {
@@ -333,17 +240,18 @@ function Build-Variant {
 }
 }
 }

+# Copy any script files that aren't in variant-specific directories
 Get-ChildItem -Path "scripts" -File -ErrorAction SilentlyContinue | ForEach-Object {
 Copy-Item -Path $_.FullName -Destination $scriptsDestDir -Force
 }
 }

 # Copy templates (excluding commands directory and vscode-settings.json)
 if (Test-Path "templates") {
 $templatesDestDir = Join-Path $specDir "templates"
 New-Item -ItemType Directory -Path $templatesDestDir -Force | Out-Null

 Get-ChildItem -Path "templates" -Recurse -File | Where-Object {
 $_.FullName -notmatch 'templates[/\\]commands[/\\]' -and $_.Name -ne 'vscode-settings.json'
 } | ForEach-Object {
@@ -355,7 +263,7 @@ function Build-Variant {
 }
 Write-Host "Copied templates -> .specify/templates"
 }

 # Generate agent-specific command files
 switch ($Agent) {
 'claude' {
@@ -372,10 +280,12 @@
 'copilot' {
 $agentsDir = Join-Path $baseDir ".github/agents"
 Generate-Commands -Agent 'copilot' -Extension 'agent.md' -ArgFormat '$ARGUMENTS' -OutputDir $agentsDir -ScriptVariant $Script

+# Generate companion prompt files
 $promptsDir = Join-Path $baseDir ".github/prompts"
 Generate-CopilotPrompts -AgentsDir $agentsDir -PromptsDir $promptsDir

+# Create VS Code workspace settings
 $vscodeDir = Join-Path $baseDir ".vscode"
 New-Item -ItemType Directory -Path $vscodeDir -Force | Out-Null
 if (Test-Path "templates/vscode-settings.json") {
@@ -388,7 +298,7 @@
 }
 'qwen' {
 $cmdDir = Join-Path $baseDir ".qwen/commands"
-Generate-Commands -Agent 'qwen' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
+Generate-Commands -Agent 'qwen' -Extension 'toml' -ArgFormat '{{args}}' -OutputDir $cmdDir -ScriptVariant $Script
 if (Test-Path "agent_templates/qwen/QWEN.md") {
 Copy-Item -Path "agent_templates/qwen/QWEN.md" -Destination (Join-Path $baseDir "QWEN.md")
 }
@@ -401,14 +311,9 @@
 $cmdDir = Join-Path $baseDir ".windsurf/workflows"
 Generate-Commands -Agent 'windsurf' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
 }
-'junie' {
-$cmdDir = Join-Path $baseDir ".junie/commands"
-Generate-Commands -Agent 'junie' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
-}
 'codex' {
-$skillsDir = Join-Path $baseDir ".agents/skills"
-New-Item -ItemType Directory -Force -Path $skillsDir | Out-Null
-New-Skills -SkillsDir $skillsDir -ScriptVariant $Script -AgentName 'codex' -Separator '-'
+$cmdDir = Join-Path $baseDir ".codex/prompts"
+Generate-Commands -Agent 'codex' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
 }
 'kilocode' {
 $cmdDir = Join-Path $baseDir ".kilocode/workflows"
@@ -453,40 +358,22 @@
 if (Test-Path $tabnineTemplate) { Copy-Item $tabnineTemplate (Join-Path $baseDir 'TABNINE.md') }
 }
 'agy' {
-$cmdDir = Join-Path $baseDir ".agent/commands"
+$cmdDir = Join-Path $baseDir ".agent/workflows"
 Generate-Commands -Agent 'agy' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
 }
-'vibe' {
-$cmdDir = Join-Path $baseDir ".vibe/prompts"
-Generate-Commands -Agent 'vibe' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
-}
-'kimi' {
-$skillsDir = Join-Path $baseDir ".kimi/skills"
-New-Item -ItemType Directory -Force -Path $skillsDir | Out-Null
-New-Skills -SkillsDir $skillsDir -ScriptVariant $Script -AgentName 'kimi' -Separator '.'
-}
-'trae' {
-$rulesDir = Join-Path $baseDir ".trae/rules"
-New-Item -ItemType Directory -Force -Path $rulesDir | Out-Null
-Generate-Commands -Agent 'trae' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $rulesDir -ScriptVariant $Script
-}
-'pi' {
-$cmdDir = Join-Path $baseDir ".pi/prompts"
-Generate-Commands -Agent 'pi' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
-}
-'iflow' {
-$cmdDir = Join-Path $baseDir ".iflow/commands"
-Generate-Commands -Agent 'iflow' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
-}
 'generic' {
 $cmdDir = Join-Path $baseDir ".speckit/commands"
 Generate-Commands -Agent 'generic' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
 }
+'vibe' {
+$cmdDir = Join-Path $baseDir ".vibe/prompts"
+Generate-Commands -Agent 'vibe' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
+}
 default {
 throw "Unsupported agent '$Agent'."
 }
 }

 # Create zip archive
 $zipFile = Join-Path $GenReleasesDir "spec-kit-template-${Agent}-${Script}-${Version}.zip"
 Compress-Archive -Path "$baseDir/*" -DestinationPath $zipFile -Force
@@ -494,16 +381,17 @@
 }

 # Define all agents and scripts
-$AllAgents = @('claude', 'gemini', 'copilot', 'cursor-agent', 'qwen', 'opencode', 'windsurf', 'junie', 'codex', 'kilocode', 'auggie', 'roo', 'codebuddy', 'amp', 'kiro-cli', 'bob', 'qodercli', 'shai', 'tabnine', 'agy', 'vibe', 'kimi', 'trae', 'pi', 'iflow', 'generic')
+$AllAgents = @('claude', 'gemini', 'copilot', 'cursor-agent', 'qwen', 'opencode', 'windsurf', 'codex', 'kilocode', 'auggie', 'roo', 'codebuddy', 'amp', 'kiro-cli', 'bob', 'qodercli', 'shai', 'tabnine', 'agy', 'vibe', 'generic')
 $AllScripts = @('sh', 'ps')

 function Normalize-List {
 param([string]$Input)

 if ([string]::IsNullOrEmpty($Input)) {
 return @()
 }

+# Split by comma or space and remove duplicates while preserving order
 $items = $Input -split '[,\s]+' | Where-Object { $_ } | Select-Object -Unique
 return $items
 }
@@ -514,7 +402,7 @@ function Validate-Subset {
 [string[]]$Allowed,
 [string[]]$Items
 )

 $ok = $true
 foreach ($item in $Items) {
 if ($item -notin $Allowed) {

.github/workflows/scripts/create-release-packages.sh (vendored, 201 changed lines)

@@ -6,7 +6,7 @@ set -euo pipefail
 # Usage: .github/workflows/scripts/create-release-packages.sh <version>
 # Version argument should include leading 'v'.
 # Optionally set AGENTS and/or SCRIPTS env vars to limit what gets built.
-# AGENTS : space or comma separated subset of: claude gemini copilot cursor-agent qwen opencode windsurf junie codex kilocode auggie roo codebuddy amp shai tabnine kiro-cli agy bob vibe qodercli kimi trae pi iflow generic (default: all)
+# AGENTS : space or comma separated subset of: claude gemini copilot cursor-agent qwen opencode windsurf codex kilocode auggie roo codebuddy amp shai tabnine kiro-cli agy bob vibe qodercli generic (default: all)
 # SCRIPTS : space or comma separated subset of: sh ps (default: both)
 # Examples:
 #   AGENTS=claude SCRIPTS=sh $0 v0.2.0
@@ -26,27 +26,9 @@ fi
 echo "Building release packages for $NEW_VERSION"

 # Create and use .genreleases directory for all build artifacts
-# Override via GENRELEASES_DIR env var (e.g. for tests writing to a temp dir)
-GENRELEASES_DIR="${GENRELEASES_DIR:-.genreleases}"
-
-# Guard against unsafe GENRELEASES_DIR values before cleaning
-if [[ -z "$GENRELEASES_DIR" ]]; then
-echo "GENRELEASES_DIR must not be empty" >&2
-exit 1
-fi
-case "$GENRELEASES_DIR" in
-'/'|'.'|'..')
-echo "Refusing to use unsafe GENRELEASES_DIR value: $GENRELEASES_DIR" >&2
-exit 1
-;;
-esac
-if [[ "$GENRELEASES_DIR" == *".."* ]]; then
-echo "Refusing to use GENRELEASES_DIR containing '..' path segments: $GENRELEASES_DIR" >&2
-exit 1
-fi
-
+GENRELEASES_DIR=".genreleases"
 mkdir -p "$GENRELEASES_DIR"
-rm -rf "${GENRELEASES_DIR%/}/"* || true
+rm -rf "$GENRELEASES_DIR"/* || true

 rewrite_paths() {
 sed -E \
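The rewrite_paths body is cut off at the end of this hunk (only the opening sed -E is visible). Based on the PowerShell Rewrite-Paths function shown earlier, which maps memory/, scripts/ and templates/ to their .specify/ counterparts, a plausible sketch of the full bash helper follows; the exact sed expressions are an assumption, not visible in this view, and \b requires GNU sed:

rewrite_paths() {
    sed -E \
        -e 's@(/?)\bmemory/@.specify/memory/@g' \
        -e 's@(/?)\bscripts/@.specify/scripts/@g' \
        -e 's@(/?)\btemplates/@.specify/templates/@g'
}

# Example: path references in command templates get repointed at .specify/.
echo "see templates/plan-template.md and scripts/bash/setup.sh" | rewrite_paths
# -> see .specify/templates/plan-template.md and .specify/scripts/bash/setup.sh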
@@ -63,19 +45,19 @@ generate_commands() {
 [[ -f "$template" ]] || continue
 local name description script_command agent_script_command body
 name=$(basename "$template" .md)

 # Normalize line endings
 file_content=$(tr -d '\r' < "$template")

 # Extract description and script command from YAML frontmatter
 description=$(printf '%s\n' "$file_content" | awk '/^description:/ {sub(/^description:[[:space:]]*/, ""); print; exit}')
 script_command=$(printf '%s\n' "$file_content" | awk -v sv="$script_variant" '/^[[:space:]]*'"$script_variant"':[[:space:]]*/ {sub(/^[[:space:]]*'"$script_variant"':[[:space:]]*/, ""); print; exit}')

 if [[ -z $script_command ]]; then
 echo "Warning: no script command found for $script_variant in $template" >&2
 script_command="(Missing script command for $script_variant)"
 fi

 # Extract agent_script command from YAML frontmatter if present
 agent_script_command=$(printf '%s\n' "$file_content" | awk '
 /^agent_scripts:$/ { in_agent_scripts=1; next }
@@ -86,15 +68,15 @@ generate_commands() {
 }
 in_agent_scripts && /^[a-zA-Z]/ { in_agent_scripts=0 }
 ')

 # Replace {SCRIPT} placeholder with the script command
 body=$(printf '%s\n' "$file_content" | sed "s|{SCRIPT}|${script_command}|g")

 # Replace {AGENT_SCRIPT} placeholder with the agent script command if found
 if [[ -n $agent_script_command ]]; then
 body=$(printf '%s\n' "$body" | sed "s|{AGENT_SCRIPT}|${agent_script_command}|g")
 fi

 # Remove the scripts: and agent_scripts: sections from frontmatter while preserving YAML structure
 body=$(printf '%s\n' "$body" | awk '
 /^---$/ { print; if (++dash_count == 1) in_frontmatter=1; else in_frontmatter=0; next }
@@ -104,10 +86,10 @@ generate_commands() {
 in_frontmatter && skip_scripts && /^[[:space:]]/ { next }
 { print }
 ')

 # Apply other substitutions
 body=$(printf '%s\n' "$body" | sed "s/{ARGS}/$arg_format/g" | sed "s/__AGENT__/$agent/g" | rewrite_paths)

 case $ext in
 toml)
 body=$(printf '%s\n' "$body" | sed 's/\\/\\\\/g')
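To make the placeholder handling concrete, here is a small self-contained sketch of the same substitution order applied to a hypothetical command template. The template content, file path and the script_variant/arg_format/agent values are illustrative, and the frontmatter-stripping and rewrite_paths steps are omitted for brevity:

# Hypothetical template using the {SCRIPT}, {ARGS} and __AGENT__ placeholders.
cat > /tmp/example-command.md <<'EOF'
---
description: Create a feature specification
scripts:
  sh: scripts/bash/create-new-feature.sh --json "{ARGS}"
  ps: scripts/powershell/create-new-feature.ps1 -Json "{ARGS}"
---
Run {SCRIPT} for __AGENT__ with arguments: {ARGS}
EOF

script_variant=sh
arg_format='$ARGUMENTS'
agent=claude

# Same order as generate_commands: pull the per-variant command out of the
# frontmatter, substitute {SCRIPT} first, then {ARGS} and __AGENT__.
script_command=$(awk '/^[[:space:]]*'"$script_variant"':[[:space:]]*/ {sub(/^[[:space:]]*'"$script_variant"':[[:space:]]*/, ""); print; exit}' /tmp/example-command.md)
sed "s|{SCRIPT}|${script_command}|g" /tmp/example-command.md \
  | sed "s/{ARGS}/$arg_format/g" \
  | sed "s/__AGENT__/$agent/g"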
@@ -123,14 +105,15 @@ generate_commands() {
 generate_copilot_prompts() {
 local agents_dir=$1 prompts_dir=$2
 mkdir -p "$prompts_dir"

 # Generate a .prompt.md file for each .agent.md file
 for agent_file in "$agents_dir"/speckit.*.agent.md; do
 [[ -f "$agent_file" ]] || continue

 local basename=$(basename "$agent_file" .agent.md)
 local prompt_file="$prompts_dir/${basename}.prompt.md"

+# Create prompt file with agent frontmatter
 cat > "$prompt_file" <<EOF
 ---
 agent: ${basename}
@@ -139,114 +122,41 @@ EOF
 done
 }

-# Create skills in <skills_dir>/<name>/SKILL.md format.
-# Most agents use hyphenated names (e.g. speckit-plan); Kimi is the
-# current dotted-name exception (e.g. speckit.plan).
-#
-# Technical debt note:
-# Keep SKILL.md frontmatter aligned with `install_ai_skills()` and extension
-# overrides (at minimum: name/description/compatibility/metadata.{author,source}).
-create_skills() {
-local skills_dir="$1"
-local script_variant="$2"
-local agent_name="$3"
-local separator="${4:-"-"}"
-
-for template in templates/commands/*.md; do
-[[ -f "$template" ]] || continue
-local name
-name=$(basename "$template" .md)
-local skill_name="speckit${separator}${name}"
-local skill_dir="${skills_dir}/${skill_name}"
-mkdir -p "$skill_dir"
-
-local file_content
-file_content=$(tr -d '\r' < "$template")
-
-# Extract description from frontmatter
-local description
-description=$(printf '%s\n' "$file_content" | awk '/^description:/ {sub(/^description:[[:space:]]*/, ""); print; exit}')
-[[ -z "$description" ]] && description="Spec Kit: ${name} workflow"
-
-# Extract script command
-local script_command
-script_command=$(printf '%s\n' "$file_content" | awk -v sv="$script_variant" '/^[[:space:]]*'"$script_variant"':[[:space:]]*/ {sub(/^[[:space:]]*'"$script_variant"':[[:space:]]*/, ""); print; exit}')
-[[ -z "$script_command" ]] && script_command="(Missing script command for $script_variant)"
-
-# Extract agent_script command from frontmatter if present
-local agent_script_command
-agent_script_command=$(printf '%s\n' "$file_content" | awk '
-/^agent_scripts:$/ { in_agent_scripts=1; next }
-in_agent_scripts && /^[[:space:]]*'"$script_variant"':[[:space:]]*/ {
-sub(/^[[:space:]]*'"$script_variant"':[[:space:]]*/, "")
-print
-exit
-}
-in_agent_scripts && /^[a-zA-Z]/ { in_agent_scripts=0 }
-')
-
-# Build body: replace placeholders, strip scripts sections, rewrite paths
-local body
-body=$(printf '%s\n' "$file_content" | sed "s|{SCRIPT}|${script_command}|g")
-if [[ -n $agent_script_command ]]; then
-body=$(printf '%s\n' "$body" | sed "s|{AGENT_SCRIPT}|${agent_script_command}|g")
-fi
-body=$(printf '%s\n' "$body" | awk '
-/^---$/ { print; if (++dash_count == 1) in_frontmatter=1; else in_frontmatter=0; next }
-in_frontmatter && /^scripts:$/ { skip_scripts=1; next }
-in_frontmatter && /^agent_scripts:$/ { skip_scripts=1; next }
-in_frontmatter && /^[a-zA-Z].*:/ && skip_scripts { skip_scripts=0 }
-in_frontmatter && skip_scripts && /^[[:space:]]/ { next }
-{ print }
-')
-body=$(printf '%s\n' "$body" | sed 's/{ARGS}/\$ARGUMENTS/g' | sed "s/__AGENT__/$agent_name/g" | rewrite_paths)
-
-# Strip existing frontmatter and prepend skills frontmatter.
-local template_body
-template_body=$(printf '%s\n' "$body" | awk '/^---/{p++; if(p==2){found=1; next}} found')
-
-{
-printf -- '---\n'
-printf 'name: "%s"\n' "$skill_name"
-printf 'description: "%s"\n' "$description"
-printf 'compatibility: "%s"\n' "Requires spec-kit project structure with .specify/ directory"
-printf -- 'metadata:\n'
-printf ' author: "%s"\n' "github-spec-kit"
-printf ' source: "%s"\n' "templates/commands/${name}.md"
-printf -- '---\n\n'
-printf '%s\n' "$template_body"
-} > "$skill_dir/SKILL.md"
-done
-}
-
 build_variant() {
 local agent=$1 script=$2
 local base_dir="$GENRELEASES_DIR/sdd-${agent}-package-${script}"
 echo "Building $agent ($script) package..."
 mkdir -p "$base_dir"

 # Copy base structure but filter scripts by variant
 SPEC_DIR="$base_dir/.specify"
 mkdir -p "$SPEC_DIR"

 [[ -d memory ]] && { cp -r memory "$SPEC_DIR/"; echo "Copied memory -> .specify"; }

 # Only copy the relevant script variant directory
 if [[ -d scripts ]]; then
 mkdir -p "$SPEC_DIR/scripts"
 case $script in
 sh)
 [[ -d scripts/bash ]] && { cp -r scripts/bash "$SPEC_DIR/scripts/"; echo "Copied scripts/bash -> .specify/scripts"; }
+# Copy any script files that aren't in variant-specific directories
 find scripts -maxdepth 1 -type f -exec cp {} "$SPEC_DIR/scripts/" \; 2>/dev/null || true
 ;;
 ps)
 [[ -d scripts/powershell ]] && { cp -r scripts/powershell "$SPEC_DIR/scripts/"; echo "Copied scripts/powershell -> .specify/scripts"; }
+# Copy any script files that aren't in variant-specific directories
 find scripts -maxdepth 1 -type f -exec cp {} "$SPEC_DIR/scripts/" \; 2>/dev/null || true
 ;;
 esac
 fi

-[[ -d templates ]] && { mkdir -p "$SPEC_DIR/templates"; find templates -type f -not -path "templates/commands/*" -not -name "vscode-settings.json" | while IFS= read -r f; do d="$SPEC_DIR/$(dirname "$f")"; mkdir -p "$d"; cp "$f" "$d/"; done; echo "Copied templates -> .specify/templates"; }
+[[ -d templates ]] && { mkdir -p "$SPEC_DIR/templates"; find templates -type f -not -path "templates/commands/*" -not -name "vscode-settings.json" -exec cp --parents {} "$SPEC_DIR"/ \; ; echo "Copied templates -> .specify/templates"; }

+# NOTE: We substitute {ARGS} internally. Outward tokens differ intentionally:
+# * Markdown/prompt (claude, copilot, cursor-agent, opencode): $ARGUMENTS
+# * TOML (gemini, qwen, tabnine): {{args}}
+# This keeps formats readable without extra abstraction.
+
 case $agent in
 claude)
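For reference, the printf sequence in the removed create_skills (and the string literal in the removed PowerShell New-Skills) assembles one SKILL.md per command template. An illustrative rendering for a single template follows; the name, description and source values are made up for the example, while the field layout and the fixed compatibility/author strings come from the removed code:

# Illustrative output of the removed skill generators for one hypothetical template.
mkdir -p .agents/skills/speckit-plan
cat <<'EOF' > .agents/skills/speckit-plan/SKILL.md
---
name: "speckit-plan"
description: "Create an implementation plan for the feature"
compatibility: "Requires spec-kit project structure with .specify/ directory"
metadata:
 author: "github-spec-kit"
 source: "templates/commands/plan.md"
---

(template body with {SCRIPT}, {ARGS} and __AGENT__ already substituted)
EOF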
@@ -259,7 +169,9 @@ build_variant() {
 copilot)
 mkdir -p "$base_dir/.github/agents"
 generate_commands copilot agent.md "\$ARGUMENTS" "$base_dir/.github/agents" "$script"
+# Generate companion prompt files
 generate_copilot_prompts "$base_dir/.github/agents" "$base_dir/.github/prompts"
+# Create VS Code workspace settings
 mkdir -p "$base_dir/.vscode"
 [[ -f templates/vscode-settings.json ]] && cp templates/vscode-settings.json "$base_dir/.vscode/settings.json"
 ;;
@@ -268,7 +180,7 @@ build_variant() {
 generate_commands cursor-agent md "\$ARGUMENTS" "$base_dir/.cursor/commands" "$script" ;;
 qwen)
 mkdir -p "$base_dir/.qwen/commands"
-generate_commands qwen md "\$ARGUMENTS" "$base_dir/.qwen/commands" "$script"
+generate_commands qwen toml "{{args}}" "$base_dir/.qwen/commands" "$script"
 [[ -f agent_templates/qwen/QWEN.md ]] && cp agent_templates/qwen/QWEN.md "$base_dir/QWEN.md" ;;
 opencode)
 mkdir -p "$base_dir/.opencode/command"
@@ -276,12 +188,9 @@ build_variant() {
|
|||||||
windsurf)
|
windsurf)
|
||||||
mkdir -p "$base_dir/.windsurf/workflows"
|
mkdir -p "$base_dir/.windsurf/workflows"
|
||||||
generate_commands windsurf md "\$ARGUMENTS" "$base_dir/.windsurf/workflows" "$script" ;;
|
generate_commands windsurf md "\$ARGUMENTS" "$base_dir/.windsurf/workflows" "$script" ;;
|
||||||
junie)
|
|
||||||
mkdir -p "$base_dir/.junie/commands"
|
|
||||||
generate_commands junie md "\$ARGUMENTS" "$base_dir/.junie/commands" "$script" ;;
|
|
||||||
codex)
|
codex)
|
||||||
mkdir -p "$base_dir/.agents/skills"
|
mkdir -p "$base_dir/.codex/prompts"
|
||||||
create_skills "$base_dir/.agents/skills" "$script" "codex" "-" ;;
|
generate_commands codex md "\$ARGUMENTS" "$base_dir/.codex/prompts" "$script" ;;
|
||||||
kilocode)
|
kilocode)
|
||||||
mkdir -p "$base_dir/.kilocode/workflows"
|
mkdir -p "$base_dir/.kilocode/workflows"
|
||||||
generate_commands kilocode md "\$ARGUMENTS" "$base_dir/.kilocode/workflows" "$script" ;;
|
generate_commands kilocode md "\$ARGUMENTS" "$base_dir/.kilocode/workflows" "$script" ;;
|
||||||
@@ -311,26 +220,14 @@ build_variant() {
|
|||||||
mkdir -p "$base_dir/.kiro/prompts"
|
mkdir -p "$base_dir/.kiro/prompts"
|
||||||
generate_commands kiro-cli md "\$ARGUMENTS" "$base_dir/.kiro/prompts" "$script" ;;
|
generate_commands kiro-cli md "\$ARGUMENTS" "$base_dir/.kiro/prompts" "$script" ;;
|
||||||
agy)
|
agy)
|
||||||
mkdir -p "$base_dir/.agent/commands"
|
mkdir -p "$base_dir/.agent/workflows"
|
||||||
generate_commands agy md "\$ARGUMENTS" "$base_dir/.agent/commands" "$script" ;;
|
generate_commands agy md "\$ARGUMENTS" "$base_dir/.agent/workflows" "$script" ;;
|
||||||
bob)
|
bob)
|
||||||
mkdir -p "$base_dir/.bob/commands"
|
mkdir -p "$base_dir/.bob/commands"
|
||||||
generate_commands bob md "\$ARGUMENTS" "$base_dir/.bob/commands" "$script" ;;
|
generate_commands bob md "\$ARGUMENTS" "$base_dir/.bob/commands" "$script" ;;
|
||||||
vibe)
|
vibe)
|
||||||
mkdir -p "$base_dir/.vibe/prompts"
|
mkdir -p "$base_dir/.vibe/prompts"
|
||||||
generate_commands vibe md "\$ARGUMENTS" "$base_dir/.vibe/prompts" "$script" ;;
|
generate_commands vibe md "\$ARGUMENTS" "$base_dir/.vibe/prompts" "$script" ;;
|
||||||
kimi)
|
|
||||||
mkdir -p "$base_dir/.kimi/skills"
|
|
||||||
create_skills "$base_dir/.kimi/skills" "$script" "kimi" "." ;;
|
|
||||||
trae)
|
|
||||||
mkdir -p "$base_dir/.trae/rules"
|
|
||||||
generate_commands trae md "\$ARGUMENTS" "$base_dir/.trae/rules" "$script" ;;
|
|
||||||
pi)
|
|
||||||
mkdir -p "$base_dir/.pi/prompts"
|
|
||||||
generate_commands pi md "\$ARGUMENTS" "$base_dir/.pi/prompts" "$script" ;;
|
|
||||||
iflow)
|
|
||||||
mkdir -p "$base_dir/.iflow/commands"
|
|
||||||
generate_commands iflow md "\$ARGUMENTS" "$base_dir/.iflow/commands" "$script" ;;
|
|
||||||
generic)
|
generic)
|
||||||
mkdir -p "$base_dir/.speckit/commands"
|
mkdir -p "$base_dir/.speckit/commands"
|
||||||
generate_commands generic md "\$ARGUMENTS" "$base_dir/.speckit/commands" "$script" ;;
|
generate_commands generic md "\$ARGUMENTS" "$base_dir/.speckit/commands" "$script" ;;
|
||||||
@@ -340,38 +237,38 @@ build_variant() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
# Determine agent list
|
# Determine agent list
|
||||||
ALL_AGENTS=(claude gemini copilot cursor-agent qwen opencode windsurf junie codex kilocode auggie roo codebuddy amp shai tabnine kiro-cli agy bob vibe qodercli kimi trae pi iflow generic)
|
ALL_AGENTS=(claude gemini copilot cursor-agent qwen opencode windsurf codex kilocode auggie roo codebuddy amp shai tabnine kiro-cli agy bob vibe qodercli generic)
|
||||||
ALL_SCRIPTS=(sh ps)
|
ALL_SCRIPTS=(sh ps)
|
||||||
|
|
||||||
|
norm_list() {
|
||||||
|
# convert comma+space separated -> line separated unique while preserving order of first occurrence
|
||||||
|
tr ',\n' ' ' | awk '{for(i=1;i<=NF;i++){if(!seen[$i]++){printf((out?"\n":"") $i);out=1}}}END{printf("\n")}'
|
||||||
|
}
|
||||||
|
|
||||||
validate_subset() {
|
validate_subset() {
|
||||||
local type=$1; shift
|
local type=$1; shift; local -n allowed=$1; shift; local items=("$@")
|
||||||
local allowed_str="$1"; shift
|
|
||||||
local invalid=0
|
local invalid=0
|
||||||
for it in "$@"; do
|
for it in "${items[@]}"; do
|
||||||
local found=0
|
local found=0
|
||||||
for a in $allowed_str; do
|
for a in "${allowed[@]}"; do [[ $it == "$a" ]] && { found=1; break; }; done
|
||||||
if [[ "$it" == "$a" ]]; then found=1; break; fi
|
|
||||||
done
|
|
||||||
if [[ $found -eq 0 ]]; then
|
if [[ $found -eq 0 ]]; then
|
||||||
echo "Error: unknown $type '$it' (allowed: $allowed_str)" >&2
|
echo "Error: unknown $type '$it' (allowed: ${allowed[*]})" >&2
|
||||||
invalid=1
|
invalid=1
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
return $invalid
|
return $invalid
|
||||||
}
|
}
|
||||||
|
|
||||||
read_list() { tr ',\n' ' ' | awk '{for(i=1;i<=NF;i++){if(!seen[$i]++){printf((out?" ":"") $i);out=1}}}END{printf("\n")}'; }
|
|
||||||
|
|
||||||
if [[ -n ${AGENTS:-} ]]; then
|
if [[ -n ${AGENTS:-} ]]; then
|
||||||
read -ra AGENT_LIST <<< "$(printf '%s' "$AGENTS" | read_list)"
|
mapfile -t AGENT_LIST < <(printf '%s' "$AGENTS" | norm_list)
|
||||||
validate_subset agent "${ALL_AGENTS[*]}" "${AGENT_LIST[@]}" || exit 1
|
validate_subset agent ALL_AGENTS "${AGENT_LIST[@]}" || exit 1
|
||||||
else
|
else
|
||||||
AGENT_LIST=("${ALL_AGENTS[@]}")
|
AGENT_LIST=("${ALL_AGENTS[@]}")
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ -n ${SCRIPTS:-} ]]; then
|
if [[ -n ${SCRIPTS:-} ]]; then
|
||||||
read -ra SCRIPT_LIST <<< "$(printf '%s' "$SCRIPTS" | read_list)"
|
mapfile -t SCRIPT_LIST < <(printf '%s' "$SCRIPTS" | norm_list)
|
||||||
validate_subset script "${ALL_SCRIPTS[*]}" "${SCRIPT_LIST[@]}" || exit 1
|
validate_subset script ALL_SCRIPTS "${SCRIPT_LIST[@]}" || exit 1
|
||||||
else
|
else
|
||||||
SCRIPT_LIST=("${ALL_SCRIPTS[@]}")
|
SCRIPT_LIST=("${ALL_SCRIPTS[@]}")
|
||||||
fi
|
fi
|
||||||
|
|||||||
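The new-side validation in the hunk above relies on two bash features that are easy to misread in patch form: `local -n` binds the allowed list by name (a nameref) instead of flattening it into a string, and `mapfile -t` turns the normalized, newline-separated output of `norm_list` into an array without word-splitting surprises. A minimal, self-contained sketch of the same pattern follows; the sample variable values and the final `echo` are illustrative only and are not part of the repository scripts.

```bash
#!/usr/bin/env bash
set -euo pipefail

# Normalize a comma/space separated list to one item per line, first occurrence wins.
norm_list() {
  tr ',\n' ' ' | awk '{for(i=1;i<=NF;i++){if(!seen[$i]++){printf((out?"\n":"") $i);out=1}}}END{printf("\n")}'
}

# validate_subset TYPE ARRAY_NAME ITEM...
# ARRAY_NAME is passed by name; `local -n` makes `allowed` an alias for that array.
validate_subset() {
  local type=$1; shift
  local -n allowed=$1; shift
  local invalid=0
  for it in "$@"; do
    local found=0
    for a in "${allowed[@]}"; do [[ $it == "$a" ]] && { found=1; break; }; done
    if [[ $found -eq 0 ]]; then
      echo "Error: unknown $type '$it' (allowed: ${allowed[*]})" >&2
      invalid=1
    fi
  done
  return $invalid
}

ALL_AGENTS=(claude gemini copilot)       # illustrative subset of the real list
AGENTS="claude, gemini,copilot claude"   # messy user input with mixed separators

mapfile -t AGENT_LIST < <(printf '%s' "$AGENTS" | norm_list)
validate_subset agent ALL_AGENTS "${AGENT_LIST[@]}" || exit 1
echo "validated: ${AGENT_LIST[*]}"
```

Passing the array by name keeps one `validate_subset` usable for both the agent and the script lists without re-quoting either list at the call site.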
.github/workflows/stale.yml (vendored): 2 changes
@@ -39,4 +39,4 @@ jobs:
 any-of-labels: ''

 # Operations per run (helps avoid rate limits)
-operations-per-run: 250
+operations-per-run: 100
AGENTS.md: 33 changes
@@ -10,6 +10,10 @@ The toolkit supports multiple AI coding assistants, allowing teams to use their
 ---

+## General practices
+
+- Any changes to `__init__.py` for the Specify CLI require a version rev in `pyproject.toml` and addition of entries to `CHANGELOG.md`.
+
 ## Adding New Agent Support

 This section explains how to add support for new AI agents/assistants to the Specify CLI. Use this guide as a reference when integrating new AI tools into the Spec-Driven Development workflow.
@@ -31,25 +35,20 @@ Specify supports multiple AI agents by generating agent-specific command files a
 | **Gemini CLI** | `.gemini/commands/` | TOML | `gemini` | Google's Gemini CLI |
 | **GitHub Copilot** | `.github/agents/` | Markdown | N/A (IDE-based) | GitHub Copilot in VS Code |
 | **Cursor** | `.cursor/commands/` | Markdown | `cursor-agent` | Cursor CLI |
-| **Qwen Code** | `.qwen/commands/` | Markdown | `qwen` | Alibaba's Qwen Code CLI |
+| **Qwen Code** | `.qwen/commands/` | TOML | `qwen` | Alibaba's Qwen Code CLI |
 | **opencode** | `.opencode/command/` | Markdown | `opencode` | opencode CLI |
-| **Codex CLI** | `.agents/skills/` | Markdown | `codex` | Codex CLI (skills) |
+| **Codex CLI** | `.codex/commands/` | Markdown | `codex` | Codex CLI |
 | **Windsurf** | `.windsurf/workflows/` | Markdown | N/A (IDE-based) | Windsurf IDE workflows |
-| **Junie** | `.junie/commands/` | Markdown | `junie` | Junie by JetBrains |
-| **Kilo Code** | `.kilocode/workflows/` | Markdown | N/A (IDE-based) | Kilo Code IDE |
+| **Kilo Code** | `.kilocode/rules/` | Markdown | N/A (IDE-based) | Kilo Code IDE |
-| **Auggie CLI** | `.augment/commands/` | Markdown | `auggie` | Auggie CLI |
+| **Auggie CLI** | `.augment/rules/` | Markdown | `auggie` | Auggie CLI |
-| **Roo Code** | `.roo/commands/` | Markdown | N/A (IDE-based) | Roo Code IDE |
+| **Roo Code** | `.roo/rules/` | Markdown | N/A (IDE-based) | Roo Code IDE |
 | **CodeBuddy CLI** | `.codebuddy/commands/` | Markdown | `codebuddy` | CodeBuddy CLI |
 | **Qoder CLI** | `.qoder/commands/` | Markdown | `qodercli` | Qoder CLI |
 | **Kiro CLI** | `.kiro/prompts/` | Markdown | `kiro-cli` | Kiro CLI |
 | **Amp** | `.agents/commands/` | Markdown | `amp` | Amp CLI |
 | **SHAI** | `.shai/commands/` | Markdown | `shai` | SHAI CLI |
 | **Tabnine CLI** | `.tabnine/agent/commands/` | TOML | `tabnine` | Tabnine CLI |
-| **Kimi Code** | `.kimi/skills/` | Markdown | `kimi` | Kimi Code CLI (Moonshot AI) |
-| **Pi Coding Agent** | `.pi/prompts/` | Markdown | `pi` | Pi terminal coding agent |
-| **iFlow CLI** | `.iflow/commands/` | Markdown | `iflow` | iFlow CLI (iflow-ai) |
 | **IBM Bob** | `.bob/commands/` | Markdown | N/A (IDE-based) | IBM Bob IDE |
-| **Trae** | `.trae/rules/` | Markdown | N/A (IDE-based) | Trae IDE |
 | **Generic** | User-specified via `--ai-commands-dir` | Markdown | N/A | Bring your own agent |

 ### Step-by-Step Integration Guide
@@ -88,7 +87,7 @@ This eliminates the need for special-case mappings throughout the codebase.
 - `folder`: Directory where agent-specific files are stored (relative to project root)
 - `commands_subdir`: Subdirectory name within the agent folder where command/prompt files are stored (default: `"commands"`)
 - Most agents use `"commands"` (e.g., `.claude/commands/`)
-- Some agents use alternative names: `"agents"` (copilot), `"workflows"` (windsurf, kilocode), `"prompts"` (codex, kiro-cli, pi), `"command"` (opencode - singular)
+- Some agents use alternative names: `"agents"` (copilot), `"workflows"` (windsurf, kilocode, agy), `"prompts"` (codex, kiro-cli), `"command"` (opencode - singular)
 - This field enables `--ai-skills` to locate command templates correctly for skill generation
 - `install_url`: Installation documentation URL (set to `None` for IDE-based agents)
 - `requires_cli`: Whether the agent requires a CLI tool check during initialization
@@ -319,15 +318,12 @@ Require a command-line tool to be installed:
 - **Cursor**: `cursor-agent` CLI
 - **Qwen Code**: `qwen` CLI
 - **opencode**: `opencode` CLI
-- **Junie**: `junie` CLI
 - **Kiro CLI**: `kiro-cli` CLI
 - **CodeBuddy CLI**: `codebuddy` CLI
 - **Qoder CLI**: `qodercli` CLI
 - **Amp**: `amp` CLI
 - **SHAI**: `shai` CLI
 - **Tabnine CLI**: `tabnine` CLI
-- **Kimi Code**: `kimi` CLI
-- **Pi Coding Agent**: `pi` CLI

 ### IDE-Based Agents

@@ -341,7 +337,7 @@ Work within integrated development environments:

 ### Markdown Format

-Used by: Claude, Cursor, opencode, Windsurf, Junie, Kiro CLI, Amp, SHAI, IBM Bob, Kimi Code, Qwen, Pi
+Used by: Claude, Cursor, opencode, Windsurf, Kiro CLI, Amp, SHAI, IBM Bob

 **Standard format:**

@@ -366,7 +362,7 @@ Command content with {SCRIPT} and $ARGUMENTS placeholders.

 ### TOML Format

-Used by: Gemini, Tabnine
+Used by: Gemini, Qwen, Tabnine

 ```toml
 description = "Command description"
@@ -379,11 +375,6 @@ Command content with {SCRIPT} and {{args}} placeholders.
 ## Directory Conventions

 - **CLI agents**: Usually `.<agent-name>/commands/`
-- **Skills-based exceptions**:
-- Codex: `.agents/skills/` (skills, invoked as `$speckit-<command>`)
-- **Prompt-based exceptions**:
-- Kiro CLI: `.kiro/prompts/`
-- Pi: `.pi/prompts/`
 - **IDE agents**: Follow IDE-specific patterns:
 - Copilot: `.github/agents/`
 - Cursor: `.cursor/commands/`
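The Markdown-versus-TOML distinction in the tables above comes down to where the user's input lands: Markdown/prompt-style command files receive `$ARGUMENTS`, while TOML command files receive `{{args}}`. A rough sketch of what one generated command file could look like in each format, written as a small bash script so it runs as-is; the file names, front matter layout, and `prompt` field are illustrative assumptions, not the repository's actual templates.

```bash
#!/usr/bin/env bash
set -euo pipefail

# Markdown/prompt-style command file (agents that substitute $ARGUMENTS),
# e.g. a path like .claude/commands/speckit.plan.md (illustrative).
mkdir -p demo/.claude/commands
cat > demo/.claude/commands/speckit.plan.md <<'EOF'
---
description: Command description
---
Command content with {SCRIPT} and $ARGUMENTS placeholders.
EOF

# TOML-style command file (agents that substitute {{args}}),
# e.g. a path like .gemini/commands/speckit.plan.toml (illustrative layout).
mkdir -p demo/.gemini/commands
cat > demo/.gemini/commands/speckit.plan.toml <<'EOF'
description = "Command description"

prompt = """
Command content with {SCRIPT} and {{args}} placeholders.
"""
EOF

echo "Generated demo command files:"
find demo -type f
```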
CHANGELOG.md: 143 changes
@@ -1,111 +1,37 @@
 # Changelog

-## [0.3.2] - 2026-03-19
+<!-- markdownlint-disable MD024 -->

-### Changes
+Recent changes to the Specify CLI and templates are documented here.

-- chore: bump version to 0.3.2
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-- Add conduct extension to community catalog (#1908)
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-- feat(extensions): add verify-tasks extension to community catalog (#1871)
-- feat(presets): add enable/disable toggle and update semantics (#1891)
-- feat: add iFlow CLI support (#1875)
-- feat(commands): wire before/after hook events into specify and plan templates (#1886)
-- docs(catalog): add speckit-utils to community catalog (#1896)
-- docs: Add Extensions & Presets section to README (#1898)
-- chore: update DocGuard extension to v0.9.11 (#1899)
-- Update cognitive-squad catalog entry — Triadic Model, full lifecycle (#1884)
-- feat: register spec-kit-iterate extension (#1887)
-- fix(scripts): add explicit positional binding to PowerShell create-new-feature params (#1885)
-- fix(scripts): encode residual JSON control chars as \uXXXX instead of stripping (#1872)
-- chore: update DocGuard extension to v0.9.10 (#1890)
-- Feature/spec kit add pi coding agent pullrequest (#1853)
-- feat: register spec-kit-learn extension (#1883)

-## [0.3.1] - 2026-03-17
+## [0.2.1] - 2026-03-10

-### Changed
+### Added

-- chore: bump version to 0.3.1
+- feat(presets): Pluggable preset system with preset catalog and template resolver
-- docs: add greenfield Spring Boot pirate-speak preset demo to README (#1878)
+- Preset manifest (`preset.yml`) with validation for artifact, command, and script types
-- fix(ai-skills): exclude non-speckit copilot agent markdown from skills (#1867)
+- `PresetManifest`, `PresetRegistry`, `PresetManager`, `PresetCatalog`, `PresetResolver` classes in `src/specify_cli/presets.py`
-- feat: add Trae IDE support as a new agent (#1817)
+- CLI commands: `specify preset search`, `specify preset add`, `specify preset list`, `specify preset remove`, `specify preset resolve`, `specify preset info`
-- feat(cli): polite deep merge for settings.json and support JSONC (#1874)
+- CLI commands: `specify preset catalog list`, `specify preset catalog add`, `specify preset catalog remove` for multi-catalog management
-- feat(extensions,presets): add priority-based resolution ordering (#1855)
+- `PresetCatalogEntry` dataclass and multi-catalog support mirroring the extension catalog system
-- fix(scripts): suppress stdout from git fetch in create-new-feature.sh (#1876)
+- `--preset` option for `specify init` to install presets during initialization
-- fix(scripts): harden bash scripts — escape, compat, and error handling (#1869)
+- Priority-based preset resolution: presets with lower priority number win (`--priority` flag)
-- Add cognitive-squad to community extension catalog (#1870)
+- `resolve_template()` / `Resolve-Template` helpers in bash and PowerShell common scripts
-- docs: add Go / React brownfield walkthrough to community walkthroughs (#1868)
+- Template resolution priority stack: overrides → presets → extensions → core
-- chore: update DocGuard extension to v0.9.8 (#1859)
+- Preset catalog files (`presets/catalog.json`, `presets/catalog.community.json`)
-- Feature: add specify status command (#1837)
+- Preset scaffold directory (`presets/scaffold/`)
-- fix(extensions): show extension ID in list output (#1843)
+- Scripts updated to use template resolution instead of hardcoded paths
-- feat(extensions): add Archive and Reconcile extensions to community catalog (#1844)
+- feat(presets): Preset command overrides now propagate to agent skills when `--ai-skills` was used during init
-- feat: Add DocGuard CDD enforcement extension to community catalog (#1838)
+- feat: `specify init` persists CLI options to `.specify/init-options.json` for downstream operations
+- feat(extensions): support `.extensionignore` to exclude files/folders during `specify extension add` (#1781)

-## [0.3.0] - 2026-03-13
-
-### Changed
-
-- chore: bump version to 0.3.0
-- feat(presets): Pluggable preset system with catalog, resolver, and skills propagation (#1787)
-- fix: match 'Last updated' timestamp with or without bold markers (#1836)
-- Add specify doctor command for project health diagnostics (#1828)
-- fix: harden bash scripts against shell injection and improve robustness (#1809)
-- fix: clean up command templates (specify, analyze) (#1810)
-- fix: migrate Qwen Code CLI from TOML to Markdown format (#1589) (#1730)
-- fix(cli): deprecate explicit command support for agy (#1798) (#1808)
-- Add /selftest.extension core extension to test other extensions (#1758)
-- feat(extensions): Quality of life improvements for RFC-aligned catalog integration (#1776)
-- Add Java brownfield walkthrough to community walkthroughs (#1820)
-
-## [0.2.1] - 2026-03-11
-
-### Changed
-
-- Added February 2026 newsletter (#1812)
-- feat: add Kimi Code CLI agent support (#1790)
-- docs: fix broken links in quickstart guide (#1759) (#1797)
-- docs: add catalog cli help documentation (#1793) (#1794)
-- fix: use quiet checkout to avoid exception on git checkout (#1792)
-- feat(extensions): support .extensionignore to exclude files during install (#1781)
-- feat: add Codex support for extension command registration (#1767)
-- chore: bump version to 0.2.0 (#1786)
-- fix: sync agent list comments with actual supported agents (#1785)
-- feat(extensions): support multiple active catalogs simultaneously (#1720)
-- Pavel/add tabnine cli support (#1503)
-- Add Understanding extension to community catalog (#1778)
-- Add ralph extension to community catalog (#1780)
-- Update README with project initialization instructions (#1772)
-- feat: add review extension to community catalog (#1775)
-- Add fleet extension to community catalog (#1771)
-- Integration of Mistral vibe support into speckit (#1725)
-- fix: Remove duplicate options in specify.md (#1765)
-- fix: use global branch numbering instead of per-short-name detection (#1757)
-- Add Community Walkthroughs section to README (#1766)
-- feat(extensions): add Jira Integration to community catalog (#1764)
-- Add Azure DevOps Integration extension to community catalog (#1734)
-- Fix docs: update Antigravity link and add initialization example (#1748)
-- fix: wire after_tasks and after_implement hook events into command templates (#1702)
-- make c ignores consistent with c++ (#1747)
-- chore: bump version to 0.1.13 (#1746)
-- feat: add kiro-cli and AGENT_CONFIG consistency coverage (#1690)
-- feat: add verify extension to community catalog (#1726)
-- Add Retrospective Extension to community catalog README table (#1741)
-- fix(scripts): add empty description validation and branch checkout error handling (#1559)
-- fix: correct Copilot extension command registration (#1724)
-- fix(implement): remove Makefile from C ignore patterns (#1558)
-- Add sync extension to community catalog (#1728)
-- fix(checklist): clarify file handling behavior for append vs create (#1556)
-- fix(clarify): correct conflicting question limit from 10 to 5 (#1557)
-- chore: bump version to 0.1.12 (#1737)
-- fix: use RELEASE_PAT so tag push triggers release workflow (#1736)
-- fix: release-trigger uses release branch + PR instead of direct push to main (#1733)
-- fix: Split release process to sync pyproject.toml version with git tags (#1732)

 ## [0.2.0] - 2026-03-09

 ### Changed

-- feat: add Kimi Code CLI agent support
 - fix: sync agent list comments with actual supported agents (#1785)
 - feat(extensions): support multiple active catalogs simultaneously (#1720)
 - Pavel/add tabnine cli support (#1503)
@@ -282,3 +208,28 @@
 - Add pytest and Python linting (ruff) to CI (#1637)
 - feat: add pull request template for better contribution guidelines (#1634)

+## [0.0.99] - 2026-02-19
+
+- Feat/ai skills (#1632)
+
+## [0.0.98] - 2026-02-19
+
+- chore(deps): bump actions/stale from 9 to 10 (#1623)
+- feat: add dependabot configuration for pip and GitHub Actions updates (#1622)
+
+## [0.0.97] - 2026-02-18
+
+- Remove Maintainers section from README.md (#1618)
+
+## [0.0.96] - 2026-02-17
+
+- fix: typo in plan-template.md (#1446)
+
+## [0.0.95] - 2026-02-12
+
+- Feat: add a new agent: Google Anti Gravity (#1220)
+
+## [0.0.94] - 2026-02-11
+
+- Add stale workflow for 180-day inactive issues and PRs (#1594)
README.md: 160 changes
@@ -25,7 +25,6 @@
 - [🚶 Community Walkthroughs](#-community-walkthroughs)
 - [🤖 Supported AI Agents](#-supported-ai-agents)
 - [🔧 Specify CLI Reference](#-specify-cli-reference)
-- [🧩 Making Spec Kit Your Own: Extensions & Presets](#-making-spec-kit-your-own-extensions--presets)
 - [📚 Core Philosophy](#-core-philosophy)
 - [🌟 Development Phases](#-development-phases)
 - [🎯 Experimental Goals](#-experimental-goals)
@@ -49,13 +48,9 @@ Choose your preferred installation method:

 #### Option 1: Persistent Installation (Recommended)

-Install once and use everywhere. Pin a specific release tag for stability (check [Releases](https://github.com/github/spec-kit/releases) for the latest):
+Install once and use everywhere:

 ```bash
-# Install a specific stable release (recommended — replace vX.Y.Z with the latest tag)
-uv tool install specify-cli --from git+https://github.com/github/spec-kit.git@vX.Y.Z
-
-# Or install latest from main (may include unreleased changes)
 uv tool install specify-cli --from git+https://github.com/github/spec-kit.git
 ```

@@ -77,7 +72,7 @@ specify check
 To upgrade Specify, see the [Upgrade Guide](./docs/upgrade.md) for detailed instructions. Quick upgrade:

 ```bash
-uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git@vX.Y.Z
+uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git
 ```

 #### Option 2: One-time Usage
@@ -85,13 +80,13 @@ uv tool install specify-cli --force --from git+https://github.com/github/spec-ki
 Run directly without installing:

 ```bash
-# Create new project (pinned to a stable release — replace vX.Y.Z with the latest tag)
+# Create new project
-uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <PROJECT_NAME>
+uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME>

 # Or initialize in existing project
-uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init . --ai claude
+uvx --from git+https://github.com/github/spec-kit.git specify init . --ai claude
 # or
-uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init --here --ai claude
+uvx --from git+https://github.com/github/spec-kit.git specify init --here --ai claude
 ```

 **Benefits of persistent installation:**
@@ -101,13 +96,9 @@ uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init --here
 - Better tool management with `uv tool list`, `uv tool upgrade`, `uv tool uninstall`
 - Cleaner shell configuration

-#### Option 3: Enterprise / Air-Gapped Installation
-
-If your environment blocks access to PyPI or GitHub, see the [Enterprise / Air-Gapped Installation](./docs/installation.md#enterprise--air-gapped-installation) guide for step-by-step instructions on using `pip download` to create portable, OS-specific wheel bundles on a connected machine.
-
 ### 2. Establish project principles

-Launch your AI assistant in the project directory. Most agents expose spec-kit as `/speckit.*` slash commands; Codex CLI in skills mode uses `$speckit-*` instead.
+Launch your AI assistant in the project directory. The `/speckit.*` commands are available in the assistant.

 Use the **`/speckit.constitution`** command to create your project's governing principles and development guidelines that will guide all subsequent development.

@@ -163,13 +154,7 @@ See Spec-Driven Development in action across different scenarios with these comm

 - **[Greenfield Spring Boot + React platform](https://github.com/mnriem/spec-kit-spring-react-demo)** — Builds an LLM performance analytics platform (REST API, graphs, iteration tracking) from scratch using Spring Boot, embedded React, PostgreSQL, and Docker Compose, with a clarify step and a cross-artifact consistency analysis pass included.

-- **[Brownfield ASP.NET CMS extension](https://github.com/mnriem/spec-kit-aspnet-brownfield-demo)** — Extends an existing open-source .NET CMS (CarrotCakeCMS-Core, ~307,000 lines of C#, Razor, SQL, JavaScript, and config files) with two new features — cross-platform Docker Compose infrastructure and a token-authenticated headless REST API — demonstrating how spec-kit fits into existing codebases without prior specs or a constitution.
+- **[Brownfield ASP.NET CMS extension](https://github.com/mnriem/spec-kit-aspnet-brownfield-demo)** — Extends an existing open-source .NET CMS (CarrotCakeCMS-Core) with two new features — cross-platform Docker Compose infrastructure and a token-authenticated headless REST API — demonstrating how spec-kit fits into existing codebases without prior specs or a constitution.

-- **[Brownfield Java runtime extension](https://github.com/mnriem/spec-kit-java-brownfield-demo)** — Extends an existing open-source Jakarta EE runtime (Piranha, ~420,000 lines of Java, XML, JSP, HTML, and config files across 180 Maven modules) with a password-protected Server Admin Console, demonstrating spec-kit on a large multi-module Java project with no prior specs or constitution.
-
-- **[Brownfield Go / React dashboard demo](https://github.com/mnriem/spec-kit-go-brownfield-demo)** — Demonstrates spec-kit driven entirely from the **terminal using GitHub Copilot CLI**. Extends NASA's open-source Hermes ground support system (Go) with a lightweight React-based web telemetry dashboard, showing that the full constitution → specify → plan → tasks → implement workflow works from the terminal.
-
-- **[Greenfield Spring Boot MVC with a custom preset](https://github.com/mnriem/spec-kit-pirate-speak-preset-demo)** — Builds a Spring Boot MVC application from scratch using a custom pirate-speak preset, demonstrating how presets can reshape the entire spec-kit experience: specifications become "Voyage Manifests," plans become "Battle Plans," and tasks become "Crew Assignments" — all generated in full pirate vernacular without changing any tooling.
-
 ## 🤖 Supported AI Agents

@@ -181,7 +166,7 @@ See Spec-Driven Development in action across different scenarios with these comm
 | [Auggie CLI](https://docs.augmentcode.com/cli/overview) | ✅ | |
 | [Claude Code](https://www.anthropic.com/claude-code) | ✅ | |
 | [CodeBuddy CLI](https://www.codebuddy.ai/cli) | ✅ | |
-| [Codex CLI](https://github.com/openai/codex) | ✅ | Requires `--ai-skills`. Codex recommends [skills](https://developers.openai.com/codex/skills) and treats [custom prompts](https://developers.openai.com/codex/custom-prompts) as deprecated. Spec-kit installs Codex skills into `.agents/skills` and invokes them as `$speckit-<command>`. |
+| [Codex CLI](https://github.com/openai/codex) | ✅ | |
 | [Cursor](https://cursor.sh/) | ✅ | |
 | [Gemini CLI](https://github.com/google-gemini/gemini-cli) | ✅ | |
 | [GitHub Copilot](https://code.visualstudio.com/) | ✅ | |
@@ -189,18 +174,13 @@ See Spec-Driven Development in action across different scenarios with these comm
 | [Jules](https://jules.google.com/) | ✅ | |
 | [Kilo Code](https://github.com/Kilo-Org/kilocode) | ✅ | |
 | [opencode](https://opencode.ai/) | ✅ | |
-| [Pi Coding Agent](https://pi.dev) | ✅ | Pi doesn't have MCP support out of the box, so `taskstoissues` won't work as intended. MCP support can be added via [extensions](https://github.com/badlogic/pi-mono/tree/main/packages/coding-agent#extensions) |
 | [Qwen Code](https://github.com/QwenLM/qwen-code) | ✅ | |
 | [Roo Code](https://roocode.com/) | ✅ | |
 | [SHAI (OVHcloud)](https://github.com/ovh/shai) | ✅ | |
 | [Tabnine CLI](https://docs.tabnine.com/main/getting-started/tabnine-cli) | ✅ | |
 | [Mistral Vibe](https://github.com/mistralai/mistral-vibe) | ✅ | |
-| [Kimi Code](https://code.kimi.com/) | ✅ | |
-| [iFlow CLI](https://docs.iflow.cn/en/cli/quickstart) | ✅ | |
 | [Windsurf](https://windsurf.com/) | ✅ | |
-| [Junie](https://junie.jetbrains.com/) | ✅ | |
-| [Antigravity (agy)](https://antigravity.google/) | ✅ | Requires `--ai-skills` |
+| [Antigravity (agy)](https://antigravity.google/) | ✅ | |
-| [Trae](https://www.trae.ai/) | ✅ | |
 | Generic | ✅ | Bring your own agent — use `--ai generic --ai-commands-dir <path>` for unsupported agents |

 ## 🔧 Specify CLI Reference
@@ -209,28 +189,27 @@ The `specify` command supports the following options:

 ### Commands

 | Command | Description |
-| ------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- |
 | `init` | Initialize a new Specify project from the latest template |
-| `check` | Check for installed tools: `git` plus all CLI-based agents configured in `AGENT_CONFIG` (for example: `claude`, `gemini`, `code`/`code-insiders`, `cursor-agent`, `windsurf`, `junie`, `qwen`, `opencode`, `codex`, `kiro-cli`, `shai`, `qodercli`, `vibe`, `kimi`, `iflow`, `pi`, etc.) |
+| `check` | Check for installed tools (`git`, `claude`, `gemini`, `code`/`code-insiders`, `cursor-agent`, `windsurf`, `qwen`, `opencode`, `codex`, `kiro-cli`, `shai`, `qodercli`, `vibe`) |

 ### `specify init` Arguments & Options

 | Argument/Option | Type | Description |
-| ---------------------- | -------- |-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| ---------------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
 | `<project-name>` | Argument | Name for your new project directory (optional if using `--here`, or use `.` for current directory) |
-| `--ai` | Option | AI assistant to use (see `AGENT_CONFIG` for the full, up-to-date list). Common options include: `claude`, `gemini`, `copilot`, `cursor-agent`, `qwen`, `opencode`, `codex`, `windsurf`, `junie`, `kilocode`, `auggie`, `roo`, `codebuddy`, `amp`, `shai`, `kiro-cli` (`kiro` alias), `agy`, `bob`, `qodercli`, `vibe`, `kimi`, `iflow`, `pi`, or `generic` (requires `--ai-commands-dir`) |
+| `--ai` | Option | AI assistant to use: `claude`, `gemini`, `copilot`, `cursor-agent`, `qwen`, `opencode`, `codex`, `windsurf`, `kilocode`, `auggie`, `roo`, `codebuddy`, `amp`, `shai`, `kiro-cli` (`kiro` alias), `agy`, `bob`, `qodercli`, `vibe`, or `generic` (requires `--ai-commands-dir`) |
 | `--ai-commands-dir` | Option | Directory for agent command files (required with `--ai generic`, e.g. `.myagent/commands/`) |
 | `--script` | Option | Script variant to use: `sh` (bash/zsh) or `ps` (PowerShell) |
 | `--ignore-agent-tools` | Flag | Skip checks for AI agent tools like Claude Code |
 | `--no-git` | Flag | Skip git repository initialization |
 | `--here` | Flag | Initialize project in the current directory instead of creating a new one |
 | `--force` | Flag | Force merge/overwrite when initializing in current directory (skip confirmation) |
 | `--skip-tls` | Flag | Skip SSL/TLS verification (not recommended) |
 | `--debug` | Flag | Enable detailed debug output for troubleshooting |
 | `--github-token` | Option | GitHub token for API requests (or set GH_TOKEN/GITHUB_TOKEN env variable) |
 | `--ai-skills` | Flag | Install Prompt.MD templates as agent skills in agent-specific `skills/` directory (requires `--ai`) |
-| `--branch-numbering` | Option | Branch numbering strategy: `sequential` (default — `001`, `002`, `003`) or `timestamp` (`YYYYMMDD-HHMMSS`). Timestamp mode is useful for distributed teams to avoid numbering conflicts |

 ### Examples

@@ -265,14 +244,8 @@ specify init my-project --ai vibe
 # Initialize with IBM Bob support
 specify init my-project --ai bob

-# Initialize with Pi Coding Agent support
-specify init my-project --ai pi
-
-# Initialize with Codex CLI support
-specify init my-project --ai codex --ai-skills
-
 # Initialize with Antigravity support
-specify init my-project --ai agy --ai-skills
+specify init my-project --ai agy

 # Initialize with an unsupported agent (generic / bring your own agent)
 specify init my-project --ai generic --ai-commands-dir .myagent/commands/
@@ -305,18 +278,13 @@ specify init my-project --ai claude --ai-skills
 # Initialize in current directory with agent skills
 specify init --here --ai gemini --ai-skills

-# Use timestamp-based branch numbering (useful for distributed teams)
-specify init my-project --ai claude --branch-numbering timestamp
-
 # Check system requirements
 specify check
 ```

 ### Available Slash Commands

-After running `specify init`, your AI coding agent will have access to these slash commands for structured development.
+After running `specify init`, your AI coding agent will have access to these slash commands for structured development:

-For Codex CLI, `--ai-skills` installs spec-kit as agent skills instead of slash-command prompt files. In Codex skills mode, invoke spec-kit as `$speckit-constitution`, `$speckit-specify`, `$speckit-plan`, `$speckit-tasks`, and `$speckit-implement`.
-
 #### Core Commands

@@ -346,68 +314,6 @@ Additional commands for enhanced quality and validation:
 | ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
 | `SPECIFY_FEATURE` | Override feature detection for non-Git repositories. Set to the feature directory name (e.g., `001-photo-albums`) to work on a specific feature when not using Git branches.<br/>\*\*Must be set in the context of the agent you're working with prior to using `/speckit.plan` or follow-up commands. |

-## 🧩 Making Spec Kit Your Own: Extensions & Presets
-
-Spec Kit can be tailored to your needs through two complementary systems — **extensions** and **presets** — plus project-local overrides for one-off adjustments:
-
-```mermaid
-block-beta
-columns 1
-overrides["⬆ Highest priority\nProject-Local Overrides\n.specify/templates/overrides/"]
-presets["Presets — Customize core & extensions\n.specify/presets/<preset-id>/templates/"]
-extensions["Extensions — Add new capabilities\n.specify/extensions/<ext-id>/templates/"]
-core["Spec Kit Core — Built-in SDD commands & templates\n.specify/templates/\n⬇ Lowest priority"]
-
-style overrides fill:transparent,stroke:#999
-style presets fill:transparent,stroke:#4a9eda
-style extensions fill:transparent,stroke:#4a9e4a
-style core fill:transparent,stroke:#e6a817
-```
-
-**Templates** are resolved at **runtime** — Spec Kit walks the stack top-down and uses the first match. Project-local overrides (`.specify/templates/overrides/`) let you make one-off adjustments for a single project without creating a full preset. **Commands** are applied at **install time** — when you run `specify extension add` or `specify preset add`, command files are written into agent directories (e.g., `.claude/commands/`). If multiple presets or extensions provide the same command, the highest-priority version wins. On removal, the next-highest-priority version is restored automatically. If no overrides or customizations exist, Spec Kit uses its core defaults.
-
-### Extensions — Add New Capabilities
-
-Use **extensions** when you need functionality that goes beyond Spec Kit's core. Extensions introduce new commands and templates — for example, adding domain-specific workflows that are not covered by the built-in SDD commands, integrating with external tools, or adding entirely new development phases. They expand *what Spec Kit can do*.
-
-```bash
-# Search available extensions
-specify extension search
-
-# Install an extension
-specify extension add <extension-name>
-```
-
-For example, extensions could add Jira integration, post-implementation code review, V-Model test traceability, or project health diagnostics.
-
-See the [Extensions README](./extensions/README.md) for the full guide, the complete community catalog, and how to build and publish your own.
-
-### Presets — Customize Existing Workflows
-
-Use **presets** when you want to change *how* Spec Kit works without adding new capabilities. Presets override the templates and commands that ship with the core *and* with installed extensions — for example, enforcing a compliance-oriented spec format, using domain-specific terminology, or applying organizational standards to plans and tasks. They customize the artifacts and instructions that Spec Kit and its extensions produce.
-
-```bash
-# Search available presets
-specify preset search
-
-# Install a preset
-specify preset add <preset-name>
-```
-
-For example, presets could restructure spec templates to require regulatory traceability, adapt the workflow to fit the methodology you use (e.g., Agile, Kanban, Waterfall, jobs-to-be-done, or domain-driven design), add mandatory security review gates to plans, enforce test-first task ordering, or localize the entire workflow to a different language. The [pirate-speak demo](https://github.com/mnriem/spec-kit-pirate-speak-preset-demo) shows just how deep the customization can go. Multiple presets can be stacked with priority ordering.
-
-See the [Presets README](./presets/README.md) for the full guide, including resolution order, priority, and how to create your own.
-
-### When to Use Which
-
-| Goal | Use |
-| --- | --- |
-| Add a brand-new command or workflow | Extension |
-| Customize the format of specs, plans, or tasks | Preset |
-| Integrate an external tool or service | Extension |
-| Enforce organizational or regulatory standards | Preset |
-| Ship reusable domain-specific templates | Either — presets for template overrides, extensions for templates bundled with new commands |
-
 ## 📚 Core Philosophy

 Spec-Driven Development is a structured process that emphasizes:
@@ -502,11 +408,11 @@ specify init <project_name> --ai copilot

 # Or in current directory:
 specify init . --ai claude
-specify init . --ai codex --ai-skills
+specify init . --ai codex

 # or use --here flag
 specify init --here --ai claude
-specify init --here --ai codex --ai-skills
+specify init --here --ai codex

 # Force merge into a non-empty current directory
 specify init . --force --ai claude
@@ -515,7 +421,7 @@ specify init . --force --ai claude
 specify init --here --force --ai claude
 ```

-The CLI will check if you have Claude Code, Gemini CLI, Cursor CLI, Qwen CLI, opencode, Codex CLI, Qoder CLI, Tabnine CLI, Kiro CLI, Pi, or Mistral Vibe installed. If you do not, or you prefer to get the templates without checking for the right tools, use `--ignore-agent-tools` with your command:
+The CLI will check if you have Claude Code, Gemini CLI, Cursor CLI, Qwen CLI, opencode, Codex CLI, Qoder CLI, Tabnine CLI, Kiro CLI, or Mistral Vibe installed. If you do not, or you prefer to get the templates without checking for the right tools, use `--ignore-agent-tools` with your command:

 ```bash
 specify init <project_name> --ai claude --ignore-agent-tools
SUPPORT.md: 13 changes
@@ -1,17 +1,18 @@
 # Support

-## How to get help
+## How to file issues and get help

-Please search existing [issues](https://github.com/github/spec-kit/issues) and [discussions](https://github.com/github/spec-kit/discussions) before creating new ones to avoid duplicates.
+This project uses GitHub issues to track bugs and feature requests. Please search the existing issues before filing new issues to avoid duplicates. For new issues, file your bug or feature request as a new issue.

-- Review the [README](./README.md) for getting started instructions and troubleshooting tips
+For help or questions about using this project, please:

+- Open a [GitHub issue](https://github.com/github/spec-kit/issues/new) for bug reports, feature requests, or questions about the Spec-Driven Development methodology
 - Check the [comprehensive guide](./spec-driven.md) for detailed documentation on the Spec-Driven Development process
-- Ask in [GitHub Discussions](https://github.com/github/spec-kit/discussions) for questions about using Spec Kit or the Spec-Driven Development methodology
+- Review the [README](./README.md) for getting started instructions and troubleshooting tips
-- Open a [GitHub issue](https://github.com/github/spec-kit/issues/new) for bug reports and feature requests

 ## Project Status

-**Spec Kit** is under active development and maintained by GitHub staff and the community. We will do our best to respond to support, feature requests, and community questions as time permits.
+**Spec Kit** is under active development and maintained by GitHub staff **AND THE COMMUNITY**. We will do our best to respond to support, feature requests, and community questions in a timely manner.

 ## GitHub Support Policy
@@ -3,7 +3,7 @@
|
|||||||
## Prerequisites
|
## Prerequisites
|
||||||
|
|
||||||
- **Linux/macOS** (or Windows; PowerShell scripts now supported without WSL)
|
- **Linux/macOS** (or Windows; PowerShell scripts now supported without WSL)
|
||||||
- AI coding agent: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), [Codebuddy CLI](https://www.codebuddy.ai/cli), [Gemini CLI](https://github.com/google-gemini/gemini-cli), or [Pi Coding Agent](https://pi.dev)
|
- AI coding agent: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), [Codebuddy CLI](https://www.codebuddy.ai/cli) or [Gemini CLI](https://github.com/google-gemini/gemini-cli)
|
||||||
- [uv](https://docs.astral.sh/uv/) for package management
|
- [uv](https://docs.astral.sh/uv/) for package management
|
||||||
- [Python 3.11+](https://www.python.org/downloads/)
|
- [Python 3.11+](https://www.python.org/downloads/)
|
||||||
- [Git](https://git-scm.com/downloads)
|
- [Git](https://git-scm.com/downloads)
|
||||||
@@ -12,22 +12,18 @@

### Initialize a New Project

- The easiest way to get started is to initialize a new project. Pin a specific release tag for stability (check [Releases](https://github.com/github/spec-kit/releases) for the latest):
+ The easiest way to get started is to initialize a new project:

```bash
- # Install from a specific stable release (recommended — replace vX.Y.Z with the latest tag)
- uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <PROJECT_NAME>
-
- # Or install latest from main (may include unreleased changes)
uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME>
```

Or initialize in the current directory:

```bash
- uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init .
+ uvx --from git+https://github.com/github/spec-kit.git specify init .
# or use the --here flag
- uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init --here
+ uvx --from git+https://github.com/github/spec-kit.git specify init --here
```

### Specify AI Agent

@@ -35,11 +31,10 @@ uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init --here

You can proactively specify your AI agent during initialization:

```bash
- uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai claude
+ uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai claude
- uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai gemini
+ uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai gemini
- uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai copilot
+ uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai copilot
- uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai codebuddy
+ uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai codebuddy
- uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai pi
```

### Specify Script Type (Shell vs PowerShell)
@@ -55,8 +50,8 @@ Auto behavior:

Force a specific script type:

```bash
- uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --script sh
+ uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --script sh
- uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --script ps
+ uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --script ps
```

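The `--ai` and `--script` flags should compose with the other init options; a minimal sketch, assuming the two flags can be combined in a single invocation (the docs above only show them separately, and `my-app` is a placeholder project name):

```bash
# Pick the agent and force PowerShell scripts in one init call
uvx --from git+https://github.com/github/spec-kit.git specify init my-app --ai claude --script ps
```
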
### Ignore Agent Tools Check

@@ -64,7 +59,7 @@ uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <proje

If you prefer to get the templates without checking for the right tools:

```bash
- uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai claude --ignore-agent-tools
+ uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai claude --ignore-agent-tools
```

## Verification
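After initialization, `.specify/scripts` should contain both `.sh` and `.ps1` scripts; a quick sanity check, assuming a POSIX shell (the exact file listing will vary by release):

```bash
# Confirm the scaffolding landed: shell and PowerShell variants side by side
ls -la .specify/scripts/
```
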
@@ -79,52 +74,6 @@ The `.specify/scripts` directory will contain both `.sh` and `.ps1` scripts.

## Troubleshooting

- ### Enterprise / Air-Gapped Installation
-
- If your environment blocks access to PyPI (you see 403 errors when running `uv tool install` or `pip install`), you can create a portable wheel bundle on a connected machine and transfer it to the air-gapped target.
-
- **Step 1: Build the wheel on a connected machine (same OS and Python version as the target)**
-
- ```bash
- # Clone the repository
- git clone https://github.com/github/spec-kit.git
- cd spec-kit
-
- # Build the wheel
- pip install build
- python -m build --wheel --outdir dist/
-
- # Download the wheel and all its runtime dependencies
- pip download -d dist/ dist/specify_cli-*.whl
- ```
-
- > **Important:** `pip download` resolves platform-specific wheels (e.g., PyYAML includes native extensions). You must run this step on a machine with the **same OS and Python version** as the air-gapped target. If you need to support multiple platforms, repeat this step on each target OS (Linux, macOS, Windows) and Python version.
-
- **Step 2: Transfer the `dist/` directory to the air-gapped machine**
-
- Copy the entire `dist/` directory (which contains the `specify-cli` wheel and all dependency wheels) to the target machine via USB, network share, or other approved transfer method.
-
- **Step 3: Install on the air-gapped machine**
-
- ```bash
- pip install --no-index --find-links=./dist specify-cli
- ```
-
- **Step 4: Initialize a project (no network required)**
-
- ```bash
- # Initialize a project — no GitHub access needed
- specify init my-project --ai claude --offline
- ```
-
- The `--offline` flag tells the CLI to use the templates, commands, and scripts bundled inside the wheel instead of downloading from GitHub.
-
- > **Deprecation notice:** Starting with v0.6.0, `specify init` will use bundled assets by default and the `--offline` flag will be removed. The GitHub download path will be retired because bundled assets eliminate the need for network access, avoid proxy/firewall issues, and guarantee that templates always match the installed CLI version. No action will be needed — `specify init` will simply work without network access out of the box.
-
- > **Note:** Python 3.11+ is required.
-
- > **Windows note:** Offline scaffolding requires PowerShell 7+ (`pwsh`), not Windows PowerShell 5.x (`powershell.exe`). Install from https://aka.ms/powershell.

### Git Credential Manager on Linux

If you're having issues with Git authentication on Linux, you can install Git Credential Manager:
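One common route is the Debian package from the Git Credential Manager releases page; a minimal sketch, assuming an amd64 `.deb` downloaded from https://github.com/git-ecosystem/git-credential-manager/releases (the filename placeholder below must be replaced with the actual release artifact):

```bash
# Install the downloaded GCM package, then register it as git's credential helper
sudo dpkg -i gcm-linux_amd64.<version>.deb
git-credential-manager configure
```
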
@@ -173,6 +173,6 @@ Finally, implement the solution:

## Next Steps

- - Read the [complete methodology](https://github.com/github/spec-kit/blob/main/spec-driven.md) for in-depth guidance
+ - Read the [complete methodology](../spec-driven.md) for in-depth guidance
- - Check out [more examples](https://github.com/github/spec-kit/tree/main/templates) in the repository
+ - Check out [more examples](../templates) in the repository
- Explore the [source code on GitHub](https://github.com/github/spec-kit)

@@ -8,7 +8,7 @@

| What to Upgrade | Command | When to Use |
|----------------|---------|-------------|
- | **CLI Tool Only** | `uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git@vX.Y.Z` | Get latest CLI features without touching project files |
+ | **CLI Tool Only** | `uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git` | Get latest CLI features without touching project files |
| **Project Files** | `specify init --here --force --ai <your-agent>` | Update slash commands, templates, and scripts in your project |
| **Both** | Run CLI upgrade, then project update | Recommended for major version updates |

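For the "Both" row, the sequence is just the two documented commands back to back; a minimal sketch, assuming the Claude agent (substitute your own via `--ai`):

```bash
# 1. Upgrade the CLI tool itself
uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git

# 2. Then refresh the project's slash commands, templates, and scripts
specify init --here --force --ai claude
```
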
@@ -20,18 +20,16 @@ The CLI tool (`specify`) is separate from your project files. Upgrade it to get

### If you installed with `uv tool install`

- Upgrade to a specific release (check [Releases](https://github.com/github/spec-kit/releases) for the latest tag):
-
```bash
- uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git@vX.Y.Z
+ uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git
```

### If you use one-shot `uvx` commands

- Specify the desired release tag:
+ No upgrade needed—`uvx` always fetches the latest version. Just run your commands as normal:

```bash
- uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init --here --ai copilot
+ uvx --from git+https://github.com/github/spec-kit.git specify init --here --ai copilot
```

### Verify the upgrade
@@ -291,9 +289,8 @@ This tells Spec Kit which feature directory to use when creating specs, plans, a

```bash
ls -la .claude/commands/   # Claude Code
ls -la .gemini/commands/   # Gemini
ls -la .cursor/commands/   # Cursor
- ls -la .pi/prompts/        # Pi Coding Agent
```

3. **Check agent-specific setup:**
@@ -401,7 +398,7 @@ The `specify` CLI tool is used for:

- **Upgrades:** `specify init --here --force` to update templates and commands
- **Diagnostics:** `specify check` to verify tool installation

- Once you've run `specify init`, the slash commands (like `/speckit.specify`, `/speckit.plan`, etc.) are **permanently installed** in your project's agent folder (`.claude/`, `.github/prompts/`, `.pi/prompts/`, etc.). Your AI assistant reads these command files directly—no need to run `specify` again.
+ Once you've run `specify init`, the slash commands (like `/speckit.specify`, `/speckit.plan`, etc.) are **permanently installed** in your project's agent folder (`.claude/`, `.github/prompts/`, etc.). Your AI assistant reads these command files directly—no need to run `specify` again.

**If your agent isn't recognizing slash commands:**

@@ -413,9 +410,6 @@ Once you've run `specify init`, the slash commands (like `/speckit.specify`, `/s

# For Claude
ls -la .claude/commands/
-
- # For Pi
- ls -la .pi/prompts/
```

2. **Restart your IDE/editor completely** (not just reload window)

@@ -53,7 +53,7 @@ provides:

  required: boolean  # Default: false

hooks:  # Optional, event hooks
- event_name:  # e.g., "after_specify", "after_plan", "after_tasks", "after_implement"
+ event_name:  # e.g., "after_tasks", "after_implement"
  command: string  # Command to execute
  optional: boolean  # Default: true
  prompt: string  # Prompt text for optional hooks

@@ -108,7 +108,7 @@ defaults:  # Optional, default configuration values

#### `hooks`

- **Type**: object
- - **Keys**: Event names (e.g., `after_specify`, `after_plan`, `after_tasks`, `after_implement`, `before_commit`)
+ - **Keys**: Event names (e.g., `after_tasks`, `after_implement`, `before_commit`)
- **Description**: Hooks that execute at lifecycle events
- **Events**: Defined by core spec-kit commands

@@ -551,16 +551,10 @@ hooks:

Standard events (defined by core):

- - `before_specify` - Before specification generation
- - `after_specify` - After specification generation
- - `before_plan` - Before implementation planning
- - `after_plan` - After implementation planning
- - `before_tasks` - Before task generation
- `after_tasks` - After task generation
- - `before_implement` - Before implementation
- `after_implement` - After implementation
- - `before_commit` - Before git commit *(planned - not yet wired into core templates)*
+ - `before_commit` - Before git commit
- - `after_commit` - After git commit *(planned - not yet wired into core templates)*
+ - `after_commit` - After git commit

### Hook Configuration

@@ -209,22 +209,9 @@ Edit `extensions/catalog.community.json` and add your extension:

Add your extension to the Available Extensions table in `extensions/README.md`:

```markdown
- | Your Extension Name | Brief description of what it does | `<category>` | <effect> | [repo-name](https://github.com/your-org/spec-kit-your-extension) |
+ | Your Extension Name | Brief description of what it does | [repo-name](https://github.com/your-org/spec-kit-your-extension) |
```

- **(Table) Category** — pick the one that best fits your extension:
-
- - `docs` — reads, validates, or generates spec artifacts
- - `code` — reviews, validates, or modifies source code
- - `process` — orchestrates workflow across phases
- - `integration` — syncs with external platforms
- - `visibility` — reports on project health or progress
-
- **Effect** — choose one:
-
- - Read-only — produces reports without modifying files
- - Read+Write — modifies files, creates artifacts, or updates specs

Insert your extension in alphabetical order in the table.

### 4. Submit Pull Request

@@ -387,9 +387,6 @@ settings:

  auto_execute_hooks: true

# Hook configuration
- # Available events: before_specify, after_specify, before_plan, after_plan,
- #   before_tasks, after_tasks, before_implement, after_implement
- # Planned (not yet wired into core templates): before_commit, after_commit
hooks:
  after_tasks:
    - extension: jira
@@ -435,26 +432,6 @@ Spec Kit uses a **catalog stack** — an ordered list of catalogs searched simul

specify extension catalog list
```

- ### Managing Catalogs via CLI
-
- You can view the main catalog management commands using `--help`:
-
- ```text
- specify extension catalog --help
-
-  Usage: specify extension catalog [OPTIONS] COMMAND [ARGS]...
-
-  Manage extension catalogs
-
- ╭─ Options ─────────────────────────────────────────────────╮
- │ --help    Show this message and exit.                     │
- ╰───────────────────────────────────────────────────────────╯
- ╭─ Commands ────────────────────────────────────────────────╮
- │ list     List all active extension catalogs.              │
- │ add      Add a catalog to .specify/extension-catalogs.yml.│
- │ remove   Remove a catalog from .specify/extension-catalogs.yml. │
- ╰───────────────────────────────────────────────────────────╯
- ```

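Per the removed help text, `list`, `add`, and `remove` manage the catalog stack in `.specify/extension-catalogs.yml`. A minimal sketch, assuming `add` and `remove` take the catalog URL as a positional argument (the help output does not show the exact argument shape):

```bash
# Inspect the active catalog stack
specify extension catalog list

# Add the community catalog, then remove it again (URL from this repository)
specify extension catalog add https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json
specify extension catalog remove https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json
```
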
### Adding a Catalog (Project-scoped)

```bash
@@ -70,34 +70,19 @@ specify extension add --from https://github.com/org/spec-kit-ext/archive/refs/ta

The following community-contributed extensions are available in [`catalog.community.json`](catalog.community.json):

- **Categories:** `docs` — reads, validates, or generates spec artifacts · `code` — reviews, validates, or modifies source code · `process` — orchestrates workflow across phases · `integration` — syncs with external platforms · `visibility` — reports on project health or progress
-
- **Effect:** `Read-only` — produces reports without modifying files · `Read+Write` — modifies files, creates artifacts, or updates specs
-
- | Extension | Purpose | Category | Effect | URL |
- |-----------|---------|----------|--------|-----|
- | Archive Extension | Archive merged features into main project memory. | `docs` | Read+Write | [spec-kit-archive](https://github.com/stn1slv/spec-kit-archive) |
- | Azure DevOps Integration | Sync user stories and tasks to Azure DevOps work items using OAuth authentication | `integration` | Read+Write | [spec-kit-azure-devops](https://github.com/pragya247/spec-kit-azure-devops) |
- | Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | `code` | Read+Write | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) |
- | Cognitive Squad | Multi-agent cognitive system with Triadic Model: understanding, internalization, application — with quality gates, backpropagation verification, and self-healing | `docs` | Read+Write | [cognitive-squad](https://github.com/Testimonial/cognitive-squad) |
- | Conduct Extension | Orchestrates spec-kit phases via sub-agent delegation to reduce context pollution. | `process` | Read+Write | [spec-kit-conduct-ext](https://github.com/twbrandon7/spec-kit-conduct-ext) |
- | DocGuard — CDD Enforcement | Canonical-Driven Development enforcement. Validates, scores, and traces project documentation with automated checks, AI-driven workflows, and spec-kit hooks. Zero NPM runtime dependencies. | `docs` | Read+Write | [spec-kit-docguard](https://github.com/raccioly/docguard) |
- | Fleet Orchestrator | Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases | `process` | Read+Write | [spec-kit-fleet](https://github.com/sharathsatish/spec-kit-fleet) |
- | Iterate | Iterate on spec documents with a two-phase define-and-apply workflow — refine specs mid-implementation and go straight back to building | `docs` | Read+Write | [spec-kit-iterate](https://github.com/imviancagrace/spec-kit-iterate) |
- | Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | `integration` | Read+Write | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
- | Learning Extension | Generate educational guides from implementations and enhance clarifications with mentoring context | `docs` | Read+Write | [spec-kit-learn](https://github.com/imviancagrace/spec-kit-learn) |
- | Project Health Check | Diagnose a Spec Kit project and report health issues across structure, agents, features, scripts, extensions, and git | `visibility` | Read-only | [spec-kit-doctor](https://github.com/KhawarHabibKhan/spec-kit-doctor) |
- | Project Status | Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary | `visibility` | Read-only | [spec-kit-status](https://github.com/KhawarHabibKhan/spec-kit-status) |
- | Ralph Loop | Autonomous implementation loop using AI agent CLI | `code` | Read+Write | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |
- | Reconcile Extension | Reconcile implementation drift by surgically updating feature artifacts. | `docs` | Read+Write | [spec-kit-reconcile](https://github.com/stn1slv/spec-kit-reconcile) |
- | Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | `docs` | Read+Write | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
- | Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | `code` | Read-only | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
- | SDD Utilities | Resume interrupted workflows, validate project health, and verify spec-to-task traceability | `process` | Read+Write | [speckit-utils](https://github.com/mvanhorn/speckit-utils) |
- | Spec Sync | Detect and resolve drift between specs and implementation. AI-assisted resolution with human approval | `docs` | Read+Write | [spec-kit-sync](https://github.com/bgervin/spec-kit-sync) |
- | Understanding | Automated requirements quality analysis — 31 deterministic metrics against IEEE/ISO standards with experimental energy-based ambiguity detection | `docs` | Read-only | [understanding](https://github.com/Testimonial/understanding) |
- | V-Model Extension Pack | Enforces V-Model paired generation of development specs and test specs with full traceability | `docs` | Read+Write | [spec-kit-v-model](https://github.com/leocamello/spec-kit-v-model) |
- | Verify Extension | Post-implementation quality gate that validates implemented code against specification artifacts | `code` | Read-only | [spec-kit-verify](https://github.com/ismaelJimenez/spec-kit-verify) |
- | Verify Tasks Extension | Detect phantom completions: tasks marked [X] in tasks.md with no real implementation | `code` | Read-only | [spec-kit-verify-tasks](https://github.com/datastone-inc/spec-kit-verify-tasks) |
+ | Extension | Purpose | URL |
+ |-----------|---------|-----|
+ | Azure DevOps Integration | Sync user stories and tasks to Azure DevOps work items using OAuth authentication | [spec-kit-azure-devops](https://github.com/pragya247/spec-kit-azure-devops) |
+ | Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) |
+ | Fleet Orchestrator | Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases | [spec-kit-fleet](https://github.com/sharathsatish/spec-kit-fleet) |
+ | Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
+ | Ralph Loop | Autonomous implementation loop using AI agent CLI | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |
+ | Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
+ | Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
+ | Spec Sync | Detect and resolve drift between specs and implementation. AI-assisted resolution with human approval | [spec-kit-sync](https://github.com/bgervin/spec-kit-sync) |
+ | Understanding | Automated requirements quality analysis — 31 deterministic metrics against IEEE/ISO standards with experimental energy-based ambiguity detection | [understanding](https://github.com/Testimonial/understanding) |
+ | V-Model Extension Pack | Enforces V-Model paired generation of development specs and test specs with full traceability | [spec-kit-v-model](https://github.com/leocamello/spec-kit-v-model) |
+ | Verify Extension | Post-implementation quality gate that validates implemented code against specification artifacts | [spec-kit-verify](https://github.com/ismaelJimenez/spec-kit-verify) |

## Adding Your Extension

@@ -1,9 +1,9 @@

# RFC: Spec Kit Extension System

- **Status**: Implemented
+ **Status**: Draft
**Author**: Stats Perform Engineering
**Created**: 2026-01-28
- **Updated**: 2026-03-11
+ **Updated**: 2026-01-28

---

@@ -24,9 +24,8 @@

13. [Security Considerations](#security-considerations)
14. [Migration Strategy](#migration-strategy)
15. [Implementation Phases](#implementation-phases)
- 16. [Resolved Questions](#resolved-questions)
- 17. [Open Questions (Remaining)](#open-questions-remaining)
- 18. [Appendices](#appendices)
+ 16. [Open Questions](#open-questions)
+ 17. [Appendices](#appendices)

---

@@ -359,15 +358,12 @@ specify extension add jira

      "installed_at": "2026-01-28T14:30:00Z",
      "source": "catalog",
      "manifest_hash": "sha256:abc123...",
-       "enabled": true,
-       "priority": 10
+       "enabled": true
    }
  }
}
```

- **Priority Field**: Extensions are ordered by `priority` (lower = higher precedence). Default is 10. Used for template resolution when multiple extensions provide the same template.

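Where the priority field is supported, precedence can be set at install time or adjusted afterwards; a short sketch using the `--priority` flag and the `set-priority` command documented elsewhere in this RFC:

```bash
# Install with higher precedence (lower number wins; the default is 10)
specify extension add jira --priority 5

# Or change it after installation
specify extension set-priority jira 5
```
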
### 3. Configuration

```bash
@@ -1087,15 +1083,11 @@ List installed extensions in current project.

$ specify extension list

Installed Extensions:
- ✓ Jira Integration (v1.0.0)
-   jira
-   Create Jira issues from spec-kit artifacts
-   Commands: 3 | Hooks: 2 | Priority: 10 | Status: Enabled
+ ✓ jira (v1.0.0) - Jira Integration
+   Commands: 3 | Hooks: 2 | Status: Enabled

- ✓ Linear Integration (v0.9.0)
-   linear
-   Create Linear issues from spec-kit artifacts
-   Commands: 1 | Hooks: 1 | Priority: 10 | Status: Enabled
+ ✓ linear (v0.9.0) - Linear Integration
+   Commands: 1 | Hooks: 1 | Status: Enabled
```

**Options:**
@@ -1203,9 +1195,10 @@ Next steps:

**Options:**

- - `--from URL`: Install from a remote URL (archive). Does not accept Git repositories directly.
- - `--dev`: Install from a local path in development mode (the PATH is the positional `extension` argument).
- - `--priority NUMBER`: Set resolution priority (lower = higher precedence, default 10)
+ - `--from URL`: Install from custom URL or Git repo
+ - `--version VERSION`: Install specific version
+ - `--dev PATH`: Install from local path (development mode)
+ - `--no-register`: Skip command registration (manual setup)

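Putting the lifecycle commands together; a minimal sketch using only invocations documented in this RFC (`jira` is the RFC's running example extension):

```bash
# Install from the catalog, confirm it registered, then remove it
specify extension add jira
specify extension list
specify extension remove jira
```
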
#### `specify extension remove NAME`

@@ -1286,29 +1279,6 @@ $ specify extension disable jira

To re-enable: specify extension enable jira
```

- #### `specify extension set-priority NAME PRIORITY`
-
- Change the resolution priority of an installed extension.
-
- ```bash
- $ specify extension set-priority jira 5
-
- ✓ Extension 'Jira Integration' priority changed: 10 → 5
-
- Lower priority = higher precedence in template resolution
- ```
-
- **Priority Values:**
-
- - Lower numbers = higher precedence (checked first in resolution)
- - Default priority is 10
- - Must be a positive integer (1 or higher)
-
- **Use Cases:**
-
- - Ensure a critical extension's templates take precedence
- - Override default resolution order when multiple extensions provide similar templates

---

## Compatibility & Versioning
@@ -1534,225 +1504,203 @@ AI agent registers both names, so old scripts work.

## Implementation Phases

- ### Phase 1: Core Extension System ✅ COMPLETED
+ ### Phase 1: Core Extension System (Week 1-2)

**Goal**: Basic extension infrastructure

**Deliverables**:

- - [x] Extension manifest schema (`extension.yml`)
- - [x] Extension directory structure
- - [x] CLI commands:
- - [x] `specify extension list`
- - [x] `specify extension add` (from URL and local `--dev`)
- - [x] `specify extension remove`
- - [x] Extension registry (`.specify/extensions/.registry`)
- - [x] Command registration (Claude and 15+ other agents)
- - [x] Basic validation (manifest schema, compatibility)
- - [x] Documentation (extension development guide)
+ - [ ] Extension manifest schema (`extension.yml`)
+ - [ ] Extension directory structure
+ - [ ] CLI commands:
+ - [ ] `specify extension list`
+ - [ ] `specify extension add` (from URL)
+ - [ ] `specify extension remove`
+ - [ ] Extension registry (`.specify/extensions/.registry`)
+ - [ ] Command registration (Claude only initially)
+ - [ ] Basic validation (manifest schema, compatibility)
+ - [ ] Documentation (extension development guide)

**Testing**:

- - [x] Unit tests for manifest parsing
- - [x] Integration test: Install dummy extension
- - [x] Integration test: Register commands with Claude
+ - [ ] Unit tests for manifest parsing
+ - [ ] Integration test: Install dummy extension
+ - [ ] Integration test: Register commands with Claude

- ### Phase 2: Jira Extension ✅ COMPLETED
+ ### Phase 2: Jira Extension (Week 3)

**Goal**: First production extension

**Deliverables**:

- - [x] Create `spec-kit-jira` repository
- - [x] Port Jira functionality to extension
- - [x] Create `jira-config.yml` template
- - [x] Commands:
- - [x] `specstoissues.md`
- - [x] `discover-fields.md`
- - [x] `sync-status.md`
- - [x] Helper scripts
- - [x] Documentation (README, configuration guide, examples)
- - [x] Release v3.0.0
+ - [ ] Create `spec-kit-jira` repository
+ - [ ] Port Jira functionality to extension
+ - [ ] Create `jira-config.yml` template
+ - [ ] Commands:
+ - [ ] `specstoissues.md`
+ - [ ] `discover-fields.md`
+ - [ ] `sync-status.md`
+ - [ ] Helper scripts
+ - [ ] Documentation (README, configuration guide, examples)
+ - [ ] Release v1.0.0

**Testing**:

- - [x] Test on `eng-msa-ts` project
- - [x] Verify spec→Epic, phase→Story, task→Issue mapping
- - [x] Test configuration loading and validation
- - [x] Test custom field application
+ - [ ] Test on `eng-msa-ts` project
+ - [ ] Verify spec→Epic, phase→Story, task→Issue mapping
+ - [ ] Test configuration loading and validation
+ - [ ] Test custom field application

- ### Phase 3: Extension Catalog ✅ COMPLETED
+ ### Phase 3: Extension Catalog (Week 4)

**Goal**: Discovery and distribution

**Deliverables**:

- - [x] Central catalog (`extensions/catalog.json` in spec-kit repo)
- - [x] Community catalog (`extensions/catalog.community.json`)
- - [x] Catalog fetch and parsing with multi-catalog support
- - [x] CLI commands:
- - [x] `specify extension search`
- - [x] `specify extension info`
- - [x] `specify extension catalog list`
- - [x] `specify extension catalog add`
- - [x] `specify extension catalog remove`
- - [x] Documentation (how to publish extensions)
+ - [ ] Central catalog (`extensions/catalog.json` in spec-kit repo)
+ - [ ] Catalog fetch and parsing
+ - [ ] CLI commands:
+ - [ ] `specify extension search`
+ - [ ] `specify extension info`
+ - [ ] Catalog publishing process (GitHub Action)
+ - [ ] Documentation (how to publish extensions)

**Testing**:

- - [x] Test catalog fetch
- - [x] Test extension search/filtering
- - [x] Test catalog caching
- - [x] Test multi-catalog merge with priority
+ - [ ] Test catalog fetch
+ - [ ] Test extension search/filtering
+ - [ ] Test catalog caching

- ### Phase 4: Advanced Features ✅ COMPLETED
+ ### Phase 4: Advanced Features (Week 5-6)

**Goal**: Hooks, updates, multi-agent support

**Deliverables**:

- - [x] Hook system (`hooks` in extension.yml)
- - [x] Hook registration and execution
- - [x] Project extensions config (`.specify/extensions.yml`)
- - [x] CLI commands:
- - [x] `specify extension update` (with atomic backup/restore)
- - [x] `specify extension enable/disable`
- - [x] Command registration for multiple agents (15+ agents including Claude, Copilot, Gemini, Cursor, etc.)
- - [x] Extension update notifications (version comparison)
- - [x] Configuration layer resolution (project, local, env)
+ - [ ] Hook system (`hooks` in extension.yml)
+ - [ ] Hook registration and execution
+ - [ ] Project extensions config (`.specify/extensions.yml`)
+ - [ ] CLI commands:
+ - [ ] `specify extension update`
+ - [ ] `specify extension enable/disable`
+ - [ ] Command registration for multiple agents (Gemini, Copilot)
+ - [ ] Extension update notifications
+ - [ ] Configuration layer resolution (project, local, env)

- **Additional features implemented beyond original RFC**:
-
- - [x] **Display name resolution**: All commands accept extension display names in addition to IDs
- - [x] **Ambiguous name handling**: User-friendly tables when multiple extensions match a name
- - [x] **Atomic update with rollback**: Full backup of extension dir, commands, hooks, and registry with automatic rollback on failure
- - [x] **Pre-install ID validation**: Validates extension ID from ZIP before installing (security)
- - [x] **Enabled state preservation**: Disabled extensions stay disabled after update
- - [x] **Registry update/restore methods**: Clean API for enable/disable and rollback operations
- - [x] **Catalog error fallback**: `extension info` falls back to local info when catalog unavailable
- - [x] **`_install_allowed` flag**: Discovery-only catalogs can't be used for installation
- - [x] **Cache invalidation**: Cache invalidated when `SPECKIT_CATALOG_URL` changes

**Testing**:

- - [x] Test hooks in core commands
- - [x] Test extension updates (preserve config)
- - [x] Test multi-agent registration
- - [x] Test atomic rollback on update failure
- - [x] Test enabled state preservation
- - [x] Test display name resolution
+ - [ ] Test hooks in core commands
+ - [ ] Test extension updates (preserve config)
+ - [ ] Test multi-agent registration

- ### Phase 5: Polish & Documentation ✅ COMPLETED
+ ### Phase 5: Polish & Documentation (Week 7)

**Goal**: Production ready

**Deliverables**:

- - [x] Comprehensive documentation:
- - [x] User guide (EXTENSION-USER-GUIDE.md)
- - [x] Extension development guide (EXTENSION-DEV-GUIDE.md)
- - [x] Extension API reference (EXTENSION-API-REFERENCE.md)
- - [x] Error messages and validation improvements
- - [x] CLI help text updates
+ - [ ] Comprehensive documentation:
+ - [ ] User guide (installing/using extensions)
+ - [ ] Extension development guide
+ - [ ] Extension API reference
+ - [ ] Migration guide (core → extension)
+ - [ ] Error messages and validation improvements
+ - [ ] CLI help text updates
+ - [ ] Example extension template (cookiecutter)
+ - [ ] Blog post / announcement
+ - [ ] Video tutorial

**Testing**:

- - [x] End-to-end testing on multiple projects
- - [x] 163 unit tests passing
+ - [ ] End-to-end testing on multiple projects
+ - [ ] Community beta testing
+ - [ ] Performance testing (large projects)

---

- ## Resolved Questions
+ ## Open Questions

- The following questions from the original RFC have been resolved during implementation:
-
- ### 1. Extension Namespace ✅ RESOLVED
+ ### 1. Extension Namespace

**Question**: Should extension commands use namespace prefix?

- **Decision**: **Option C** - Both prefixed and aliases are supported. Commands use `speckit.{extension}.{command}` as canonical name, with optional aliases defined in manifest.
-
- **Implementation**: The `aliases` field in `extension.yml` allows extensions to register additional command names.
+ **Options**:
+
+ - A) Prefixed: `/speckit.jira.specstoissues` (explicit, avoids conflicts)
+ - B) Short alias: `/jira.specstoissues` (shorter, less verbose)
+ - C) Both: Register both names, prefer prefixed in docs
+
+ **Recommendation**: C (both), prefixed is canonical

---

- ### 2. Config File Location ✅ RESOLVED
+ ### 2. Config File Location

**Question**: Where should extension configs live?

- **Decision**: **Option A** - Extension directory (`.specify/extensions/{ext-id}/{ext-id}-config.yml`). This keeps extensions self-contained and easier to manage.
-
- **Implementation**: Each extension has its own config file within its directory, with layered resolution (defaults → project → local → env vars).
+ **Options**:
+
+ - A) Extension directory: `.specify/extensions/jira/jira-config.yml` (encapsulated)
+ - B) Root level: `.specify/jira-config.yml` (more visible)
+ - C) Unified: `.specify/extensions.yml` (all extension configs in one file)
+
+ **Recommendation**: A (extension directory), cleaner separation

---

- ### 3. Command File Format ✅ RESOLVED
+ ### 3. Command File Format

**Question**: Should extensions use universal format or agent-specific?

- **Decision**: **Option A** - Universal Markdown format. Extensions write commands once, CLI converts to agent-specific format during registration.
-
- **Implementation**: `CommandRegistrar` class handles conversion to 15+ agent formats (Claude, Copilot, Gemini, Cursor, etc.).
+ **Options**:
+
+ - A) Universal Markdown: Extensions write once, CLI converts per-agent
+ - B) Agent-specific: Extensions provide separate files for each agent
+ - C) Hybrid: Universal default, agent-specific overrides
+
+ **Recommendation**: A (universal), reduces duplication

---

- ### 4. Hook Execution Model ✅ RESOLVED
+ ### 4. Hook Execution Model

**Question**: How should hooks execute?

- **Decision**: **Option A** - Hooks are registered in `.specify/extensions.yml` and executed by the AI agent when it sees the hook trigger. Hook state (enabled/disabled) is managed per-extension.
-
- **Implementation**: `HookExecutor` class manages hook registration and state in `extensions.yml`.
+ **Options**:
+
+ - A) AI agent interprets: Core commands output `EXECUTE_COMMAND: name`
+ - B) CLI executes: Core commands call `specify extension hook after_tasks`
+ - C) Agent built-in: Extension system built into AI agent (Claude SDK)
+
+ **Recommendation**: A initially (simpler), move to C long-term

---

- ### 5. Extension Distribution ✅ RESOLVED
+ ### 5. Extension Distribution

**Question**: How should extensions be packaged?

- **Decision**: **Option A** - ZIP archives downloaded from GitHub releases (via catalog `download_url`). Local development uses `--dev` flag with directory path.
-
- **Implementation**: `ExtensionManager.install_from_zip()` handles ZIP extraction and validation.
+ **Options**:
+
+ - A) ZIP archives: Downloaded from GitHub releases
+ - B) Git repos: Cloned directly (`git clone`)
+ - C) Python packages: Installable via `uv tool install`
+
+ **Recommendation**: A (ZIP), simpler for non-Python extensions in future

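The resolved decision pairs catalog ZIP installs with a local development path; a brief sketch of the two documented forms (the positional-path form follows the `--dev` description given earlier in this RFC, where the path replaces the extension name):

```bash
# Normal install: ZIP fetched from the catalog entry's download_url
specify extension add jira

# Development install: point at a local checkout instead of a release ZIP
specify extension add ./my-extension --dev
```
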
---

- ### 6. Multi-Version Support ✅ RESOLVED
+ ### 6. Multi-Version Support

**Question**: Can multiple versions of same extension coexist?

- **Decision**: **Option A** - Single version only. Updates replace the existing version with atomic rollback on failure.
-
- **Implementation**: `extension update` performs atomic backup/restore to ensure safe updates.
-
- ---
-
- ## Open Questions (Remaining)
-
- ### 1. Sandboxing / Permissions (Future)
-
- **Question**: Should extensions declare required permissions?

**Options**:

- - A) No sandboxing (current): Extensions run with same privileges as AI agent
- - B) Permission declarations: Extensions declare `filesystem:read`, `network:external`, etc.
- - C) Opt-in sandboxing: Organizations can enable permission enforcement
+ - A) Single version: Only one version installed at a time
+ - B) Multi-version: Side-by-side versions (`.specify/extensions/jira@1.0/`, `.specify/extensions/jira@2.0/`)
+ - C) Per-branch: Different branches use different versions

- **Status**: Deferred to future version. Currently using trust-based model where users trust extension authors.
+ **Recommendation**: A initially (simpler), consider B in future if needed

- ---
-
- ### 2. Package Signatures (Future)
-
- **Question**: Should extensions be cryptographically signed?
-
- **Options**:
-
- - A) No signatures (current): Trust based on catalog source
- - B) GPG/Sigstore signatures: Verify package integrity
- - C) Catalog-level verification: Catalog maintainers verify packages
-
- **Status**: Deferred to future version. `checksum` field is available in catalog schema but not enforced.

---

@@ -1,39 +1,8 @@

{
  "schema_version": "1.0",
- "updated_at": "2026-03-19T12:08:20Z",
+ "updated_at": "2026-03-09T00:00:00Z",
  "catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json",
  "extensions": {
-   "archive": {
-     "name": "Archive Extension",
-     "id": "archive",
-     "description": "Archive merged features into main project memory, resolving gaps and conflicts.",
-     "author": "Stanislav Deviatov",
-     "version": "1.0.0",
-     "download_url": "https://github.com/stn1slv/spec-kit-archive/archive/refs/tags/v1.0.0.zip",
-     "repository": "https://github.com/stn1slv/spec-kit-archive",
-     "homepage": "https://github.com/stn1slv/spec-kit-archive",
-     "documentation": "https://github.com/stn1slv/spec-kit-archive/blob/main/README.md",
-     "changelog": "https://github.com/stn1slv/spec-kit-archive/blob/main/CHANGELOG.md",
-     "license": "MIT",
-     "requires": {
-       "speckit_version": ">=0.1.0"
-     },
-     "provides": {
-       "commands": 1,
-       "hooks": 0
-     },
-     "tags": [
-       "archive",
-       "memory",
-       "merge",
-       "changelog"
-     ],
-     "verified": false,
-     "downloads": 0,
-     "stars": 0,
-     "created_at": "2026-03-14T00:00:00Z",
-     "updated_at": "2026-03-14T00:00:00Z"
-   },
    "azure-devops": {
      "name": "Azure DevOps Integration",
      "id": "azure-devops",
@@ -105,153 +74,6 @@

      "created_at": "2026-02-22T00:00:00Z",
      "updated_at": "2026-02-22T00:00:00Z"
    },
-   "cognitive-squad": {
-     "name": "Cognitive Squad",
-     "id": "cognitive-squad",
-     "description": "Multi-agent cognitive system with Triadic Model: understanding, internalization, application — with quality gates, backpropagation verification, and self-healing",
-     "author": "Testimonial",
-     "version": "0.1.0",
-     "download_url": "https://github.com/Testimonial/cognitive-squad/archive/refs/tags/v0.1.0.zip",
-     "repository": "https://github.com/Testimonial/cognitive-squad",
-     "homepage": "https://github.com/Testimonial/cognitive-squad",
-     "documentation": "https://github.com/Testimonial/cognitive-squad/blob/main/README.md",
-     "changelog": "https://github.com/Testimonial/cognitive-squad/blob/main/CHANGELOG.md",
-     "license": "MIT",
-     "requires": {
-       "speckit_version": ">=0.3.0",
-       "tools": [
-         {
-           "name": "understanding",
-           "version": ">=3.4.0",
-           "required": false
-         },
-         {
-           "name": "spec-kit-reverse-eng",
-           "version": ">=1.0.0",
-           "required": false
-         }
-       ]
-     },
-     "provides": {
-       "commands": 10,
-       "hooks": 1
-     },
-     "tags": [
-       "ai-agents",
-       "cognitive",
-       "full-lifecycle",
-       "verification",
-       "multi-agent"
-     ],
-     "verified": false,
-     "downloads": 0,
-     "stars": 0,
-     "created_at": "2026-03-16T00:00:00Z",
-     "updated_at": "2026-03-18T00:00:00Z"
-   },
-   "conduct": {
-     "name": "Conduct Extension",
-     "id": "conduct",
-     "description": "Executes a single spec-kit phase via sub-agent delegation to reduce context pollution.",
-     "author": "twbrandon7",
-     "version": "1.0.0",
-     "download_url": "https://github.com/twbrandon7/spec-kit-conduct-ext/archive/refs/tags/v1.0.0.zip",
-     "repository": "https://github.com/twbrandon7/spec-kit-conduct-ext",
-     "homepage": "https://github.com/twbrandon7/spec-kit-conduct-ext",
-     "documentation": "https://github.com/twbrandon7/spec-kit-conduct-ext/blob/main/README.md",
-     "changelog": "https://github.com/twbrandon7/spec-kit-conduct-ext/blob/main/CHANGELOG.md",
-     "license": "MIT",
-     "requires": {
-       "speckit_version": ">=0.3.1"
-     },
-     "provides": {
-       "commands": 1,
-       "hooks": 0
-     },
-     "tags": [
-       "conduct",
-       "workflow",
-       "automation"
-     ],
-     "verified": false,
-     "downloads": 0,
-     "stars": 0,
-     "created_at": "2026-03-19T12:08:20Z",
-     "updated_at": "2026-03-19T12:08:20Z"
-   },
-   "docguard": {
-     "name": "DocGuard \u2014 CDD Enforcement",
-     "id": "docguard",
-     "description": "Canonical-Driven Development enforcement. Validates, scores, and traces project documentation with automated checks, AI-driven workflows, and spec-kit hooks. Zero NPM runtime dependencies.",
-     "author": "raccioly",
-     "version": "0.9.11",
-     "download_url": "https://github.com/raccioly/docguard/releases/download/v0.9.11/spec-kit-docguard-v0.9.11.zip",
-     "repository": "https://github.com/raccioly/docguard",
-     "homepage": "https://www.npmjs.com/package/docguard-cli",
-     "documentation": "https://github.com/raccioly/docguard/blob/main/extensions/spec-kit-docguard/README.md",
-     "changelog": "https://github.com/raccioly/docguard/blob/main/CHANGELOG.md",
-     "license": "MIT",
-     "requires": {
-       "speckit_version": ">=0.1.0",
-       "tools": [
-         {
-           "name": "node",
-           "version": ">=18.0.0",
-           "required": true
-         }
-       ]
-     },
-     "provides": {
-       "commands": 6,
-       "hooks": 3
-     },
-     "tags": [
-       "documentation",
-       "validation",
-       "quality",
-       "cdd",
-       "traceability",
-       "ai-agents",
-       "enforcement",
-       "spec-kit"
-     ],
-     "verified": false,
-     "downloads": 0,
-     "stars": 0,
-     "created_at": "2026-03-13T00:00:00Z",
-     "updated_at": "2026-03-18T18:53:31Z"
-   },
-   "doctor": {
-     "name": "Project Health Check",
-     "id": "doctor",
-     "description": "Diagnose a Spec Kit project and report health issues across structure, agents, features, scripts, extensions, and git.",
-     "author": "KhawarHabibKhan",
-     "version": "1.0.0",
-     "download_url": "https://github.com/KhawarHabibKhan/spec-kit-doctor/archive/refs/tags/v1.0.0.zip",
-     "repository": "https://github.com/KhawarHabibKhan/spec-kit-doctor",
-     "homepage": "https://github.com/KhawarHabibKhan/spec-kit-doctor",
-     "documentation": "https://github.com/KhawarHabibKhan/spec-kit-doctor/blob/main/README.md",
-     "changelog": "https://github.com/KhawarHabibKhan/spec-kit-doctor/blob/main/CHANGELOG.md",
-     "license": "MIT",
-     "requires": {
-       "speckit_version": ">=0.1.0"
-     },
-     "provides": {
-       "commands": 1,
-       "hooks": 0
-     },
-     "tags": [
-       "diagnostics",
-       "health-check",
-       "validation",
-       "project-structure"
-     ],
-     "verified": false,
-     "downloads": 0,
-     "stars": 0,
-     "created_at": "2026-03-13T00:00:00Z",
-     "updated_at": "2026-03-13T00:00:00Z"
-   },
    "fleet": {
      "name": "Fleet Orchestrator",
      "id": "fleet",
@@ -271,48 +93,13 @@

        "commands": 2,
        "hooks": 1
      },
-     "tags": [
-       "orchestration",
-       "workflow",
-       "human-in-the-loop",
-       "parallel"
-     ],
+     "tags": ["orchestration", "workflow", "human-in-the-loop", "parallel"],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-06T00:00:00Z",
      "updated_at": "2026-03-06T00:00:00Z"
    },
-   "iterate": {
-     "name": "Iterate",
-     "id": "iterate",
-     "description": "Iterate on spec documents with a two-phase define-and-apply workflow — refine specs mid-implementation and go straight back to building",
-     "author": "Vianca Martinez",
-     "version": "2.0.0",
-     "download_url": "https://github.com/imviancagrace/spec-kit-iterate/archive/refs/tags/v2.0.0.zip",
-     "repository": "https://github.com/imviancagrace/spec-kit-iterate",
-     "homepage": "https://github.com/imviancagrace/spec-kit-iterate",
-     "documentation": "https://github.com/imviancagrace/spec-kit-iterate/blob/main/README.md",
-     "changelog": "https://github.com/imviancagrace/spec-kit-iterate/blob/main/CHANGELOG.md",
-     "license": "MIT",
-     "requires": {
-       "speckit_version": ">=0.1.0"
-     },
-     "provides": {
-       "commands": 2,
-       "hooks": 0
-     },
-     "tags": [
-       "iteration",
-       "change-management",
-       "spec-maintenance"
-     ],
-     "verified": false,
-     "downloads": 0,
-     "stars": 0,
-     "created_at": "2026-03-17T00:00:00Z",
-     "updated_at": "2026-03-17T00:00:00Z"
-   },
    "jira": {
      "name": "Jira Integration",
      "id": "jira",
@@ -373,49 +160,13 @@

        "commands": 2,
        "hooks": 1
      },
-     "tags": [
-       "implementation",
-       "automation",
-       "loop",
-       "copilot"
-     ],
+     "tags": ["implementation", "automation", "loop", "copilot"],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-09T00:00:00Z",
      "updated_at": "2026-03-09T00:00:00Z"
    },
-   "reconcile": {
-     "name": "Reconcile Extension",
-     "id": "reconcile",
-     "description": "Reconcile implementation drift by surgically updating the feature's own spec, plan, and tasks.",
-     "author": "Stanislav Deviatov",
-     "version": "1.0.0",
-     "download_url": "https://github.com/stn1slv/spec-kit-reconcile/archive/refs/tags/v1.0.0.zip",
-     "repository": "https://github.com/stn1slv/spec-kit-reconcile",
-     "homepage": "https://github.com/stn1slv/spec-kit-reconcile",
-     "documentation": "https://github.com/stn1slv/spec-kit-reconcile/blob/main/README.md",
-     "changelog": "https://github.com/stn1slv/spec-kit-reconcile/blob/main/CHANGELOG.md",
-     "license": "MIT",
-     "requires": {
-       "speckit_version": ">=0.1.0"
-     },
-     "provides": {
-       "commands": 1,
-       "hooks": 0
-     },
-     "tags": [
-       "reconcile",
-       "drift",
-       "tasks",
-       "remediation"
-     ],
-     "verified": false,
-     "downloads": 0,
-     "stars": 0,
-     "created_at": "2026-03-14T00:00:00Z",
-     "updated_at": "2026-03-14T00:00:00Z"
-   },
    "retrospective": {
      "name": "Retrospective Extension",
      "id": "retrospective",
@@ -467,53 +218,13 @@
|
|||||||
"commands": 7,
|
"commands": 7,
|
||||||
"hooks": 1
|
"hooks": 1
|
||||||
},
|
},
|
||||||
"tags": [
|
"tags": ["code-review", "quality", "review", "testing", "error-handling", "type-design", "simplification"],
|
||||||
"code-review",
|
|
||||||
"quality",
|
|
||||||
"review",
|
|
||||||
"testing",
|
|
||||||
"error-handling",
|
|
||||||
"type-design",
|
|
||||||
"simplification"
|
|
||||||
],
|
|
||||||
"verified": false,
|
"verified": false,
|
||||||
"downloads": 0,
|
"downloads": 0,
|
||||||
"stars": 0,
|
"stars": 0,
|
||||||
"created_at": "2026-03-06T00:00:00Z",
|
"created_at": "2026-03-06T00:00:00Z",
|
||||||
"updated_at": "2026-03-06T00:00:00Z"
|
"updated_at": "2026-03-06T00:00:00Z"
|
||||||
},
|
},
|
||||||
"speckit-utils": {
|
|
||||||
"name": "SDD Utilities",
|
|
||||||
"id": "speckit-utils",
|
|
||||||
"description": "Resume interrupted workflows, validate project health, and verify spec-to-task traceability.",
|
|
||||||
"author": "mvanhorn",
|
|
||||||
"version": "1.0.0",
|
|
||||||
"download_url": "https://github.com/mvanhorn/speckit-utils/archive/refs/tags/v1.0.0.zip",
|
|
||||||
"repository": "https://github.com/mvanhorn/speckit-utils",
|
|
||||||
"homepage": "https://github.com/mvanhorn/speckit-utils",
|
|
||||||
"documentation": "https://github.com/mvanhorn/speckit-utils/blob/main/README.md",
|
|
||||||
"changelog": "https://github.com/mvanhorn/speckit-utils/blob/main/CHANGELOG.md",
|
|
||||||
"license": "MIT",
|
|
||||||
"requires": {
|
|
||||||
"speckit_version": ">=0.1.0"
|
|
||||||
},
|
|
||||||
"provides": {
|
|
||||||
"commands": 3,
|
|
||||||
"hooks": 2
|
|
||||||
},
|
|
||||||
"tags": [
|
|
||||||
"resume",
|
|
||||||
"doctor",
|
|
||||||
"validate",
|
|
||||||
"workflow",
|
|
||||||
"health-check"
|
|
||||||
],
|
|
||||||
"verified": false,
|
|
||||||
"downloads": 0,
|
|
||||||
"stars": 0,
|
|
||||||
"created_at": "2026-03-18T00:00:00Z",
|
|
||||||
"updated_at": "2026-03-18T00:00:00Z"
|
|
||||||
},
|
|
||||||
"sync": {
|
"sync": {
|
||||||
"name": "Spec Sync",
|
"name": "Spec Sync",
|
||||||
"id": "sync",
|
"id": "sync",
|
||||||
@@ -549,7 +260,7 @@
|
|||||||
"understanding": {
|
"understanding": {
|
||||||
"name": "Understanding",
|
"name": "Understanding",
|
||||||
"id": "understanding",
|
"id": "understanding",
|
||||||
"description": "Automated requirements quality analysis \u2014 validates specs against IEEE/ISO standards using 31 deterministic metrics. Catches ambiguity, missing testability, and structural issues before they reach implementation. Includes experimental energy-based ambiguity detection using local LM token perplexity.",
|
"description": "Automated requirements quality analysis — validates specs against IEEE/ISO standards using 31 deterministic metrics. Catches ambiguity, missing testability, and structural issues before they reach implementation. Includes experimental energy-based ambiguity detection using local LM token perplexity.",
|
||||||
"author": "Ladislav Bihari",
|
"author": "Ladislav Bihari",
|
||||||
"version": "3.4.0",
|
"version": "3.4.0",
|
||||||
"download_url": "https://github.com/Testimonial/understanding/archive/refs/tags/v3.4.0.zip",
|
"download_url": "https://github.com/Testimonial/understanding/archive/refs/tags/v3.4.0.zip",
|
||||||
@@ -587,38 +298,6 @@
|
|||||||
"created_at": "2026-03-07T00:00:00Z",
|
"created_at": "2026-03-07T00:00:00Z",
|
||||||
"updated_at": "2026-03-07T00:00:00Z"
|
"updated_at": "2026-03-07T00:00:00Z"
|
||||||
},
|
},
|
||||||
"status": {
|
|
||||||
"name": "Project Status",
|
|
||||||
"id": "status",
|
|
||||||
"description": "Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary.",
|
|
||||||
"author": "KhawarHabibKhan",
|
|
||||||
"version": "1.0.0",
|
|
||||||
"download_url": "https://github.com/KhawarHabibKhan/spec-kit-status/archive/refs/tags/v1.0.0.zip",
|
|
||||||
"repository": "https://github.com/KhawarHabibKhan/spec-kit-status",
|
|
||||||
"homepage": "https://github.com/KhawarHabibKhan/spec-kit-status",
|
|
||||||
"documentation": "https://github.com/KhawarHabibKhan/spec-kit-status/blob/main/README.md",
|
|
||||||
"changelog": "https://github.com/KhawarHabibKhan/spec-kit-status/blob/main/CHANGELOG.md",
|
|
||||||
"license": "MIT",
|
|
||||||
"requires": {
|
|
||||||
"speckit_version": ">=0.1.0"
|
|
||||||
},
|
|
||||||
"provides": {
|
|
||||||
"commands": 1,
|
|
||||||
"hooks": 0
|
|
||||||
},
|
|
||||||
"tags": [
|
|
||||||
"status",
|
|
||||||
"workflow",
|
|
||||||
"progress",
|
|
||||||
"feature-tracking",
|
|
||||||
"task-progress"
|
|
||||||
],
|
|
||||||
"verified": false,
|
|
||||||
"downloads": 0,
|
|
||||||
"stars": 0,
|
|
||||||
"created_at": "2026-03-16T00:00:00Z",
|
|
||||||
"updated_at": "2026-03-16T00:00:00Z"
|
|
||||||
},
|
|
||||||
"v-model": {
|
"v-model": {
|
||||||
"name": "V-Model Extension Pack",
|
"name": "V-Model Extension Pack",
|
||||||
"id": "v-model",
|
"id": "v-model",
|
||||||
@@ -651,37 +330,6 @@
|
|||||||
"created_at": "2026-02-20T00:00:00Z",
|
"created_at": "2026-02-20T00:00:00Z",
|
||||||
"updated_at": "2026-02-22T00:00:00Z"
|
"updated_at": "2026-02-22T00:00:00Z"
|
||||||
},
|
},
|
||||||
"learn": {
|
|
||||||
"name": "Learning Extension",
|
|
||||||
"id": "learn",
|
|
||||||
"description": "Generate educational guides from implementations and enhance clarifications with mentoring context.",
|
|
||||||
"author": "Vianca Martinez",
|
|
||||||
"version": "1.0.0",
|
|
||||||
"download_url": "https://github.com/imviancagrace/spec-kit-learn/archive/refs/tags/v1.0.0.zip",
|
|
||||||
"repository": "https://github.com/imviancagrace/spec-kit-learn",
|
|
||||||
"homepage": "https://github.com/imviancagrace/spec-kit-learn",
|
|
||||||
"documentation": "https://github.com/imviancagrace/spec-kit-learn/blob/main/README.md",
|
|
||||||
"changelog": "https://github.com/imviancagrace/spec-kit-learn/blob/main/CHANGELOG.md",
|
|
||||||
"license": "MIT",
|
|
||||||
"requires": {
|
|
||||||
"speckit_version": ">=0.1.0"
|
|
||||||
},
|
|
||||||
"provides": {
|
|
||||||
"commands": 2,
|
|
||||||
"hooks": 1
|
|
||||||
},
|
|
||||||
"tags": [
|
|
||||||
"learning",
|
|
||||||
"education",
|
|
||||||
"mentoring",
|
|
||||||
"knowledge-transfer"
|
|
||||||
],
|
|
||||||
"verified": false,
|
|
||||||
"downloads": 0,
|
|
||||||
"stars": 0,
|
|
||||||
"created_at": "2026-03-17T00:00:00Z",
|
|
||||||
"updated_at": "2026-03-17T00:00:00Z"
|
|
||||||
},
|
|
||||||
"verify": {
|
"verify": {
|
||||||
"name": "Verify Extension",
|
"name": "Verify Extension",
|
||||||
"id": "verify",
|
"id": "verify",
|
||||||
@@ -713,37 +361,6 @@
|
|||||||
"stars": 0,
|
"stars": 0,
|
||||||
"created_at": "2026-03-03T00:00:00Z",
|
"created_at": "2026-03-03T00:00:00Z",
|
||||||
"updated_at": "2026-03-03T00:00:00Z"
|
"updated_at": "2026-03-03T00:00:00Z"
|
||||||
},
|
|
||||||
"verify-tasks": {
|
|
||||||
"name": "Verify Tasks Extension",
|
|
||||||
"id": "verify-tasks",
|
|
||||||
"description": "Detect phantom completions: tasks marked [X] in tasks.md with no real implementation.",
|
|
||||||
"author": "Dave Sharpe",
|
|
||||||
"version": "1.0.0",
|
|
||||||
"download_url": "https://github.com/datastone-inc/spec-kit-verify-tasks/archive/refs/tags/v1.0.0.zip",
|
|
||||||
"repository": "https://github.com/datastone-inc/spec-kit-verify-tasks",
|
|
||||||
"homepage": "https://github.com/datastone-inc/spec-kit-verify-tasks",
|
|
||||||
"documentation": "https://github.com/datastone-inc/spec-kit-verify-tasks/blob/main/README.md",
|
|
||||||
"changelog": "https://github.com/datastone-inc/spec-kit-verify-tasks/blob/main/CHANGELOG.md",
|
|
||||||
"license": "MIT",
|
|
||||||
"requires": {
|
|
||||||
"speckit_version": ">=0.1.0"
|
|
||||||
},
|
|
||||||
"provides": {
|
|
||||||
"commands": 1,
|
|
||||||
"hooks": 1
|
|
||||||
},
|
|
||||||
"tags": [
|
|
||||||
"verification",
|
|
||||||
"quality",
|
|
||||||
"phantom-completion",
|
|
||||||
"tasks"
|
|
||||||
],
|
|
||||||
"verified": false,
|
|
||||||
"downloads": 0,
|
|
||||||
"stars": 0,
|
|
||||||
"created_at": "2026-03-16T00:00:00Z",
|
|
||||||
"updated_at": "2026-03-16T00:00:00Z"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,21 +1,6 @@
|
|||||||
{
|
{
|
||||||
"schema_version": "1.0",
|
"schema_version": "1.0",
|
||||||
"updated_at": "2026-03-10T00:00:00Z",
|
"updated_at": "2026-02-03T00:00:00Z",
|
||||||
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.json",
|
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.json",
|
||||||
"extensions": {
|
"extensions": {}
|
||||||
"selftest": {
|
}
|
||||||
"name": "Spec Kit Self-Test Utility",
|
|
||||||
"id": "selftest",
|
|
||||||
"version": "1.0.0",
|
|
||||||
"description": "Verifies catalog extensions by programmatically walking through the discovery, installation, and registration lifecycle.",
|
|
||||||
"author": "spec-kit-core",
|
|
||||||
"repository": "https://github.com/github/spec-kit",
|
|
||||||
"download_url": "https://github.com/github/spec-kit/releases/download/selftest-v1.0.0/selftest.zip",
|
|
||||||
"tags": [
|
|
||||||
"testing",
|
|
||||||
"core",
|
|
||||||
"utility"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,69 +0,0 @@
|
|||||||
---
|
|
||||||
description: "Validate the lifecycle of an extension from the catalog."
|
|
||||||
---
|
|
||||||
|
|
||||||
# Extension Self-Test: `$ARGUMENTS`
|
|
||||||
|
|
||||||
This command drives a self-test simulating the developer experience with the `$ARGUMENTS` extension.
|
|
||||||
|
|
||||||
## Goal
|
|
||||||
|
|
||||||
Validate the end-to-end lifecycle (discovery, installation, registration) for the extension: `$ARGUMENTS`.
|
|
||||||
If `$ARGUMENTS` is empty, you must tell the user to provide an extension name, for example: `/speckit.selftest.extension linear`.
|
|
||||||
|
|
||||||
## Steps
|
|
||||||
|
|
||||||
### Step 1: Catalog Discovery Validation
|
|
||||||
|
|
||||||
Check if the extension exists in the Spec Kit catalog.
|
|
||||||
Execute this command and verify that it completes successfully and that the returned extension ID exactly matches `$ARGUMENTS`. If the command fails or the ID does not match `$ARGUMENTS`, fail the test.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
specify extension info "$ARGUMENTS"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 2: Simulate Installation
|
|
||||||
|
|
||||||
First, try to add the extension to the current workspace configuration directly. If the catalog provides the extension as `install_allowed: false` (discovery-only), this step is *expected* to fail.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
specify extension add "$ARGUMENTS"
|
|
||||||
```
|
|
||||||
|
|
||||||
Then, simulate adding the extension by installing it from its catalog download URL, which should bypass the restriction.
|
|
||||||
Obtain the extension's `download_url` from the catalog metadata (for example, via a catalog info command or UI), then run:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
specify extension add "$ARGUMENTS" --from "<download_url>"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 3: Registration Verification
|
|
||||||
|
|
||||||
Once the `add` command completes, verify the installation by checking the project configuration.
|
|
||||||
Use terminal tools (like `cat`) to verify that the following file contains a record for `$ARGUMENTS`.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cat .specify/extensions/.registry/$ARGUMENTS.json
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 4: Verification Report
|
|
||||||
|
|
||||||
Analyze the standard output of the three steps.
|
|
||||||
Generate a terminal-style test output format detailing the results of discovery, installation, and registration. Return this directly to the user.
|
|
||||||
|
|
||||||
Example output format:
|
|
||||||
```text
|
|
||||||
============================= test session starts ==============================
|
|
||||||
collected 3 items
|
|
||||||
|
|
||||||
test_selftest_discovery.py::test_catalog_search [PASS/FAIL]
|
|
||||||
Details: [Provide execution result of specify extension search]
|
|
||||||
|
|
||||||
test_selftest_installation.py::test_extension_add [PASS/FAIL]
|
|
||||||
Details: [Provide execution result of specify extension add]
|
|
||||||
|
|
||||||
test_selftest_registration.py::test_config_verification [PASS/FAIL]
|
|
||||||
Details: [Provide execution result of registry record verification]
|
|
||||||
|
|
||||||
============================== [X] passed in ... ==============================
|
|
||||||
```
|
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
schema_version: "1.0"
|
|
||||||
extension:
|
|
||||||
id: selftest
|
|
||||||
name: Spec Kit Self-Test Utility
|
|
||||||
version: 1.0.0
|
|
||||||
description: Verifies catalog extensions by programmatically walking through the discovery, installation, and registration lifecycle.
|
|
||||||
author: spec-kit-core
|
|
||||||
repository: https://github.com/github/spec-kit
|
|
||||||
license: MIT
|
|
||||||
requires:
|
|
||||||
speckit_version: ">=0.2.0"
|
|
||||||
provides:
|
|
||||||
commands:
|
|
||||||
- name: speckit.selftest.extension
|
|
||||||
file: commands/selftest.md
|
|
||||||
description: Validate the lifecycle of an extension from the catalog.
|
|
||||||
@@ -1,54 +0,0 @@
|
|||||||
# Spec Kit - February 2026 Newsletter
|
|
||||||
|
|
||||||
This edition covers Spec Kit activity in February 2026. Versions v0.1.7 through v0.1.13 shipped during the month, addressing bugs and adding features including a dual-catalog extension system and additional agent integrations. Community activity included blog posts, tutorials, and meetup sessions. A category summary is in the table below, followed by details.
|
|
||||||
|
|
||||||
| **Spec Kit Core (Feb 2026)** | **Community & Content** | **Roadmap & Next** |
|
|
||||||
| --- | --- | --- |
|
|
||||||
| Versions **v0.1.7** through **v0.1.13** shipped with bug fixes and features, including a **dual-catalog extension system** and new agent integrations. Over 300 issues were closed (of ~800 filed). The repo reached 71k stars and 6.4k forks. [\[github.com\]](https://github.com/github/spec-kit/releases) [\[github.com\]](https://github.com/github/spec-kit/issues) [\[rywalker.com\]](https://rywalker.com/research/github-spec-kit) | Eduardo Luz published a LinkedIn article on SDD and Spec Kit [\[linkedin.com\]](https://www.linkedin.com/pulse/specification-driven-development-sdd-github-spec-kit-elevating-luz-tojmc?tl=en). Erick Matsen blogged a walkthrough of building a bioinformatics pipeline with Spec Kit [\[matsen.fredhutch.org\]](https://matsen.fredhutch.org/general/2026/02/10/spec-kit-walkthrough.html). Microsoft MVP [Eric Boyd](https://ericboyd.com/) (not the Microsoft AI Platform VP of the same name) presented at the Cleveland .NET User Group [\[ericboyd.com\]](https://ericboyd.com/events/cleveland-csharp-user-group-february-25-2026-spec-driven-development-sdd-github-spec-kit). | **v0.2.0** was released in early March, consolidating February's work. It added extensions for Jira and Azure DevOps, community plugin support, and agents for Tabnine CLI and Kiro CLI [\[github.com\]](https://github.com/github/spec-kit/releases). Future work includes spec lifecycle management and progress toward a stable 1.0 release [\[martinfowler.com\]](https://martinfowler.com/articles/exploring-gen-ai/sdd-3-tools.html). |
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
## Spec Kit Project Updates
|
|
||||||
|
|
||||||
Spec Kit released versions **v0.1.7** through **v0.1.13** during February. Version 0.1.7 (early February) updated documentation for the newly introduced **dual-catalog extension system**, which allows both core and community extension catalogs to coexist. Subsequent patches (0.1.8, 0.1.9, etc.) bumped dependencies such as GitHub Actions versions and resolved minor issues. **v0.1.10** fixed YAML front-matter handling in generated files. By late February, **v0.1.12** and **v0.1.13** shipped with additional fixes in preparation for the next version bump. [\[github.com\]](https://github.com/github/spec-kit/releases)
|
|
||||||
|
|
||||||
The main architectural addition was the **modular extension system** with separate "core" and "community" extension catalogs for third-party add-ons. Multiple community-contributed extensions were merged during the month, including a **Jira extension** for issue tracker integration, an **Azure DevOps extension**, and utility extensions for code review, retrospective documentation, and CI/CD sync. The pending 0.2.0 release changelog lists over a dozen changes from February, including the extension additions and support for **multiple agent catalogs concurrently**. [\[github.com\]](https://github.com/github/spec-kit/releases)
|
|
||||||
|
|
||||||
By end of February, **over 330 issues/feature requests had been closed on GitHub** (out of ~870 filed to date). External contributors submitted pull requests including the **Tabnine CLI support**, which was merged in late February. The repository reached ~71k stars and crossed 6,000 forks. [\[github.com\]](https://github.com/github/spec-kit/issues) [\[github.com\]](https://github.com/github/spec-kit/releases) [\[rywalker.com\]](https://rywalker.com/research/github-spec-kit)
|
|
||||||
|
|
||||||
On the stability side, February's work focused on tightening core workflows and fixing edge-case bugs in the specification, planning, and task-generation commands. The team addressed file-handling issues (e.g., clarifying how output files are created/appended) and improved the reliability of the automated release pipeline. The project also added **Kiro CLI** to the supported agent list and updated integration scripts for Cursor and Code Interpreter, bringing the total number of supported AI coding assistants to over 20. [\[github.com\]](https://github.com/github/spec-kit/releases) [\[github.com\]](https://github.com/github/spec-kit)
|
|
||||||
|
|
||||||
## Community & Content
|
|
||||||
|
|
||||||
**Eduardo Luz** published a LinkedIn article on Feb 15 titled *"Specification Driven Development (SDD) and the GitHub Spec Kit: Elevating Software Engineering."* The article draws on his experience as a senior engineer to describe common causes of technical debt and inconsistent designs, and how SDD addresses them. It walks through Spec Kit's **four-layer approach** (Constitution, Design, Tasks, Implementation) and discusses treating specifications as a source of truth. The post generated discussion among software architects on LinkedIn about reducing misunderstandings and rework through spec-driven workflows. [\[linkedin.com\]](https://www.linkedin.com/pulse/specification-driven-development-sdd-github-spec-kit-elevating-luz-tojmc?tl=en)
|
|
||||||
|
|
||||||
**Erick Matsen** (Fred Hutchinson Cancer Center) posted a detailed walkthrough on Feb 10 titled *"Spec-Driven Development with spec-kit."* He describes building a **bioinformatics pipeline** in a single day using Spec Kit's workflow (from `speckit.constitution` to `speckit.implement`). The post includes command outputs and notes on decisions made along the way, such as refining the spec to add domain-specific requirements. He writes: "I really recommend this approach. This feels like the way software development should be." [\[matsen.fredhutch.org\]](https://matsen.fredhutch.org/general/2026/02/10/spec-kit-walkthrough.html) [\[github.com\]](https://github.com/mnriem/spec-kit-dotnet-cli-demo)
|
|
||||||
|
|
||||||
Several other tutorials and guides appeared during the month. An article on *IntuitionLabs* (updated Feb 21) provided a guide to Spec Kit covering the philosophy behind SDD and a walkthrough of the four-phase workflow with examples. A piece by Ry Walker (Feb 22) summarized key aspects of Spec Kit, noting its agent-agnostic design and 71k-star count. Microsoft's Developer Blog post from late 2025 (*"Diving Into Spec-Driven Development with GitHub Spec Kit"* by Den Delimarsky) continued to circulate among new users. [\[intuitionlabs.ai\]](https://intuitionlabs.ai/articles/spec-driven-development-spec-kit) [\[rywalker.com\]](https://rywalker.com/research/github-spec-kit)
|
|
||||||
|
|
||||||
On **Feb 25**, the Cleveland C# .NET User Group hosted a session titled *"Spec Driven Development with GitHub Spec Kit."* The talk was delivered by Microsoft MVP **[Eric Boyd](https://ericboyd.com/)** (Cleveland-based .NET developer; not to be confused with the Microsoft AI Platform VP of the same name). Boyd covered how specs change an AI coding assistant's output, patterns for iterating and refining specs over multiple cycles, and moving from ad-hoc prompting to a repeatable spec-driven workflow. Other groups, including GDG Madison, also listed sessions on spec-driven development in late February and early March. [\[ericboyd.com\]](https://ericboyd.com/events/cleveland-csharp-user-group-february-25-2026-spec-driven-development-sdd-github-spec-kit)
|
|
||||||
|
|
||||||
On GitHub, the **Spec Kit Discussions forum** saw activity around installation troubleshooting, handling multi-feature projects with Spec Kit's branching model, and feature suggestions. One thread discussed how Spec Kit treats each spec as a short-lived artifact tied to a feature branch, which led to discussion about future support for long-running "spec of record" use cases. [\[martinfowler.com\]](https://martinfowler.com/articles/exploring-gen-ai/sdd-3-tools.html)
|
|
||||||
|
|
||||||
## SDD Ecosystem
|
|
||||||
|
|
||||||
Other spec-driven development tools also saw activity in February.
|
|
||||||
|
|
||||||
AWS **Kiro** released version 0.10 on Feb 18 with two new spec workflows: a **Design-First** mode (starting from architecture/pseudocode to derive requirements) and a **Bugfix** mode (structured root-cause analysis producing a `bugfix.md` spec file). Kiro also added hunk-level code review for AI-generated changes and pre/post task hooks for custom automation. AWS expanded Kiro to GovCloud regions on Feb 17 for government compliance use cases. [\[kiro.dev\]](https://kiro.dev/changelog/)
|
|
||||||
|
|
||||||
**OpenSpec** (by Fission AI), a lightweight SDD framework, reached ~29.3k stars and nearly 2k forks. Its community published guides and comparisons during the month, including *"Spec-Driven Development Made Easy: A Practical Guide with OpenSpec."* OpenSpec emphasizes simplicity and flexibility, integrating with multiple AI coding assistants via YAML configs.
|
|
||||||
|
|
||||||
**Tessl** remained in private beta. As described by Thoughtworks writer Birgitta Boeckeler, Tessl pursues a **spec-as-source** model where specifications are maintained long-term and directly generate code files one-to-one, with generated code labeled as "do not edit." This contrasts with Spec Kit's current approach of creating specs per feature/branch. [\[martinfowler.com\]](https://martinfowler.com/articles/exploring-gen-ai/sdd-3-tools.html)
|
|
||||||
|
|
||||||
An **arXiv preprint** (January 2026) categorized SDD implementations into three levels: *spec-first*, *spec-anchored*, and *spec-as-source*. Spec Kit was identified as primarily spec-first with elements of spec-anchored. Tech media published reviews including a *Vibe Coding* "GitHub Spec Kit Review (2026)" and a blog post titled *"Putting Spec Kit Through Its Paces: Radical Idea or Reinvented Waterfall?"* which concluded that SDD with AI assistance is more iterative than traditional Waterfall. [\[intuitionlabs.ai\]](https://intuitionlabs.ai/articles/spec-driven-development-spec-kit) [\[martinfowler.com\]](https://martinfowler.com/articles/exploring-gen-ai/sdd-3-tools.html)
|
|
||||||
|
|
||||||
## Roadmap
|
|
||||||
|
|
||||||
**v0.2.0** was released on March 10, 2026, consolidating the month's work. It includes new extensions (Jira, Azure DevOps, review, sync), support for multiple extension catalogs and community plugins, and additional agent integrations (Tabnine CLI, Kiro CLI). [\[github.com\]](https://github.com/github/spec-kit/releases)
|
|
||||||
|
|
||||||
Areas under discussion or in progress for future development:
|
|
||||||
|
|
||||||
- **Spec lifecycle management** -- supporting longer-lived specifications that can evolve across multiple iterations, rather than being tied to a single feature branch. Users have raised this in GitHub Discussions, and the concept of "spec-anchored" development is under consideration. [\[martinfowler.com\]](https://martinfowler.com/articles/exploring-gen-ai/sdd-3-tools.html)
|
|
||||||
- **CI/CD integration** -- incorporating Spec Kit verification (e.g., `speckit.checklist` or `speckit.verify`) into pull request workflows and project management tools. February's Jira and Azure DevOps extensions are a step in this direction. [\[github.com\]](https://github.com/github/spec-kit/releases)
|
|
||||||
- **Continued agent support** -- adding integrations as new AI coding assistants emerge. The project currently supports over 20 agents and has been adding new ones (Kiro CLI, Tabnine CLI) as they become available. [\[github.com\]](https://github.com/github/spec-kit)
|
|
||||||
- **Community ecosystem** -- the open extension model allows external contributors to add functionality directly. February's Jira and Azure DevOps plugins were community-contributed. The Spec Kit README now links to community walkthrough demos for .NET, Spring Boot, and other stacks. [\[github.com\]](https://github.com/github/spec-kit)
|
|
||||||
@@ -13,15 +13,13 @@ When Spec Kit needs a template (e.g. `spec-template`), it walks a resolution sta
|
|||||||
|
|
||||||
If no preset is installed, core templates are used — exactly the same behavior as before presets existed.
|
If no preset is installed, core templates are used — exactly the same behavior as before presets existed.
|
||||||
|
|
||||||
Template resolution happens **at runtime** — although preset files are copied into `.specify/presets/<id>/` during installation, Spec Kit walks the resolution stack on every template lookup rather than merging templates into a single location.
|
|
||||||
|
|
||||||
For detailed resolution and command registration flows, see [ARCHITECTURE.md](ARCHITECTURE.md).
|
For detailed resolution and command registration flows, see [ARCHITECTURE.md](ARCHITECTURE.md).
|
||||||
|
|
||||||
## Command Overrides
|
## Command Overrides
|
||||||
|
|
||||||
Presets can also override the commands that guide the SDD workflow. Templates define *what* gets produced (specs, plans, constitutions); commands define *how* the LLM produces them (the step-by-step instructions).
|
Presets can also override the commands that guide the SDD workflow. Templates define *what* gets produced (specs, plans, constitutions); commands define *how* the LLM produces them (the step-by-step instructions).
|
||||||
|
|
||||||
Unlike templates, command overrides are applied **at install time**. When a preset includes `type: "command"` entries, the commands are registered into all detected agent directories (`.claude/commands/`, `.gemini/commands/`, etc.) in the correct format (Markdown or TOML with appropriate argument placeholders). When the preset is removed, the registered commands are cleaned up.
|
When a preset includes `type: "command"` entries, the commands are automatically registered into all detected agent directories (`.claude/commands/`, `.gemini/commands/`, etc.) in the correct format (Markdown or TOML with appropriate argument placeholders). When the preset is removed, the registered commands are cleaned up.
|
||||||
|
|
||||||
## Quick Start
|
## Quick Start
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
[project]
|
[project]
|
||||||
name = "specify-cli"
|
name = "specify-cli"
|
||||||
version = "0.3.2"
|
version = "0.2.1"
|
||||||
description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)."
|
description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)."
|
||||||
requires-python = ">=3.11"
|
requires-python = ">=3.11"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
@@ -14,7 +14,6 @@ dependencies = [
|
|||||||
"pyyaml>=6.0",
|
"pyyaml>=6.0",
|
||||||
"packaging>=23.0",
|
"packaging>=23.0",
|
||||||
"pathspec>=0.12.0",
|
"pathspec>=0.12.0",
|
||||||
"json5>=0.13.0",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
[project.scripts]
|
[project.scripts]
|
||||||
@@ -27,23 +26,6 @@ build-backend = "hatchling.build"
|
|||||||
[tool.hatch.build.targets.wheel]
|
[tool.hatch.build.targets.wheel]
|
||||||
packages = ["src/specify_cli"]
|
packages = ["src/specify_cli"]
|
||||||
|
|
||||||
[tool.hatch.build.targets.wheel.force-include]
|
|
||||||
# Bundle core assets so `specify init` works without network access (air-gapped / enterprise)
|
|
||||||
# Page templates (exclude commands/ — bundled separately below to avoid duplication)
|
|
||||||
"templates/agent-file-template.md" = "specify_cli/core_pack/templates/agent-file-template.md"
|
|
||||||
"templates/checklist-template.md" = "specify_cli/core_pack/templates/checklist-template.md"
|
|
||||||
"templates/constitution-template.md" = "specify_cli/core_pack/templates/constitution-template.md"
|
|
||||||
"templates/plan-template.md" = "specify_cli/core_pack/templates/plan-template.md"
|
|
||||||
"templates/spec-template.md" = "specify_cli/core_pack/templates/spec-template.md"
|
|
||||||
"templates/tasks-template.md" = "specify_cli/core_pack/templates/tasks-template.md"
|
|
||||||
"templates/vscode-settings.json" = "specify_cli/core_pack/templates/vscode-settings.json"
|
|
||||||
# Command templates
|
|
||||||
"templates/commands" = "specify_cli/core_pack/commands"
|
|
||||||
"scripts/bash" = "specify_cli/core_pack/scripts/bash"
|
|
||||||
"scripts/powershell" = "specify_cli/core_pack/scripts/powershell"
|
|
||||||
".github/workflows/scripts/create-release-packages.sh" = "specify_cli/core_pack/release_scripts/create-release-packages.sh"
|
|
||||||
".github/workflows/scripts/create-release-packages.ps1" = "specify_cli/core_pack/release_scripts/create-release-packages.ps1"
|
|
||||||
|
|
||||||
[project.optional-dependencies]
|
[project.optional-dependencies]
|
||||||
test = [
|
test = [
|
||||||
"pytest>=7.0",
|
"pytest>=7.0",
|
||||||
|
|||||||
@@ -79,28 +79,15 @@ SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|||||||
source "$SCRIPT_DIR/common.sh"
|
source "$SCRIPT_DIR/common.sh"
|
||||||
|
|
||||||
# Get feature paths and validate branch
|
# Get feature paths and validate branch
|
||||||
_paths_output=$(get_feature_paths) || { echo "ERROR: Failed to resolve feature paths" >&2; exit 1; }
|
eval $(get_feature_paths)
|
||||||
eval "$_paths_output"
|
|
||||||
unset _paths_output
|
|
||||||
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
|
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
|
||||||
|
|
||||||
# If paths-only mode, output paths and exit (support JSON + paths-only combined)
|
# If paths-only mode, output paths and exit (support JSON + paths-only combined)
|
||||||
if $PATHS_ONLY; then
|
if $PATHS_ONLY; then
|
||||||
if $JSON_MODE; then
|
if $JSON_MODE; then
|
||||||
# Minimal JSON paths payload (no validation performed)
|
# Minimal JSON paths payload (no validation performed)
|
||||||
if has_jq; then
|
printf '{"REPO_ROOT":"%s","BRANCH":"%s","FEATURE_DIR":"%s","FEATURE_SPEC":"%s","IMPL_PLAN":"%s","TASKS":"%s"}\n' \
|
||||||
jq -cn \
|
"$REPO_ROOT" "$CURRENT_BRANCH" "$FEATURE_DIR" "$FEATURE_SPEC" "$IMPL_PLAN" "$TASKS"
|
||||||
--arg repo_root "$REPO_ROOT" \
|
|
||||||
--arg branch "$CURRENT_BRANCH" \
|
|
||||||
--arg feature_dir "$FEATURE_DIR" \
|
|
||||||
--arg feature_spec "$FEATURE_SPEC" \
|
|
||||||
--arg impl_plan "$IMPL_PLAN" \
|
|
||||||
--arg tasks "$TASKS" \
|
|
||||||
'{REPO_ROOT:$repo_root,BRANCH:$branch,FEATURE_DIR:$feature_dir,FEATURE_SPEC:$feature_spec,IMPL_PLAN:$impl_plan,TASKS:$tasks}'
|
|
||||||
else
|
|
||||||
printf '{"REPO_ROOT":"%s","BRANCH":"%s","FEATURE_DIR":"%s","FEATURE_SPEC":"%s","IMPL_PLAN":"%s","TASKS":"%s"}\n' \
|
|
||||||
"$(json_escape "$REPO_ROOT")" "$(json_escape "$CURRENT_BRANCH")" "$(json_escape "$FEATURE_DIR")" "$(json_escape "$FEATURE_SPEC")" "$(json_escape "$IMPL_PLAN")" "$(json_escape "$TASKS")"
|
|
||||||
fi
|
|
||||||
else
|
else
|
||||||
echo "REPO_ROOT: $REPO_ROOT"
|
echo "REPO_ROOT: $REPO_ROOT"
|
||||||
echo "BRANCH: $CURRENT_BRANCH"
|
echo "BRANCH: $CURRENT_BRANCH"
|
||||||
@@ -154,25 +141,14 @@ fi
|
|||||||
# Output results
|
# Output results
|
||||||
if $JSON_MODE; then
|
if $JSON_MODE; then
|
||||||
# Build JSON array of documents
|
# Build JSON array of documents
|
||||||
if has_jq; then
|
if [[ ${#docs[@]} -eq 0 ]]; then
|
||||||
if [[ ${#docs[@]} -eq 0 ]]; then
|
json_docs="[]"
|
||||||
json_docs="[]"
|
|
||||||
else
|
|
||||||
json_docs=$(printf '%s\n' "${docs[@]}" | jq -R . | jq -s .)
|
|
||||||
fi
|
|
||||||
jq -cn \
|
|
||||||
--arg feature_dir "$FEATURE_DIR" \
|
|
||||||
--argjson docs "$json_docs" \
|
|
||||||
'{FEATURE_DIR:$feature_dir,AVAILABLE_DOCS:$docs}'
|
|
||||||
else
|
else
|
||||||
if [[ ${#docs[@]} -eq 0 ]]; then
|
json_docs=$(printf '"%s",' "${docs[@]}")
|
||||||
json_docs="[]"
|
json_docs="[${json_docs%,}]"
|
||||||
else
|
|
||||||
json_docs=$(for d in "${docs[@]}"; do printf '"%s",' "$(json_escape "$d")"; done)
|
|
||||||
json_docs="[${json_docs%,}]"
|
|
||||||
fi
|
|
||||||
printf '{"FEATURE_DIR":"%s","AVAILABLE_DOCS":%s}\n' "$(json_escape "$FEATURE_DIR")" "$json_docs"
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
printf '{"FEATURE_DIR":"%s","AVAILABLE_DOCS":%s}\n' "$FEATURE_DIR" "$json_docs"
|
||||||
else
|
else
|
||||||
# Text output
|
# Text output
|
||||||
echo "FEATURE_DIR:$FEATURE_DIR"
|
echo "FEATURE_DIR:$FEATURE_DIR"
|
||||||
|
|||||||
@@ -33,27 +33,16 @@ get_current_branch() {
|
|||||||
if [[ -d "$specs_dir" ]]; then
|
if [[ -d "$specs_dir" ]]; then
|
||||||
local latest_feature=""
|
local latest_feature=""
|
||||||
local highest=0
|
local highest=0
|
||||||
local latest_timestamp=""
|
|
||||||
|
|
||||||
for dir in "$specs_dir"/*; do
|
for dir in "$specs_dir"/*; do
|
||||||
if [[ -d "$dir" ]]; then
|
if [[ -d "$dir" ]]; then
|
||||||
local dirname=$(basename "$dir")
|
local dirname=$(basename "$dir")
|
||||||
if [[ "$dirname" =~ ^([0-9]{8}-[0-9]{6})- ]]; then
|
if [[ "$dirname" =~ ^([0-9]{3})- ]]; then
|
||||||
# Timestamp-based branch: compare lexicographically
|
|
||||||
local ts="${BASH_REMATCH[1]}"
|
|
||||||
if [[ "$ts" > "$latest_timestamp" ]]; then
|
|
||||||
latest_timestamp="$ts"
|
|
||||||
latest_feature=$dirname
|
|
||||||
fi
|
|
||||||
elif [[ "$dirname" =~ ^([0-9]{3})- ]]; then
|
|
||||||
local number=${BASH_REMATCH[1]}
|
local number=${BASH_REMATCH[1]}
|
||||||
number=$((10#$number))
|
number=$((10#$number))
|
||||||
if [[ "$number" -gt "$highest" ]]; then
|
if [[ "$number" -gt "$highest" ]]; then
|
||||||
highest=$number
|
highest=$number
|
||||||
# Only update if no timestamp branch found yet
|
latest_feature=$dirname
|
||||||
if [[ -z "$latest_timestamp" ]]; then
|
|
||||||
latest_feature=$dirname
|
|
||||||
fi
|
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
@@ -83,9 +72,9 @@ check_feature_branch() {
|
|||||||
return 0
|
return 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ ! "$branch" =~ ^[0-9]{3}- ]] && [[ ! "$branch" =~ ^[0-9]{8}-[0-9]{6}- ]]; then
|
if [[ ! "$branch" =~ ^[0-9]{3}- ]]; then
|
||||||
echo "ERROR: Not on a feature branch. Current branch: $branch" >&2
|
echo "ERROR: Not on a feature branch. Current branch: $branch" >&2
|
||||||
echo "Feature branches should be named like: 001-feature-name or 20260319-143022-feature-name" >&2
|
echo "Feature branches should be named like: 001-feature-name" >&2
|
||||||
return 1
|
return 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -101,18 +90,15 @@ find_feature_dir_by_prefix() {
|
|||||||
local branch_name="$2"
|
local branch_name="$2"
|
||||||
local specs_dir="$repo_root/specs"
|
local specs_dir="$repo_root/specs"
|
||||||
|
|
||||||
# Extract prefix from branch (e.g., "004" from "004-whatever" or "20260319-143022" from timestamp branches)
|
# Extract numeric prefix from branch (e.g., "004" from "004-whatever")
|
||||||
local prefix=""
|
if [[ ! "$branch_name" =~ ^([0-9]{3})- ]]; then
|
||||||
if [[ "$branch_name" =~ ^([0-9]{8}-[0-9]{6})- ]]; then
|
# If branch doesn't have numeric prefix, fall back to exact match
|
||||||
prefix="${BASH_REMATCH[1]}"
|
|
||||||
elif [[ "$branch_name" =~ ^([0-9]{3})- ]]; then
|
|
||||||
prefix="${BASH_REMATCH[1]}"
|
|
||||||
else
|
|
||||||
# If branch doesn't have a recognized prefix, fall back to exact match
|
|
||||||
echo "$specs_dir/$branch_name"
|
echo "$specs_dir/$branch_name"
|
||||||
return
|
return
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
local prefix="${BASH_REMATCH[1]}"
|
||||||
|
|
||||||
# Search for directories in specs/ that start with this prefix
|
# Search for directories in specs/ that start with this prefix
|
||||||
local matches=()
|
local matches=()
|
||||||
if [[ -d "$specs_dir" ]]; then
|
if [[ -d "$specs_dir" ]]; then
|
||||||
@@ -133,8 +119,8 @@ find_feature_dir_by_prefix() {
|
|||||||
else
|
else
|
||||||
# Multiple matches - this shouldn't happen with proper naming convention
|
# Multiple matches - this shouldn't happen with proper naming convention
|
||||||
echo "ERROR: Multiple spec directories found with prefix '$prefix': ${matches[*]}" >&2
|
echo "ERROR: Multiple spec directories found with prefix '$prefix': ${matches[*]}" >&2
|
||||||
echo "Please ensure only one spec directory exists per prefix." >&2
|
echo "Please ensure only one spec directory exists per numeric prefix." >&2
|
||||||
return 1
|
echo "$specs_dir/$branch_name" # Return something to avoid breaking the script
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -148,58 +134,21 @@ get_feature_paths() {
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# Use prefix-based lookup to support multiple branches per spec
|
# Use prefix-based lookup to support multiple branches per spec
|
||||||
local feature_dir
|
local feature_dir=$(find_feature_dir_by_prefix "$repo_root" "$current_branch")
|
||||||
if ! feature_dir=$(find_feature_dir_by_prefix "$repo_root" "$current_branch"); then
|
|
||||||
echo "ERROR: Failed to resolve feature directory" >&2
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Use printf '%q' to safely quote values, preventing shell injection
|
cat <<EOF
|
||||||
# via crafted branch names or paths containing special characters
|
REPO_ROOT='$repo_root'
|
||||||
printf 'REPO_ROOT=%q\n' "$repo_root"
|
CURRENT_BRANCH='$current_branch'
|
||||||
printf 'CURRENT_BRANCH=%q\n' "$current_branch"
|
HAS_GIT='$has_git_repo'
|
||||||
printf 'HAS_GIT=%q\n' "$has_git_repo"
|
FEATURE_DIR='$feature_dir'
|
||||||
printf 'FEATURE_DIR=%q\n' "$feature_dir"
|
FEATURE_SPEC='$feature_dir/spec.md'
|
||||||
printf 'FEATURE_SPEC=%q\n' "$feature_dir/spec.md"
|
IMPL_PLAN='$feature_dir/plan.md'
|
||||||
printf 'IMPL_PLAN=%q\n' "$feature_dir/plan.md"
|
TASKS='$feature_dir/tasks.md'
|
||||||
printf 'TASKS=%q\n' "$feature_dir/tasks.md"
|
RESEARCH='$feature_dir/research.md'
|
||||||
printf 'RESEARCH=%q\n' "$feature_dir/research.md"
|
DATA_MODEL='$feature_dir/data-model.md'
|
||||||
printf 'DATA_MODEL=%q\n' "$feature_dir/data-model.md"
|
QUICKSTART='$feature_dir/quickstart.md'
|
||||||
printf 'QUICKSTART=%q\n' "$feature_dir/quickstart.md"
|
CONTRACTS_DIR='$feature_dir/contracts'
|
||||||
printf 'CONTRACTS_DIR=%q\n' "$feature_dir/contracts"
|
EOF
|
||||||
}
|
|
||||||
|
|
||||||
# Check if jq is available for safe JSON construction
|
|
||||||
has_jq() {
|
|
||||||
command -v jq >/dev/null 2>&1
|
|
||||||
}
|
|
||||||
|
|
||||||
# Escape a string for safe embedding in a JSON value (fallback when jq is unavailable).
|
|
||||||
# Handles backslash, double-quote, and JSON-required control character escapes (RFC 8259).
|
|
||||||
json_escape() {
|
|
||||||
local s="$1"
|
|
||||||
s="${s//\\/\\\\}"
|
|
||||||
s="${s//\"/\\\"}"
|
|
||||||
s="${s//$'\n'/\\n}"
|
|
||||||
s="${s//$'\t'/\\t}"
|
|
||||||
s="${s//$'\r'/\\r}"
|
|
||||||
s="${s//$'\b'/\\b}"
|
|
||||||
s="${s//$'\f'/\\f}"
|
|
||||||
# Escape any remaining U+0001-U+001F control characters as \uXXXX.
|
|
||||||
# (U+0000/NUL cannot appear in bash strings and is excluded.)
|
|
||||||
# LC_ALL=C ensures ${#s} counts bytes and ${s:$i:1} yields single bytes,
|
|
||||||
# so multi-byte UTF-8 sequences (first byte >= 0xC0) pass through intact.
|
|
||||||
local LC_ALL=C
|
|
||||||
local i char code
|
|
||||||
for (( i=0; i<${#s}; i++ )); do
|
|
||||||
char="${s:$i:1}"
|
|
||||||
printf -v code '%d' "'$char" 2>/dev/null || code=256
|
|
||||||
if (( code >= 1 && code <= 31 )); then
|
|
||||||
printf '\\u%04x' "$code"
|
|
||||||
else
|
|
||||||
printf '%s' "$char"
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
}
|
}
|
||||||
|
|
||||||
check_file() { [[ -f "$1" ]] && echo " ✓ $2" || echo " ✗ $2"; }
|
check_file() { [[ -f "$1" ]] && echo " ✓ $2" || echo " ✗ $2"; }
|
||||||
@@ -224,11 +173,9 @@ resolve_template() {
|
|||||||
if [ -d "$presets_dir" ]; then
|
if [ -d "$presets_dir" ]; then
|
||||||
local registry_file="$presets_dir/.registry"
|
local registry_file="$presets_dir/.registry"
|
||||||
if [ -f "$registry_file" ] && command -v python3 >/dev/null 2>&1; then
|
if [ -f "$registry_file" ] && command -v python3 >/dev/null 2>&1; then
|
||||||
# Read preset IDs sorted by priority (lower number = higher precedence).
|
# Read preset IDs sorted by priority (lower number = higher precedence)
|
||||||
# The python3 call is wrapped in an if-condition so that set -e does not
|
local sorted_presets
|
||||||
# abort the function when python3 exits non-zero (e.g. invalid JSON).
|
sorted_presets=$(SPECKIT_REGISTRY="$registry_file" python3 -c "
|
||||||
local sorted_presets=""
|
|
||||||
if sorted_presets=$(SPECKIT_REGISTRY="$registry_file" python3 -c "
|
|
||||||
import json, sys, os
|
import json, sys, os
|
||||||
try:
|
try:
|
||||||
with open(os.environ['SPECKIT_REGISTRY']) as f:
|
with open(os.environ['SPECKIT_REGISTRY']) as f:
|
||||||
@@ -238,17 +185,14 @@ try:
|
|||||||
print(pid)
|
print(pid)
|
||||||
except Exception:
|
except Exception:
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
" 2>/dev/null); then
|
" 2>/dev/null)
|
||||||
if [ -n "$sorted_presets" ]; then
|
if [ $? -eq 0 ] && [ -n "$sorted_presets" ]; then
|
||||||
# python3 succeeded and returned preset IDs — search in priority order
|
while IFS= read -r preset_id; do
|
||||||
while IFS= read -r preset_id; do
|
local candidate="$presets_dir/$preset_id/templates/${template_name}.md"
|
||||||
local candidate="$presets_dir/$preset_id/templates/${template_name}.md"
|
[ -f "$candidate" ] && echo "$candidate" && return 0
|
||||||
[ -f "$candidate" ] && echo "$candidate" && return 0
|
done <<< "$sorted_presets"
|
||||||
done <<< "$sorted_presets"
|
|
||||||
fi
|
|
||||||
# python3 succeeded but registry has no presets — nothing to search
|
|
||||||
else
|
else
|
||||||
# python3 failed (missing, or registry parse error) — fall back to unordered directory scan
|
# python3 returned empty list — fall through to directory scan
|
||||||
for preset in "$presets_dir"/*/; do
|
for preset in "$presets_dir"/*/; do
|
||||||
[ -d "$preset" ] || continue
|
[ -d "$preset" ] || continue
|
||||||
local candidate="$preset/templates/${template_name}.md"
|
local candidate="$preset/templates/${template_name}.md"
|
||||||
@@ -281,9 +225,6 @@ except Exception:
|
|||||||
local core="$base/${template_name}.md"
|
local core="$base/${template_name}.md"
|
||||||
[ -f "$core" ] && echo "$core" && return 0
|
[ -f "$core" ] && echo "$core" && return 0
|
||||||
|
|
||||||
# Template not found in any location.
|
|
||||||
# Return 1 so callers can distinguish "not found" from "found".
|
|
||||||
# Callers running under set -e should use: TEMPLATE=$(resolve_template ...) || true
|
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -5,14 +5,13 @@ set -e
|
|||||||
JSON_MODE=false
|
JSON_MODE=false
|
||||||
SHORT_NAME=""
|
SHORT_NAME=""
|
||||||
BRANCH_NUMBER=""
|
BRANCH_NUMBER=""
|
||||||
USE_TIMESTAMP=false
|
|
||||||
ARGS=()
|
ARGS=()
|
||||||
i=1
|
i=1
|
||||||
while [ $i -le $# ]; do
|
while [ $i -le $# ]; do
|
||||||
arg="${!i}"
|
arg="${!i}"
|
||||||
case "$arg" in
|
case "$arg" in
|
||||||
--json)
|
--json)
|
||||||
JSON_MODE=true
|
JSON_MODE=true
|
||||||
;;
|
;;
|
||||||
--short-name)
|
--short-name)
|
||||||
if [ $((i + 1)) -gt $# ]; then
|
if [ $((i + 1)) -gt $# ]; then
|
||||||
@@ -41,27 +40,22 @@ while [ $i -le $# ]; do
|
|||||||
fi
|
fi
|
||||||
BRANCH_NUMBER="$next_arg"
|
BRANCH_NUMBER="$next_arg"
|
||||||
;;
|
;;
|
||||||
--timestamp)
|
--help|-h)
|
||||||
USE_TIMESTAMP=true
|
echo "Usage: $0 [--json] [--short-name <name>] [--number N] <feature_description>"
|
||||||
;;
|
|
||||||
--help|-h)
|
|
||||||
echo "Usage: $0 [--json] [--short-name <name>] [--number N] [--timestamp] <feature_description>"
|
|
||||||
echo ""
|
echo ""
|
||||||
echo "Options:"
|
echo "Options:"
|
||||||
echo " --json Output in JSON format"
|
echo " --json Output in JSON format"
|
||||||
echo " --short-name <name> Provide a custom short name (2-4 words) for the branch"
|
echo " --short-name <name> Provide a custom short name (2-4 words) for the branch"
|
||||||
echo " --number N Specify branch number manually (overrides auto-detection)"
|
echo " --number N Specify branch number manually (overrides auto-detection)"
|
||||||
echo " --timestamp Use timestamp prefix (YYYYMMDD-HHMMSS) instead of sequential numbering"
|
|
||||||
echo " --help, -h Show this help message"
|
echo " --help, -h Show this help message"
|
||||||
echo ""
|
echo ""
|
||||||
echo "Examples:"
|
echo "Examples:"
|
||||||
echo " $0 'Add user authentication system' --short-name 'user-auth'"
|
echo " $0 'Add user authentication system' --short-name 'user-auth'"
|
||||||
echo " $0 'Implement OAuth2 integration for API' --number 5"
|
echo " $0 'Implement OAuth2 integration for API' --number 5"
|
||||||
echo " $0 --timestamp --short-name 'user-auth' 'Add user authentication'"
|
|
||||||
exit 0
|
exit 0
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
ARGS+=("$arg")
|
ARGS+=("$arg")
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
i=$((i + 1))
|
i=$((i + 1))
|
||||||
@@ -69,7 +63,7 @@ done
|
|||||||
|
|
||||||
FEATURE_DESCRIPTION="${ARGS[*]}"
|
FEATURE_DESCRIPTION="${ARGS[*]}"
|
||||||
if [ -z "$FEATURE_DESCRIPTION" ]; then
|
if [ -z "$FEATURE_DESCRIPTION" ]; then
|
||||||
echo "Usage: $0 [--json] [--short-name <name>] [--number N] [--timestamp] <feature_description>" >&2
|
echo "Usage: $0 [--json] [--short-name <name>] [--number N] <feature_description>" >&2
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -102,13 +96,10 @@ get_highest_from_specs() {
|
|||||||
for dir in "$specs_dir"/*; do
|
for dir in "$specs_dir"/*; do
|
||||||
[ -d "$dir" ] || continue
|
[ -d "$dir" ] || continue
|
||||||
dirname=$(basename "$dir")
|
dirname=$(basename "$dir")
|
||||||
# Only match sequential prefixes (###-*), skip timestamp dirs
|
number=$(echo "$dirname" | grep -o '^[0-9]\+' || echo "0")
|
||||||
if echo "$dirname" | grep -q '^[0-9]\{3\}-'; then
|
number=$((10#$number))
|
||||||
number=$(echo "$dirname" | grep -o '^[0-9]\{3\}')
|
if [ "$number" -gt "$highest" ]; then
|
||||||
number=$((10#$number))
|
highest=$number
|
||||||
if [ "$number" -gt "$highest" ]; then
|
|
||||||
highest=$number
|
|
||||||
fi
|
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
fi
|
fi
|
||||||
@@ -147,7 +138,7 @@ check_existing_branches() {
|
|||||||
local specs_dir="$1"
|
local specs_dir="$1"
|
||||||
|
|
||||||
# Fetch all remotes to get latest branch info (suppress errors if no remotes)
|
# Fetch all remotes to get latest branch info (suppress errors if no remotes)
|
||||||
git fetch --all --prune >/dev/null 2>&1 || true
|
git fetch --all --prune 2>/dev/null || true
|
||||||
|
|
||||||
# Get highest number from ALL branches (not just matching short name)
|
# Get highest number from ALL branches (not just matching short name)
|
||||||
local highest_branch=$(get_highest_from_branches)
|
local highest_branch=$(get_highest_from_branches)
|
||||||
@@ -251,42 +242,29 @@ else
|
|||||||
BRANCH_SUFFIX=$(generate_branch_name "$FEATURE_DESCRIPTION")
|
BRANCH_SUFFIX=$(generate_branch_name "$FEATURE_DESCRIPTION")
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Warn if --number and --timestamp are both specified
|
# Determine branch number
|
||||||
if [ "$USE_TIMESTAMP" = true ] && [ -n "$BRANCH_NUMBER" ]; then
|
if [ -z "$BRANCH_NUMBER" ]; then
|
||||||
>&2 echo "[specify] Warning: --number is ignored when --timestamp is used"
|
if [ "$HAS_GIT" = true ]; then
|
||||||
BRANCH_NUMBER=""
|
# Check existing branches on remotes
|
||||||
fi
|
BRANCH_NUMBER=$(check_existing_branches "$SPECS_DIR")
|
||||||
|
else
|
||||||
# Determine branch prefix
|
# Fall back to local directory check
|
||||||
if [ "$USE_TIMESTAMP" = true ]; then
|
HIGHEST=$(get_highest_from_specs "$SPECS_DIR")
|
||||||
FEATURE_NUM=$(date +%Y%m%d-%H%M%S)
|
BRANCH_NUMBER=$((HIGHEST + 1))
|
||||||
BRANCH_NAME="${FEATURE_NUM}-${BRANCH_SUFFIX}"
|
|
||||||
else
|
|
||||||
# Determine branch number
|
|
||||||
if [ -z "$BRANCH_NUMBER" ]; then
|
|
||||||
if [ "$HAS_GIT" = true ]; then
|
|
||||||
# Check existing branches on remotes
|
|
||||||
BRANCH_NUMBER=$(check_existing_branches "$SPECS_DIR")
|
|
||||||
else
|
|
||||||
# Fall back to local directory check
|
|
||||||
HIGHEST=$(get_highest_from_specs "$SPECS_DIR")
|
|
||||||
BRANCH_NUMBER=$((HIGHEST + 1))
|
|
||||||
fi
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Force base-10 interpretation to prevent octal conversion (e.g., 010 → 8 in octal, but should be 10 in decimal)
|
|
||||||
FEATURE_NUM=$(printf "%03d" "$((10#$BRANCH_NUMBER))")
|
|
||||||
BRANCH_NAME="${FEATURE_NUM}-${BRANCH_SUFFIX}"
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Force base-10 interpretation to prevent octal conversion (e.g., 010 → 8 in octal, but should be 10 in decimal)
|
||||||
|
FEATURE_NUM=$(printf "%03d" "$((10#$BRANCH_NUMBER))")
|
||||||
|
BRANCH_NAME="${FEATURE_NUM}-${BRANCH_SUFFIX}"
|
||||||
|
|
||||||
# GitHub enforces a 244-byte limit on branch names
|
# GitHub enforces a 244-byte limit on branch names
|
||||||
# Validate and truncate if necessary
|
# Validate and truncate if necessary
|
||||||
MAX_BRANCH_LENGTH=244
|
MAX_BRANCH_LENGTH=244
|
||||||
if [ ${#BRANCH_NAME} -gt $MAX_BRANCH_LENGTH ]; then
|
if [ ${#BRANCH_NAME} -gt $MAX_BRANCH_LENGTH ]; then
|
||||||
# Calculate how much we need to trim from suffix
|
# Calculate how much we need to trim from suffix
|
||||||
# Account for prefix length: timestamp (15) + hyphen (1) = 16, or sequential (3) + hyphen (1) = 4
|
# Account for: feature number (3) + hyphen (1) = 4 chars
|
||||||
PREFIX_LENGTH=$(( ${#FEATURE_NUM} + 1 ))
|
MAX_SUFFIX_LENGTH=$((MAX_BRANCH_LENGTH - 4))
|
||||||
MAX_SUFFIX_LENGTH=$((MAX_BRANCH_LENGTH - PREFIX_LENGTH))
|
|
||||||
|
|
||||||
# Truncate suffix at word boundary if possible
|
# Truncate suffix at word boundary if possible
|
||||||
TRUNCATED_SUFFIX=$(echo "$BRANCH_SUFFIX" | cut -c1-$MAX_SUFFIX_LENGTH)
|
TRUNCATED_SUFFIX=$(echo "$BRANCH_SUFFIX" | cut -c1-$MAX_SUFFIX_LENGTH)
|
||||||
@@ -305,11 +283,7 @@ if [ "$HAS_GIT" = true ]; then
|
|||||||
if ! git checkout -b "$BRANCH_NAME" 2>/dev/null; then
|
if ! git checkout -b "$BRANCH_NAME" 2>/dev/null; then
|
||||||
# Check if branch already exists
|
# Check if branch already exists
|
||||||
if git branch --list "$BRANCH_NAME" | grep -q .; then
|
if git branch --list "$BRANCH_NAME" | grep -q .; then
|
||||||
if [ "$USE_TIMESTAMP" = true ]; then
|
>&2 echo "Error: Branch '$BRANCH_NAME' already exists. Please use a different feature name or specify a different number with --number."
|
||||||
>&2 echo "Error: Branch '$BRANCH_NAME' already exists. Rerun to get a new timestamp or use a different --short-name."
|
|
||||||
else
|
|
||||||
>&2 echo "Error: Branch '$BRANCH_NAME' already exists. Please use a different feature name or specify a different number with --number."
|
|
||||||
fi
|
|
||||||
exit 1
|
exit 1
|
||||||
else
|
else
|
||||||
>&2 echo "Error: Failed to create git branch '$BRANCH_NAME'. Please check your git configuration and try again."
|
>&2 echo "Error: Failed to create git branch '$BRANCH_NAME'. Please check your git configuration and try again."
|
||||||
@@ -323,31 +297,18 @@ fi
|
|||||||
FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
|
FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
|
||||||
mkdir -p "$FEATURE_DIR"
|
mkdir -p "$FEATURE_DIR"
|
||||||
|
|
||||||
TEMPLATE=$(resolve_template "spec-template" "$REPO_ROOT") || true
|
TEMPLATE=$(resolve_template "spec-template" "$REPO_ROOT")
|
||||||
SPEC_FILE="$FEATURE_DIR/spec.md"
|
SPEC_FILE="$FEATURE_DIR/spec.md"
|
||||||
if [ -n "$TEMPLATE" ] && [ -f "$TEMPLATE" ]; then
|
if [ -n "$TEMPLATE" ] && [ -f "$TEMPLATE" ]; then cp "$TEMPLATE" "$SPEC_FILE"; else touch "$SPEC_FILE"; fi
|
||||||
cp "$TEMPLATE" "$SPEC_FILE"
|
|
||||||
else
|
|
||||||
echo "Warning: Spec template not found; created empty spec file" >&2
|
|
||||||
touch "$SPEC_FILE"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Inform the user how to persist the feature variable in their own shell
|
# Set the SPECIFY_FEATURE environment variable for the current session
|
||||||
printf '# To persist: export SPECIFY_FEATURE=%q\n' "$BRANCH_NAME" >&2
|
export SPECIFY_FEATURE="$BRANCH_NAME"
|
||||||
|
|
||||||
if $JSON_MODE; then
|
if $JSON_MODE; then
|
||||||
if command -v jq >/dev/null 2>&1; then
|
printf '{"BRANCH_NAME":"%s","SPEC_FILE":"%s","FEATURE_NUM":"%s"}\n' "$BRANCH_NAME" "$SPEC_FILE" "$FEATURE_NUM"
|
||||||
jq -cn \
|
|
||||||
--arg branch_name "$BRANCH_NAME" \
|
|
||||||
--arg spec_file "$SPEC_FILE" \
|
|
||||||
--arg feature_num "$FEATURE_NUM" \
|
|
||||||
'{BRANCH_NAME:$branch_name,SPEC_FILE:$spec_file,FEATURE_NUM:$feature_num}'
|
|
||||||
else
|
|
||||||
printf '{"BRANCH_NAME":"%s","SPEC_FILE":"%s","FEATURE_NUM":"%s"}\n' "$(json_escape "$BRANCH_NAME")" "$(json_escape "$SPEC_FILE")" "$(json_escape "$FEATURE_NUM")"
|
|
||||||
fi
|
|
||||||
else
|
else
|
||||||
echo "BRANCH_NAME: $BRANCH_NAME"
|
echo "BRANCH_NAME: $BRANCH_NAME"
|
||||||
echo "SPEC_FILE: $SPEC_FILE"
|
echo "SPEC_FILE: $SPEC_FILE"
|
||||||
echo "FEATURE_NUM: $FEATURE_NUM"
|
echo "FEATURE_NUM: $FEATURE_NUM"
|
||||||
printf '# To persist in your shell: export SPECIFY_FEATURE=%q\n' "$BRANCH_NAME"
|
echo "SPECIFY_FEATURE environment variable set to: $BRANCH_NAME"
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -28,9 +28,7 @@ SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|||||||
source "$SCRIPT_DIR/common.sh"
|
source "$SCRIPT_DIR/common.sh"
|
||||||
|
|
||||||
# Get all paths and variables from common functions
|
# Get all paths and variables from common functions
|
||||||
_paths_output=$(get_feature_paths) || { echo "ERROR: Failed to resolve feature paths" >&2; exit 1; }
|
eval $(get_feature_paths)
|
||||||
eval "$_paths_output"
|
|
||||||
unset _paths_output
|
|
||||||
|
|
||||||
# Check if we're on a proper feature branch (only for git repos)
|
# Check if we're on a proper feature branch (only for git repos)
|
||||||
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
|
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
|
||||||
@@ -39,7 +37,7 @@ check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
|
|||||||
mkdir -p "$FEATURE_DIR"
|
mkdir -p "$FEATURE_DIR"
|
||||||
|
|
||||||
# Copy plan template if it exists
|
# Copy plan template if it exists
|
||||||
TEMPLATE=$(resolve_template "plan-template" "$REPO_ROOT") || true
|
TEMPLATE=$(resolve_template "plan-template" "$REPO_ROOT")
|
||||||
if [[ -n "$TEMPLATE" ]] && [[ -f "$TEMPLATE" ]]; then
|
if [[ -n "$TEMPLATE" ]] && [[ -f "$TEMPLATE" ]]; then
|
||||||
cp "$TEMPLATE" "$IMPL_PLAN"
|
cp "$TEMPLATE" "$IMPL_PLAN"
|
||||||
echo "Copied plan template to $IMPL_PLAN"
|
echo "Copied plan template to $IMPL_PLAN"
|
||||||
@@ -51,18 +49,8 @@ fi
|
|||||||
|
|
||||||
# Output results
|
# Output results
|
||||||
if $JSON_MODE; then
|
if $JSON_MODE; then
|
||||||
if has_jq; then
|
printf '{"FEATURE_SPEC":"%s","IMPL_PLAN":"%s","SPECS_DIR":"%s","BRANCH":"%s","HAS_GIT":"%s"}\n' \
|
||||||
jq -cn \
|
"$FEATURE_SPEC" "$IMPL_PLAN" "$FEATURE_DIR" "$CURRENT_BRANCH" "$HAS_GIT"
|
||||||
--arg feature_spec "$FEATURE_SPEC" \
|
|
||||||
--arg impl_plan "$IMPL_PLAN" \
|
|
||||||
--arg specs_dir "$FEATURE_DIR" \
|
|
||||||
--arg branch "$CURRENT_BRANCH" \
|
|
||||||
--arg has_git "$HAS_GIT" \
|
|
||||||
'{FEATURE_SPEC:$feature_spec,IMPL_PLAN:$impl_plan,SPECS_DIR:$specs_dir,BRANCH:$branch,HAS_GIT:$has_git}'
|
|
||||||
else
|
|
||||||
printf '{"FEATURE_SPEC":"%s","IMPL_PLAN":"%s","SPECS_DIR":"%s","BRANCH":"%s","HAS_GIT":"%s"}\n' \
|
|
||||||
"$(json_escape "$FEATURE_SPEC")" "$(json_escape "$IMPL_PLAN")" "$(json_escape "$FEATURE_DIR")" "$(json_escape "$CURRENT_BRANCH")" "$(json_escape "$HAS_GIT")"
|
|
||||||
fi
|
|
||||||
else
|
else
|
||||||
echo "FEATURE_SPEC: $FEATURE_SPEC"
|
echo "FEATURE_SPEC: $FEATURE_SPEC"
|
||||||
echo "IMPL_PLAN: $IMPL_PLAN"
|
echo "IMPL_PLAN: $IMPL_PLAN"
|
||||||
|
|||||||
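Aside on the jq-versus-printf change above: the old side routes JSON output through jq, with a json_escape fallback, because values interpolated raw into a printf format string stop being valid JSON as soon as they contain a quote or backslash. A minimal Python sketch of the failure mode and the fix (values invented for illustration):

import json

branch = 'feature-"auth"'

# Raw interpolation, like the plain printf path: the embedded quote
# produces output that is not valid JSON.
broken = '{"BRANCH_NAME":"%s"}' % branch
print(broken)

# Escaping via a real serializer (what jq or a json_escape helper
# provides) keeps the document well-formed.
print(json.dumps({"BRANCH_NAME": branch}))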
@@ -30,12 +30,12 @@
 #
 # 5. Multi-Agent Support
 # - Handles agent-specific file paths and naming conventions
-# - Supports: Claude, Gemini, Copilot, Cursor, Qwen, opencode, Codex, Windsurf, Junie, Kilo Code, Auggie CLI, Roo Code, CodeBuddy CLI, Qoder CLI, Amp, SHAI, Tabnine CLI, Kiro CLI, Mistral Vibe, Kimi Code, Pi Coding Agent, iFlow CLI, Antigravity or Generic
+# - Supports: Claude, Gemini, Copilot, Cursor, Qwen, opencode, Codex, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy CLI, Qoder CLI, Amp, SHAI, Tabnine CLI, Kiro CLI, Mistral Vibe, Antigravity or Generic
 # - Can update single agents or all existing agent files
 # - Creates default Claude file if no agent files exist
 #
 # Usage: ./update-agent-context.sh [agent_type]
-# Agent types: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|junie|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|trae|pi|iflow|generic
+# Agent types: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|generic
 # Leave empty to update all existing agent files

 set -e
@@ -53,9 +53,7 @@ SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 source "$SCRIPT_DIR/common.sh"

 # Get all paths and variables from common functions
-_paths_output=$(get_feature_paths) || { echo "ERROR: Failed to resolve feature paths" >&2; exit 1; }
-eval "$_paths_output"
-unset _paths_output
+eval $(get_feature_paths)

 NEW_PLAN="$IMPL_PLAN" # Alias for compatibility with existing code
 AGENT_TYPE="${1:-}"
@@ -68,24 +66,18 @@ CURSOR_FILE="$REPO_ROOT/.cursor/rules/specify-rules.mdc"
 QWEN_FILE="$REPO_ROOT/QWEN.md"
 AGENTS_FILE="$REPO_ROOT/AGENTS.md"
 WINDSURF_FILE="$REPO_ROOT/.windsurf/rules/specify-rules.md"
-JUNIE_FILE="$REPO_ROOT/.junie/AGENTS.md"
 KILOCODE_FILE="$REPO_ROOT/.kilocode/rules/specify-rules.md"
 AUGGIE_FILE="$REPO_ROOT/.augment/rules/specify-rules.md"
 ROO_FILE="$REPO_ROOT/.roo/rules/specify-rules.md"
 CODEBUDDY_FILE="$REPO_ROOT/CODEBUDDY.md"
 QODER_FILE="$REPO_ROOT/QODER.md"
-# Amp, Kiro CLI, IBM Bob, and Pi all share AGENTS.md — use AGENTS_FILE to avoid
-# updating the same file multiple times.
-AMP_FILE="$AGENTS_FILE"
+AMP_FILE="$REPO_ROOT/AGENTS.md"
 SHAI_FILE="$REPO_ROOT/SHAI.md"
 TABNINE_FILE="$REPO_ROOT/TABNINE.md"
-KIRO_FILE="$AGENTS_FILE"
+KIRO_FILE="$REPO_ROOT/AGENTS.md"
 AGY_FILE="$REPO_ROOT/.agent/rules/specify-rules.md"
-BOB_FILE="$AGENTS_FILE"
+BOB_FILE="$REPO_ROOT/AGENTS.md"
 VIBE_FILE="$REPO_ROOT/.vibe/agents/specify-agents.md"
-KIMI_FILE="$REPO_ROOT/KIMI.md"
-TRAE_FILE="$REPO_ROOT/.trae/rules/AGENTS.md"
-IFLOW_FILE="$REPO_ROOT/IFLOW.md"

 # Template file
 TEMPLATE_FILE="$REPO_ROOT/.specify/templates/agent-file-template.md"
@@ -119,8 +111,6 @@ log_warning() {
 # Cleanup function for temporary files
 cleanup() {
 local exit_code=$?
-# Disarm traps to prevent re-entrant loop
-trap - EXIT INT TERM
 rm -f /tmp/agent_update_*_$$
 rm -f /tmp/manual_additions_$$
 exit $exit_code
@@ -485,7 +475,7 @@ update_existing_agent_file() {
 fi

 # Update timestamp
-if [[ "$line" =~ (\*\*)?Last\ updated(\*\*)?:.*[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] ]]; then
+if [[ "$line" =~ \*\*Last\ updated\*\*:.*[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] ]]; then
 echo "$line" | sed "s/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]/$current_date/" >> "$temp_file"
 else
 echo "$line" >> "$temp_file"
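Note on the timestamp-matching change just above: the old-side pattern makes the bold markers optional, so plain and bold "Last updated" lines both get their date refreshed, while the new side only matches the bold form. A small illustrative Python restatement (sample lines and dates are made up):

import re

pattern = re.compile(r'(\*\*)?Last updated(\*\*)?:.*\d{4}-\d{2}-\d{2}')
for line in ("**Last updated**: 2025-01-02", "Last updated: 2025-01-02"):
    if pattern.search(line):
        # Same effect as the sed substitution: swap the old date in place.
        line = re.sub(r'\d{4}-\d{2}-\d{2}', "2025-06-30", line)
    print(line)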
@@ -616,155 +606,174 @@ update_specific_agent() {

 case "$agent_type" in
 claude)
-update_agent_file "$CLAUDE_FILE" "Claude Code" || return 1
+update_agent_file "$CLAUDE_FILE" "Claude Code"
 ;;
 gemini)
-update_agent_file "$GEMINI_FILE" "Gemini CLI" || return 1
+update_agent_file "$GEMINI_FILE" "Gemini CLI"
 ;;
 copilot)
-update_agent_file "$COPILOT_FILE" "GitHub Copilot" || return 1
+update_agent_file "$COPILOT_FILE" "GitHub Copilot"
 ;;
 cursor-agent)
-update_agent_file "$CURSOR_FILE" "Cursor IDE" || return 1
+update_agent_file "$CURSOR_FILE" "Cursor IDE"
 ;;
 qwen)
-update_agent_file "$QWEN_FILE" "Qwen Code" || return 1
+update_agent_file "$QWEN_FILE" "Qwen Code"
 ;;
 opencode)
-update_agent_file "$AGENTS_FILE" "opencode" || return 1
+update_agent_file "$AGENTS_FILE" "opencode"
 ;;
 codex)
-update_agent_file "$AGENTS_FILE" "Codex CLI" || return 1
+update_agent_file "$AGENTS_FILE" "Codex CLI"
 ;;
 windsurf)
-update_agent_file "$WINDSURF_FILE" "Windsurf" || return 1
-;;
-junie)
-update_agent_file "$JUNIE_FILE" "Junie" || return 1
+update_agent_file "$WINDSURF_FILE" "Windsurf"
 ;;
 kilocode)
-update_agent_file "$KILOCODE_FILE" "Kilo Code" || return 1
+update_agent_file "$KILOCODE_FILE" "Kilo Code"
 ;;
 auggie)
-update_agent_file "$AUGGIE_FILE" "Auggie CLI" || return 1
+update_agent_file "$AUGGIE_FILE" "Auggie CLI"
 ;;
 roo)
-update_agent_file "$ROO_FILE" "Roo Code" || return 1
+update_agent_file "$ROO_FILE" "Roo Code"
 ;;
 codebuddy)
-update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI" || return 1
+update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI"
 ;;
 qodercli)
-update_agent_file "$QODER_FILE" "Qoder CLI" || return 1
+update_agent_file "$QODER_FILE" "Qoder CLI"
 ;;
 amp)
-update_agent_file "$AMP_FILE" "Amp" || return 1
+update_agent_file "$AMP_FILE" "Amp"
 ;;
 shai)
-update_agent_file "$SHAI_FILE" "SHAI" || return 1
+update_agent_file "$SHAI_FILE" "SHAI"
 ;;
 tabnine)
-update_agent_file "$TABNINE_FILE" "Tabnine CLI" || return 1
+update_agent_file "$TABNINE_FILE" "Tabnine CLI"
 ;;
 kiro-cli)
-update_agent_file "$KIRO_FILE" "Kiro CLI" || return 1
+update_agent_file "$KIRO_FILE" "Kiro CLI"
 ;;
 agy)
-update_agent_file "$AGY_FILE" "Antigravity" || return 1
+update_agent_file "$AGY_FILE" "Antigravity"
 ;;
 bob)
-update_agent_file "$BOB_FILE" "IBM Bob" || return 1
+update_agent_file "$BOB_FILE" "IBM Bob"
 ;;
 vibe)
-update_agent_file "$VIBE_FILE" "Mistral Vibe" || return 1
-;;
-kimi)
-update_agent_file "$KIMI_FILE" "Kimi Code" || return 1
-;;
-trae)
-update_agent_file "$TRAE_FILE" "Trae" || return 1
-;;
-pi)
-update_agent_file "$AGENTS_FILE" "Pi Coding Agent" || return 1
-;;
-iflow)
-update_agent_file "$IFLOW_FILE" "iFlow CLI" || return 1
+update_agent_file "$VIBE_FILE" "Mistral Vibe"
 ;;
 generic)
 log_info "Generic agent: no predefined context file. Use the agent-specific update script for your agent."
 ;;
 *)
 log_error "Unknown agent type '$agent_type'"
-log_error "Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|junie|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|trae|pi|iflow|generic"
+log_error "Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|generic"
 exit 1
 ;;
 esac
 }

-# Helper: skip non-existent files and files already updated (dedup by
-# realpath so that variables pointing to the same file — e.g. AMP_FILE,
-# KIRO_FILE, BOB_FILE all resolving to AGENTS_FILE — are only written once).
-# Uses a linear array instead of associative array for bash 3.2 compatibility.
-# Note: defined at top level because bash 3.2 does not support true
-# nested/local functions. _updated_paths, _found_agent, and _all_ok are
-# initialised exclusively inside update_all_existing_agents so that
-# sourcing this script has no side effects on the caller's environment.
-
-_update_if_new() {
-local file="$1" name="$2"
-[[ -f "$file" ]] || return 0
-local real_path
-real_path=$(realpath "$file" 2>/dev/null || echo "$file")
-local p
-if [[ ${#_updated_paths[@]} -gt 0 ]]; then
-for p in "${_updated_paths[@]}"; do
-[[ "$p" == "$real_path" ]] && return 0
-done
-fi
-# Record the file as seen before attempting the update so that:
-# (a) aliases pointing to the same path are not retried on failure
-# (b) _found_agent reflects file existence, not update success
-_updated_paths+=("$real_path")
-_found_agent=true
-update_agent_file "$file" "$name"
-}
-
 update_all_existing_agents() {
-_found_agent=false
-_updated_paths=()
-local _all_ok=true
-_update_if_new "$CLAUDE_FILE" "Claude Code" || _all_ok=false
-_update_if_new "$GEMINI_FILE" "Gemini CLI" || _all_ok=false
-_update_if_new "$COPILOT_FILE" "GitHub Copilot" || _all_ok=false
-_update_if_new "$CURSOR_FILE" "Cursor IDE" || _all_ok=false
-_update_if_new "$QWEN_FILE" "Qwen Code" || _all_ok=false
-_update_if_new "$AGENTS_FILE" "Codex/opencode" || _all_ok=false
-_update_if_new "$AMP_FILE" "Amp" || _all_ok=false
-_update_if_new "$KIRO_FILE" "Kiro CLI" || _all_ok=false
-_update_if_new "$BOB_FILE" "IBM Bob" || _all_ok=false
-_update_if_new "$WINDSURF_FILE" "Windsurf" || _all_ok=false
-_update_if_new "$JUNIE_FILE" "Junie" || _all_ok=false
-_update_if_new "$KILOCODE_FILE" "Kilo Code" || _all_ok=false
-_update_if_new "$AUGGIE_FILE" "Auggie CLI" || _all_ok=false
-_update_if_new "$ROO_FILE" "Roo Code" || _all_ok=false
-_update_if_new "$CODEBUDDY_FILE" "CodeBuddy CLI" || _all_ok=false
-_update_if_new "$SHAI_FILE" "SHAI" || _all_ok=false
-_update_if_new "$TABNINE_FILE" "Tabnine CLI" || _all_ok=false
-_update_if_new "$QODER_FILE" "Qoder CLI" || _all_ok=false
-_update_if_new "$AGY_FILE" "Antigravity" || _all_ok=false
-_update_if_new "$VIBE_FILE" "Mistral Vibe" || _all_ok=false
-_update_if_new "$KIMI_FILE" "Kimi Code" || _all_ok=false
-_update_if_new "$TRAE_FILE" "Trae" || _all_ok=false
-_update_if_new "$IFLOW_FILE" "iFlow CLI" || _all_ok=false
-
-# If no agent files exist, create a default Claude file
-if [[ "$_found_agent" == false ]]; then
-log_info "No existing agent files found, creating default Claude file..."
-update_agent_file "$CLAUDE_FILE" "Claude Code" || return 1
+local found_agent=false
+# Check each possible agent file and update if it exists
+if [[ -f "$CLAUDE_FILE" ]]; then
+update_agent_file "$CLAUDE_FILE" "Claude Code"
+found_agent=true
+fi
+
+if [[ -f "$GEMINI_FILE" ]]; then
+update_agent_file "$GEMINI_FILE" "Gemini CLI"
+found_agent=true
+fi
+
+if [[ -f "$COPILOT_FILE" ]]; then
+update_agent_file "$COPILOT_FILE" "GitHub Copilot"
+found_agent=true
+fi
+
+if [[ -f "$CURSOR_FILE" ]]; then
+update_agent_file "$CURSOR_FILE" "Cursor IDE"
+found_agent=true
+fi
+
+if [[ -f "$QWEN_FILE" ]]; then
+update_agent_file "$QWEN_FILE" "Qwen Code"
+found_agent=true
+fi
+
+if [[ -f "$AGENTS_FILE" ]]; then
+update_agent_file "$AGENTS_FILE" "Codex/opencode"
+found_agent=true
+fi
+
+if [[ -f "$WINDSURF_FILE" ]]; then
+update_agent_file "$WINDSURF_FILE" "Windsurf"
+found_agent=true
+fi
+
+if [[ -f "$KILOCODE_FILE" ]]; then
+update_agent_file "$KILOCODE_FILE" "Kilo Code"
+found_agent=true
 fi

-[[ "$_all_ok" == true ]]
+if [[ -f "$AUGGIE_FILE" ]]; then
+update_agent_file "$AUGGIE_FILE" "Auggie CLI"
+found_agent=true
+fi
+
+if [[ -f "$ROO_FILE" ]]; then
+update_agent_file "$ROO_FILE" "Roo Code"
+found_agent=true
+fi
+
+if [[ -f "$CODEBUDDY_FILE" ]]; then
+update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI"
+found_agent=true
+fi
+
+if [[ -f "$SHAI_FILE" ]]; then
+update_agent_file "$SHAI_FILE" "SHAI"
+found_agent=true
+fi
+
+if [[ -f "$TABNINE_FILE" ]]; then
+update_agent_file "$TABNINE_FILE" "Tabnine CLI"
+found_agent=true
+fi
+
+if [[ -f "$QODER_FILE" ]]; then
+update_agent_file "$QODER_FILE" "Qoder CLI"
+found_agent=true
+fi
+
+if [[ -f "$KIRO_FILE" ]]; then
+update_agent_file "$KIRO_FILE" "Kiro CLI"
+found_agent=true
+fi
+
+if [[ -f "$AGY_FILE" ]]; then
+update_agent_file "$AGY_FILE" "Antigravity"
+found_agent=true
+fi
+if [[ -f "$BOB_FILE" ]]; then
+update_agent_file "$BOB_FILE" "IBM Bob"
+found_agent=true
+fi
+
+if [[ -f "$VIBE_FILE" ]]; then
+update_agent_file "$VIBE_FILE" "Mistral Vibe"
+found_agent=true
+fi
+
+# If no agent files exist, create a default Claude file
+if [[ "$found_agent" == false ]]; then
+log_info "No existing agent files found, creating default Claude file..."
+update_agent_file "$CLAUDE_FILE" "Claude Code"
+fi
 }
 print_summary() {
 echo
@@ -783,7 +792,7 @@ print_summary() {
 fi

 echo
-log_info "Usage: $0 [claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|junie|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|trae|pi|iflow|generic]"
+log_info "Usage: $0 [claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|generic]"
 }

 #==============================================================================
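The deleted _update_if_new helper carries one idea worth restating: when several agent variables alias the same AGENTS.md, resolve each candidate to a canonical path and write the file once. A hedged Python sketch of that dedup-by-realpath pattern (function and variable names are invented for illustration):

from pathlib import Path

def update_all(candidates, update_file):
    """candidates: iterable of (path, agent_name) pairs;
    update_file: callable returning True on success."""
    seen = []                 # linear list, mirroring the bash 3.2 approach
    found = False
    all_ok = True
    for path, name in candidates:
        p = Path(path)
        if not p.is_file():
            continue
        real = str(p.resolve())       # canonical path, like realpath(1)
        if real in seen:
            continue                  # alias of a file already written
        seen.append(real)             # record before updating, so failed
        found = True                  # updates are not retried via aliases
        if not update_file(p, name):
            all_ok = False
    return found, all_ok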
@@ -38,28 +38,17 @@ function Get-CurrentBranch {
 if (Test-Path $specsDir) {
 $latestFeature = ""
 $highest = 0
-$latestTimestamp = ""

 Get-ChildItem -Path $specsDir -Directory | ForEach-Object {
-if ($_.Name -match '^(\d{8}-\d{6})-') {
-# Timestamp-based branch: compare lexicographically
-$ts = $matches[1]
-if ($ts -gt $latestTimestamp) {
-$latestTimestamp = $ts
-$latestFeature = $_.Name
-}
-} elseif ($_.Name -match '^(\d{3})-') {
+if ($_.Name -match '^(\d{3})-') {
 $num = [int]$matches[1]
 if ($num -gt $highest) {
 $highest = $num
-# Only update if no timestamp branch found yet
-if (-not $latestTimestamp) {
-$latestFeature = $_.Name
-}
+$latestFeature = $_.Name
 }
 }
 }

 if ($latestFeature) {
 return $latestFeature
 }
@@ -90,9 +79,9 @@ function Test-FeatureBranch {
 return $true
 }

-if ($Branch -notmatch '^[0-9]{3}-' -and $Branch -notmatch '^\d{8}-\d{6}-') {
+if ($Branch -notmatch '^[0-9]{3}-') {
 Write-Output "ERROR: Not on a feature branch. Current branch: $Branch"
-Write-Output "Feature branches should be named like: 001-feature-name or 20260319-143022-feature-name"
+Write-Output "Feature branches should be named like: 001-feature-name"
 return $false
 }
 return $true
@@ -174,7 +163,7 @@ function Resolve-Template {
 $presets = $registryData.presets
 if ($presets) {
 $sortedPresets = $presets.PSObject.Properties |
-Sort-Object { if ($null -ne $_.Value.priority) { $_.Value.priority } else { 10 } } |
+Sort-Object { if ($_.Value.priority) { $_.Value.priority } else { 10 } } |
 ForEach-Object { $_.Name }
 }
 } catch {
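For orientation, the removed old-side branch detection gives timestamp-prefixed spec directories precedence over sequential ones, comparing timestamps lexicographically and numbers numerically. A rough Python equivalent (directory names are examples):

import re

def latest_feature(names):
    latest_ts, latest, highest = "", "", 0
    for name in names:
        m = re.match(r'^(\d{8}-\d{6})-', name)
        if m:
            if m.group(1) > latest_ts:      # lexicographic == chronological
                latest_ts, latest = m.group(1), name
            continue
        m = re.match(r'^(\d{3})-', name)
        if m and int(m.group(1)) > highest:
            highest = int(m.group(1))
            if not latest_ts:               # numeric dirs only win when no
                latest = name               # timestamp dir has been seen
    return latest

print(latest_feature(["001-auth", "003-api", "20260319-143022-auth"]))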
@@ -4,36 +4,32 @@
 param(
 [switch]$Json,
 [string]$ShortName,
-[Parameter()]
 [int]$Number = 0,
-[switch]$Timestamp,
 [switch]$Help,
-[Parameter(Position = 0, ValueFromRemainingArguments = $true)]
+[Parameter(ValueFromRemainingArguments = $true)]
 [string[]]$FeatureDescription
 )
 $ErrorActionPreference = 'Stop'

 # Show help if requested
 if ($Help) {
-Write-Host "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
+Write-Host "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] [-Number N] <feature description>"
 Write-Host ""
 Write-Host "Options:"
 Write-Host " -Json Output in JSON format"
 Write-Host " -ShortName <name> Provide a custom short name (2-4 words) for the branch"
 Write-Host " -Number N Specify branch number manually (overrides auto-detection)"
-Write-Host " -Timestamp Use timestamp prefix (YYYYMMDD-HHMMSS) instead of sequential numbering"
 Write-Host " -Help Show this help message"
 Write-Host ""
 Write-Host "Examples:"
 Write-Host " ./create-new-feature.ps1 'Add user authentication system' -ShortName 'user-auth'"
 Write-Host " ./create-new-feature.ps1 'Implement OAuth2 integration for API'"
-Write-Host " ./create-new-feature.ps1 -Timestamp -ShortName 'user-auth' 'Add user authentication'"
 exit 0
 }

 # Check if feature description provided
 if (-not $FeatureDescription -or $FeatureDescription.Count -eq 0) {
-Write-Error "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
+Write-Error "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] <feature description>"
 exit 1
 }

@@ -75,7 +71,7 @@ function Get-HighestNumberFromSpecs {
 $highest = 0
 if (Test-Path $SpecsDir) {
 Get-ChildItem -Path $SpecsDir -Directory | ForEach-Object {
-if ($_.Name -match '^(\d{3})-') {
+if ($_.Name -match '^(\d+)') {
 $num = [int]$matches[1]
 if ($num -gt $highest) { $highest = $num }
 }
@@ -96,7 +92,7 @@ function Get-HighestNumberFromBranches {
 $cleanBranch = $branch.Trim() -replace '^\*?\s+', '' -replace '^remotes/[^/]+/', ''

 # Extract feature number if branch matches pattern ###-*
-if ($cleanBranch -match '^(\d{3})-') {
+if ($cleanBranch -match '^(\d+)-') {
 $num = [int]$matches[1]
 if ($num -gt $highest) { $highest = $num }
 }
@@ -219,40 +215,27 @@ if ($ShortName) {
 $branchSuffix = Get-BranchName -Description $featureDesc
 }

-# Warn if -Number and -Timestamp are both specified
-if ($Timestamp -and $Number -ne 0) {
-Write-Warning "[specify] Warning: -Number is ignored when -Timestamp is used"
-$Number = 0
-}
-
-# Determine branch prefix
-if ($Timestamp) {
-$featureNum = Get-Date -Format 'yyyyMMdd-HHmmss'
-$branchName = "$featureNum-$branchSuffix"
-} else {
-# Determine branch number
-if ($Number -eq 0) {
-if ($hasGit) {
-# Check existing branches on remotes
-$Number = Get-NextBranchNumber -SpecsDir $specsDir
-} else {
-# Fall back to local directory check
-$Number = (Get-HighestNumberFromSpecs -SpecsDir $specsDir) + 1
-}
+# Determine branch number
+if ($Number -eq 0) {
+if ($hasGit) {
+# Check existing branches on remotes
+$Number = Get-NextBranchNumber -SpecsDir $specsDir
+} else {
+# Fall back to local directory check
+$Number = (Get-HighestNumberFromSpecs -SpecsDir $specsDir) + 1
 }

-$featureNum = ('{0:000}' -f $Number)
-$branchName = "$featureNum-$branchSuffix"
 }

+$featureNum = ('{0:000}' -f $Number)
+$branchName = "$featureNum-$branchSuffix"

 # GitHub enforces a 244-byte limit on branch names
 # Validate and truncate if necessary
 $maxBranchLength = 244
 if ($branchName.Length -gt $maxBranchLength) {
 # Calculate how much we need to trim from suffix
-# Account for prefix length: timestamp (15) + hyphen (1) = 16, or sequential (3) + hyphen (1) = 4
-$prefixLength = $featureNum.Length + 1
-$maxSuffixLength = $maxBranchLength - $prefixLength
+# Account for: feature number (3) + hyphen (1) = 4 chars
+$maxSuffixLength = $maxBranchLength - 4

 # Truncate suffix
 $truncatedSuffix = $branchSuffix.Substring(0, [Math]::Min($branchSuffix.Length, $maxSuffixLength))
@@ -270,7 +253,7 @@ if ($branchName.Length -gt $maxBranchLength) {
 if ($hasGit) {
 $branchCreated = $false
 try {
-git checkout -q -b $branchName 2>$null | Out-Null
+git checkout -b $branchName 2>$null | Out-Null
 if ($LASTEXITCODE -eq 0) {
 $branchCreated = $true
 }
@@ -282,11 +265,7 @@ if ($hasGit) {
 # Check if branch already exists
 $existingBranch = git branch --list $branchName 2>$null
 if ($existingBranch) {
-if ($Timestamp) {
-Write-Error "Error: Branch '$branchName' already exists. Rerun to get a new timestamp or use a different -ShortName."
-} else {
-Write-Error "Error: Branch '$branchName' already exists. Please use a different feature name or specify a different number with -Number."
-}
+Write-Error "Error: Branch '$branchName' already exists. Please use a different feature name or specify a different number with -Number."
 exit 1
 } else {
 Write-Error "Error: Failed to create git branch '$branchName'. Please check your git configuration and try again."
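One consequence of dropping -Timestamp shows up in the truncation hunk: the new side hard-codes a 4-character prefix budget, which only holds for three-digit numbers, while the old side derives the budget from the prefix actually used. A prefix-aware Python sketch (lengths illustrative):

def truncate_branch(feature_num, suffix, max_len=244):
    # "003" costs 4 bytes with its hyphen; "20260319-143022" costs 16.
    budget = max_len - (len(feature_num) + 1)
    return f"{feature_num}-{suffix[:budget]}"

print(len(truncate_branch("20260319-143022", "x" * 300)))  # 244
print(truncate_branch("003", "short-name"))                # 003-short-name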
@@ -9,7 +9,7 @@ Mirrors the behavior of scripts/bash/update-agent-context.sh:
 2. Plan Data Extraction
 3. Agent File Management (create from template or update existing)
 4. Content Generation (technology stack, recent changes, timestamp)
-5. Multi-Agent Support (claude, gemini, copilot, cursor-agent, qwen, opencode, codex, windsurf, junie, kilocode, auggie, roo, codebuddy, amp, shai, tabnine, kiro-cli, agy, bob, vibe, qodercli, kimi, trae, pi, iflow, generic)
+5. Multi-Agent Support (claude, gemini, copilot, cursor-agent, qwen, opencode, codex, windsurf, kilocode, auggie, roo, codebuddy, amp, shai, tabnine, kiro-cli, agy, bob, vibe, qodercli, generic)

 .PARAMETER AgentType
 Optional agent key to update a single agent. If omitted, updates all existing agent files (creating a default Claude file if none exist).
@@ -25,7 +25,7 @@ Relies on common helper functions in common.ps1
 #>
 param(
 [Parameter(Position=0)]
-[ValidateSet('claude','gemini','copilot','cursor-agent','qwen','opencode','codex','windsurf','junie','kilocode','auggie','roo','codebuddy','amp','shai','tabnine','kiro-cli','agy','bob','qodercli','vibe','kimi','trae','pi','iflow','generic')]
+[ValidateSet('claude','gemini','copilot','cursor-agent','qwen','opencode','codex','windsurf','kilocode','auggie','roo','codebuddy','amp','shai','tabnine','kiro-cli','agy','bob','qodercli','vibe','generic')]
 [string]$AgentType
 )

@@ -51,7 +51,6 @@ $CURSOR_FILE = Join-Path $REPO_ROOT '.cursor/rules/specify-rules.mdc'
 $QWEN_FILE = Join-Path $REPO_ROOT 'QWEN.md'
 $AGENTS_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
 $WINDSURF_FILE = Join-Path $REPO_ROOT '.windsurf/rules/specify-rules.md'
-$JUNIE_FILE = Join-Path $REPO_ROOT '.junie/AGENTS.md'
 $KILOCODE_FILE = Join-Path $REPO_ROOT '.kilocode/rules/specify-rules.md'
 $AUGGIE_FILE = Join-Path $REPO_ROOT '.augment/rules/specify-rules.md'
 $ROO_FILE = Join-Path $REPO_ROOT '.roo/rules/specify-rules.md'
@@ -64,9 +63,6 @@ $KIRO_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
 $AGY_FILE = Join-Path $REPO_ROOT '.agent/rules/specify-rules.md'
 $BOB_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
 $VIBE_FILE = Join-Path $REPO_ROOT '.vibe/agents/specify-agents.md'
-$KIMI_FILE = Join-Path $REPO_ROOT 'KIMI.md'
-$TRAE_FILE = Join-Path $REPO_ROOT '.trae/rules/AGENTS.md'
-$IFLOW_FILE = Join-Path $REPO_ROOT 'IFLOW.md'

 $TEMPLATE_FILE = Join-Path $REPO_ROOT '.specify/templates/agent-file-template.md'

@@ -334,7 +330,7 @@ function Update-ExistingAgentFile {
 if ($existingChanges -lt 2) { $output.Add($line); $existingChanges++ }
 continue
 }
-if ($line -match '(\*\*)?Last updated(\*\*)?: .*\d{4}-\d{2}-\d{2}') {
+if ($line -match '\*\*Last updated\*\*: .*\d{4}-\d{2}-\d{2}') {
 $output.Add(($line -replace '\d{4}-\d{2}-\d{2}',$Date.ToString('yyyy-MM-dd')))
 continue
 }
@@ -398,7 +394,6 @@ function Update-SpecificAgent {
 'opencode' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'opencode' }
 'codex' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Codex CLI' }
 'windsurf' { Update-AgentFile -TargetFile $WINDSURF_FILE -AgentName 'Windsurf' }
-'junie' { Update-AgentFile -TargetFile $JUNIE_FILE -AgentName 'Junie' }
 'kilocode' { Update-AgentFile -TargetFile $KILOCODE_FILE -AgentName 'Kilo Code' }
 'auggie' { Update-AgentFile -TargetFile $AUGGIE_FILE -AgentName 'Auggie CLI' }
 'roo' { Update-AgentFile -TargetFile $ROO_FILE -AgentName 'Roo Code' }
@@ -411,12 +406,8 @@ function Update-SpecificAgent {
 'agy' { Update-AgentFile -TargetFile $AGY_FILE -AgentName 'Antigravity' }
 'bob' { Update-AgentFile -TargetFile $BOB_FILE -AgentName 'IBM Bob' }
 'vibe' { Update-AgentFile -TargetFile $VIBE_FILE -AgentName 'Mistral Vibe' }
-'kimi' { Update-AgentFile -TargetFile $KIMI_FILE -AgentName 'Kimi Code' }
-'trae' { Update-AgentFile -TargetFile $TRAE_FILE -AgentName 'Trae' }
-'pi' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Pi Coding Agent' }
-'iflow' { Update-AgentFile -TargetFile $IFLOW_FILE -AgentName 'iFlow CLI' }
 'generic' { Write-Info 'Generic agent: no predefined context file. Use the agent-specific update script for your agent.' }
-default { Write-Err "Unknown agent type '$Type'"; Write-Err 'Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|junie|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|trae|pi|iflow|generic'; return $false }
+default { Write-Err "Unknown agent type '$Type'"; Write-Err 'Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|generic'; return $false }
 }
 }

@@ -430,7 +421,6 @@ function Update-AllExistingAgents {
 if (Test-Path $QWEN_FILE) { if (-not (Update-AgentFile -TargetFile $QWEN_FILE -AgentName 'Qwen Code')) { $ok = $false }; $found = $true }
 if (Test-Path $AGENTS_FILE) { if (-not (Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Codex/opencode')) { $ok = $false }; $found = $true }
 if (Test-Path $WINDSURF_FILE) { if (-not (Update-AgentFile -TargetFile $WINDSURF_FILE -AgentName 'Windsurf')) { $ok = $false }; $found = $true }
-if (Test-Path $JUNIE_FILE) { if (-not (Update-AgentFile -TargetFile $JUNIE_FILE -AgentName 'Junie')) { $ok = $false }; $found = $true }
 if (Test-Path $KILOCODE_FILE) { if (-not (Update-AgentFile -TargetFile $KILOCODE_FILE -AgentName 'Kilo Code')) { $ok = $false }; $found = $true }
 if (Test-Path $AUGGIE_FILE) { if (-not (Update-AgentFile -TargetFile $AUGGIE_FILE -AgentName 'Auggie CLI')) { $ok = $false }; $found = $true }
 if (Test-Path $ROO_FILE) { if (-not (Update-AgentFile -TargetFile $ROO_FILE -AgentName 'Roo Code')) { $ok = $false }; $found = $true }
@@ -442,9 +432,6 @@ function Update-AllExistingAgents {
 if (Test-Path $AGY_FILE) { if (-not (Update-AgentFile -TargetFile $AGY_FILE -AgentName 'Antigravity')) { $ok = $false }; $found = $true }
 if (Test-Path $BOB_FILE) { if (-not (Update-AgentFile -TargetFile $BOB_FILE -AgentName 'IBM Bob')) { $ok = $false }; $found = $true }
 if (Test-Path $VIBE_FILE) { if (-not (Update-AgentFile -TargetFile $VIBE_FILE -AgentName 'Mistral Vibe')) { $ok = $false }; $found = $true }
-if (Test-Path $KIMI_FILE) { if (-not (Update-AgentFile -TargetFile $KIMI_FILE -AgentName 'Kimi Code')) { $ok = $false }; $found = $true }
-if (Test-Path $TRAE_FILE) { if (-not (Update-AgentFile -TargetFile $TRAE_FILE -AgentName 'Trae')) { $ok = $false }; $found = $true }
-if (Test-Path $IFLOW_FILE) { if (-not (Update-AgentFile -TargetFile $IFLOW_FILE -AgentName 'iFlow CLI')) { $ok = $false }; $found = $true }
 if (-not $found) {
 Write-Info 'No existing agent files found, creating default Claude file...'
 if (-not (Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code')) { $ok = $false }
@@ -459,7 +446,7 @@ function Print-Summary {
 if ($NEW_FRAMEWORK) { Write-Host " - Added framework: $NEW_FRAMEWORK" }
 if ($NEW_DB -and $NEW_DB -ne 'N/A') { Write-Host " - Added database: $NEW_DB" }
 Write-Host ''
-Write-Info 'Usage: ./update-agent-context.ps1 [-AgentType claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|junie|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|trae|pi|iflow|generic]'
+Write-Info 'Usage: ./update-agent-context.ps1 [-AgentType claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|generic]'
 }

 function Main {
File diff suppressed because it is too large
@@ -9,7 +9,6 @@ command files into agent-specific directories in the correct format.
 from pathlib import Path
 from typing import Dict, List, Any

-import platform
 import yaml


@@ -49,9 +48,9 @@ class CommandRegistrar:
 },
 "qwen": {
 "dir": ".qwen/commands",
-"format": "markdown",
-"args": "$ARGUMENTS",
-"extension": ".md"
+"format": "toml",
+"args": "{{args}}",
+"extension": ".toml"
 },
 "opencode": {
 "dir": ".opencode/command",
@@ -60,10 +59,10 @@ class CommandRegistrar:
 "extension": ".md"
 },
 "codex": {
-"dir": ".agents/skills",
+"dir": ".codex/prompts",
 "format": "markdown",
 "args": "$ARGUMENTS",
-"extension": "/SKILL.md",
+"extension": ".md"
 },
 "windsurf": {
 "dir": ".windsurf/workflows",
@@ -71,26 +70,20 @@ class CommandRegistrar:
 "args": "$ARGUMENTS",
 "extension": ".md"
 },
-"junie": {
-"dir": ".junie/commands",
-"format": "markdown",
-"args": "$ARGUMENTS",
-"extension": ".md"
-},
 "kilocode": {
-"dir": ".kilocode/workflows",
+"dir": ".kilocode/rules",
 "format": "markdown",
 "args": "$ARGUMENTS",
 "extension": ".md"
 },
 "auggie": {
-"dir": ".augment/commands",
+"dir": ".augment/rules",
 "format": "markdown",
 "args": "$ARGUMENTS",
 "extension": ".md"
 },
 "roo": {
-"dir": ".roo/commands",
+"dir": ".roo/rules",
 "format": "markdown",
 "args": "$ARGUMENTS",
 "extension": ".md"
@@ -113,12 +106,6 @@ class CommandRegistrar:
 "args": "$ARGUMENTS",
 "extension": ".md"
 },
-"pi": {
-"dir": ".pi/prompts",
-"format": "markdown",
-"args": "$ARGUMENTS",
-"extension": ".md"
-},
 "amp": {
 "dir": ".agents/commands",
 "format": "markdown",
@@ -142,24 +129,6 @@ class CommandRegistrar:
 "format": "markdown",
 "args": "$ARGUMENTS",
 "extension": ".md"
-},
-"kimi": {
-"dir": ".kimi/skills",
-"format": "markdown",
-"args": "$ARGUMENTS",
-"extension": "/SKILL.md",
-},
-"trae": {
-"dir": ".trae/rules",
-"format": "markdown",
-"args": "$ARGUMENTS",
-"extension": ".md"
-},
-"iflow": {
-"dir": ".iflow/commands",
-"format": "markdown",
-"args": "$ARGUMENTS",
-"extension": ".md"
 }
 }

@@ -189,9 +158,6 @@ class CommandRegistrar:
 except yaml.YAMLError:
 frontmatter = {}

-if not isinstance(frontmatter, dict):
-frontmatter = {}
-
 return frontmatter, body

 @staticmethod
@@ -219,14 +185,11 @@ class CommandRegistrar:
 Returns:
 Modified frontmatter with adjusted paths
 """
-for script_key in ("scripts", "agent_scripts"):
-scripts = frontmatter.get(script_key)
-if not isinstance(scripts, dict):
-continue
-
-for key, script_path in scripts.items():
-if isinstance(script_path, str) and script_path.startswith("../../scripts/"):
-scripts[key] = f".specify/scripts/{script_path[14:]}"
+if "scripts" in frontmatter:
+for key in frontmatter["scripts"]:
+script_path = frontmatter["scripts"][key]
+if script_path.startswith("../../scripts/"):
+frontmatter["scripts"][key] = f".specify/scripts/{script_path[14:]}"
 return frontmatter

 def render_markdown_command(
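To make the [14:] slice above less cryptic, here is the same rewrite in isolation; a sketch restating the old-side behavior, including its agent_scripts coverage and non-dict guard:

def adjust_script_paths(frontmatter):
    for key_name in ("scripts", "agent_scripts"):
        scripts = frontmatter.get(key_name)
        if not isinstance(scripts, dict):
            continue                      # tolerate malformed frontmatter
        for key, path in scripts.items():
            if isinstance(path, str) and path.startswith("../../scripts/"):
                # len("../../scripts/") == 14, hence the slice in the diff
                scripts[key] = ".specify/scripts/" + path[14:]
    return frontmatter

fm = {"scripts": {"sh": "../../scripts/bash/setup-plan.sh"}}
print(adjust_script_paths(fm))
# {'scripts': {'sh': '.specify/scripts/bash/setup-plan.sh'}}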
@@ -283,101 +246,6 @@ class CommandRegistrar:

 return "\n".join(toml_lines)

-def render_skill_command(
-self,
-agent_name: str,
-skill_name: str,
-frontmatter: dict,
-body: str,
-source_id: str,
-source_file: str,
-project_root: Path,
-) -> str:
-"""Render a command override as a SKILL.md file.
-
-SKILL-target agents should receive the same skills-oriented
-frontmatter shape used elsewhere in the project instead of the
-original command frontmatter.
-
-Technical debt note:
-Spec-kit currently has multiple SKILL.md generators (template packaging,
-init-time conversion, and extension/preset overrides). Keep the skill
-frontmatter keys aligned (name/description/compatibility/metadata, with
-metadata.author and metadata.source subkeys) to avoid drift across agents.
-"""
-if not isinstance(frontmatter, dict):
-frontmatter = {}
-
-if agent_name == "codex":
-body = self._resolve_codex_skill_placeholders(frontmatter, body, project_root)
-
-description = frontmatter.get("description", f"Spec-kit workflow command: {skill_name}")
-skill_frontmatter = {
-"name": skill_name,
-"description": description,
-"compatibility": "Requires spec-kit project structure with .specify/ directory",
-"metadata": {
-"author": "github-spec-kit",
-"source": f"{source_id}:{source_file}",
-},
-}
-return self.render_frontmatter(skill_frontmatter) + "\n" + body
-
-@staticmethod
-def _resolve_codex_skill_placeholders(frontmatter: dict, body: str, project_root: Path) -> str:
-"""Resolve script placeholders for Codex skill overrides.
-
-This intentionally scopes the fix to Codex, which is the newly
-migrated runtime path in this PR. Existing Kimi behavior is left
-unchanged for now.
-"""
-try:
-from . import load_init_options
-except ImportError:
-return body
-
-if not isinstance(frontmatter, dict):
-frontmatter = {}
-
-scripts = frontmatter.get("scripts", {}) or {}
-agent_scripts = frontmatter.get("agent_scripts", {}) or {}
-if not isinstance(scripts, dict):
-scripts = {}
-if not isinstance(agent_scripts, dict):
-agent_scripts = {}
-
-script_variant = load_init_options(project_root).get("script")
-if script_variant not in {"sh", "ps"}:
-fallback_order = []
-default_variant = "ps" if platform.system().lower().startswith("win") else "sh"
-secondary_variant = "sh" if default_variant == "ps" else "ps"
-
-if default_variant in scripts or default_variant in agent_scripts:
-fallback_order.append(default_variant)
-if secondary_variant in scripts or secondary_variant in agent_scripts:
-fallback_order.append(secondary_variant)
-
-for key in scripts:
-if key not in fallback_order:
-fallback_order.append(key)
-for key in agent_scripts:
-if key not in fallback_order:
-fallback_order.append(key)
-
-script_variant = fallback_order[0] if fallback_order else None
-
-script_command = scripts.get(script_variant) if script_variant else None
-if script_command:
-script_command = script_command.replace("{ARGS}", "$ARGUMENTS")
-body = body.replace("{SCRIPT}", script_command)
-
-agent_script_command = agent_scripts.get(script_variant) if script_variant else None
-if agent_script_command:
-agent_script_command = agent_script_command.replace("{ARGS}", "$ARGUMENTS")
-body = body.replace("{AGENT_SCRIPT}", agent_script_command)
-
-return body.replace("{ARGS}", "$ARGUMENTS").replace("__AGENT__", "codex")
-
 def _convert_argument_placeholder(self, content: str, from_placeholder: str, to_placeholder: str) -> str:
 """Convert argument placeholder format.

@@ -391,18 +259,6 @@ class CommandRegistrar:
 """
 return content.replace(from_placeholder, to_placeholder)

-@staticmethod
-def _compute_output_name(agent_name: str, cmd_name: str, agent_config: Dict[str, Any]) -> str:
-"""Compute the on-disk command or skill name for an agent."""
-if agent_config["extension"] != "/SKILL.md":
-return cmd_name
-
-short_name = cmd_name
-if short_name.startswith("speckit."):
-short_name = short_name[len("speckit."):]
-
-return f"speckit.{short_name}" if agent_name == "kimi" else f"speckit-{short_name}"
-
 def register_commands(
 self,
 agent_name: str,
@@ -454,21 +310,14 @@ class CommandRegistrar:
 body, "$ARGUMENTS", agent_config["args"]
 )

-output_name = self._compute_output_name(agent_name, cmd_name, agent_config)
-
-if agent_config["extension"] == "/SKILL.md":
-output = self.render_skill_command(
-agent_name, output_name, frontmatter, body, source_id, cmd_file, project_root
-)
-elif agent_config["format"] == "markdown":
+if agent_config["format"] == "markdown":
 output = self.render_markdown_command(frontmatter, body, source_id, context_note)
 elif agent_config["format"] == "toml":
 output = self.render_toml_command(frontmatter, body, source_id)
 else:
 raise ValueError(f"Unsupported format: {agent_config['format']}")

-dest_file = commands_dir / f"{output_name}{agent_config['extension']}"
-dest_file.parent.mkdir(parents=True, exist_ok=True)
+dest_file = commands_dir / f"{cmd_name}{agent_config['extension']}"
 dest_file.write_text(output, encoding="utf-8")

 if agent_name == "copilot":
@@ -477,15 +326,8 @@ class CommandRegistrar:
 registered.append(cmd_name)

 for alias in cmd_info.get("aliases", []):
-alias_output_name = self._compute_output_name(agent_name, alias, agent_config)
-alias_output = output
-if agent_config["extension"] == "/SKILL.md":
-alias_output = self.render_skill_command(
-agent_name, alias_output_name, frontmatter, body, source_id, cmd_file, project_root
-)
-alias_file = commands_dir / f"{alias_output_name}{agent_config['extension']}"
-alias_file.parent.mkdir(parents=True, exist_ok=True)
-alias_file.write_text(alias_output, encoding="utf-8")
+alias_file = commands_dir / f"{alias}{agent_config['extension']}"
+alias_file.write_text(output, encoding="utf-8")
 if agent_name == "copilot":
 self.write_copilot_prompt(project_root, alias)
 registered.append(alias)
@@ -528,7 +370,7 @@ class CommandRegistrar:
 results = {}

 for agent_name, agent_config in self.AGENT_CONFIGS.items():
-agent_dir = project_root / agent_config["dir"]
+agent_dir = project_root / agent_config["dir"].split("/")[0]

 if agent_dir.exists():
 try:
@@ -562,8 +404,7 @@ class CommandRegistrar:
 commands_dir = project_root / agent_config["dir"]

 for cmd_name in cmd_names:
-output_name = self._compute_output_name(agent_name, cmd_name, agent_config)
-cmd_file = commands_dir / f"{output_name}{agent_config['extension']}"
+cmd_file = commands_dir / f"{cmd_name}{agent_config['extension']}"
 if cmd_file.exists():
 cmd_file.unlink()

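The deleted _compute_output_name is short but easy to misread across the -/+ columns; restated standalone with the same logic (example agents and command names only):

def compute_output_name(agent_name, cmd_name, extension):
    if extension != "/SKILL.md":
        return cmd_name                        # ordinary command files
    short = cmd_name.removeprefix("speckit.")  # normalize first
    # kimi keeps the dot-joined form; other SKILL.md agents use a dash.
    return f"speckit.{short}" if agent_name == "kimi" else f"speckit-{short}"

print(compute_output_name("codex", "speckit.plan", "/SKILL.md"))  # speckit-plan
print(compute_output_name("kimi", "plan", "/SKILL.md"))           # speckit.plan
print(compute_output_name("roo", "speckit.plan", ".md"))          # speckit.plan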
@@ -12,7 +12,6 @@ import os
 import tempfile
 import zipfile
 import shutil
-import copy
 from dataclasses import dataclass
 from pathlib import Path
 from typing import Optional, Dict, List, Any, Callable, Set
@@ -41,26 +40,6 @@ class CompatibilityError(ExtensionError):
 pass


-def normalize_priority(value: Any, default: int = 10) -> int:
-"""Normalize a stored priority value for sorting and display.
-
-Corrupted registry data may contain missing, non-numeric, or non-positive
-values. In those cases, fall back to the default priority.
-
-Args:
-value: Priority value to normalize (may be int, str, None, etc.)
-default: Default priority to use for invalid values (default: 10)
-
-Returns:
-Normalized priority as positive integer (>= 1)
-"""
-try:
-priority = int(value)
-except (TypeError, ValueError):
-return default
-return priority if priority >= 1 else default
-
-
 @dataclass
 class CatalogEntry:
 """Represents a single catalog entry in the catalog stack."""
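Since normalize_priority disappears in this hunk, its contract is worth pinning down; the following restates the removed logic against sample inputs (the 10 is the function's stated default):

def normalize_priority(value, default=10):
    try:
        priority = int(value)
    except (TypeError, ValueError):
        return default                # missing or non-numeric values
    return priority if priority >= 1 else default  # reject 0 and negatives

for value in (5, "3", 7.9, None, "high", 0, -2):
    print(repr(value), "->", normalize_priority(value))
# 5 -> 5, '3' -> 3, 7.9 -> 7, None -> 10, 'high' -> 10, 0 -> 10, -2 -> 10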
@@ -222,17 +201,7 @@ class ExtensionRegistry:

 try:
 with open(self.registry_path, 'r') as f:
-data = json.load(f)
-# Validate loaded data is a dict (handles corrupted registry files)
-if not isinstance(data, dict):
-return {
-"schema_version": self.SCHEMA_VERSION,
-"extensions": {}
-}
-# Normalize extensions field (handles corrupted extensions value)
-if not isinstance(data.get("extensions"), dict):
-data["extensions"] = {}
-return data
+return json.load(f)
 except (json.JSONDecodeError, FileNotFoundError):
 # Corrupted or missing registry, start fresh
 return {
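A hedged, standalone sketch of the defensive load the old side performs, assuming the registry is a plain JSON file with a top-level "extensions" mapping; the SCHEMA_VERSION constant here is a placeholder for the class attribute.

```python
import json
from pathlib import Path

SCHEMA_VERSION = "1.0"  # placeholder; the real constant lives on the registry class

def load_registry(path: Path) -> dict:
    """Load registry JSON, falling back to an empty structure on any corruption."""
    empty = {"schema_version": SCHEMA_VERSION, "extensions": {}}
    try:
        with open(path, "r") as f:
            data = json.load(f)
    except (json.JSONDecodeError, FileNotFoundError):
        return empty
    if not isinstance(data, dict):
        return empty
    if not isinstance(data.get("extensions"), dict):
        data["extensions"] = {}
    return data
```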
@@ -254,137 +223,39 @@ class ExtensionRegistry:
 metadata: Extension metadata (version, source, etc.)
 """
 self.data["extensions"][extension_id] = {
-**copy.deepcopy(metadata),
+**metadata,
 "installed_at": datetime.now(timezone.utc).isoformat()
 }
 self._save()

-def update(self, extension_id: str, metadata: dict):
-"""Update extension metadata in registry, merging with existing entry.
-
-Merges the provided metadata with the existing entry, preserving any
-fields not specified in the new metadata. The installed_at timestamp
-is always preserved from the original entry.
-
-Use this method instead of add() when updating existing extension
-metadata (e.g., enabling/disabling) to preserve the original
-installation timestamp and other existing fields.
-
-Args:
-extension_id: Extension ID
-metadata: Extension metadata fields to update (merged with existing)
-
-Raises:
-KeyError: If extension is not installed
-"""
-extensions = self.data.get("extensions")
-if not isinstance(extensions, dict) or extension_id not in extensions:
-raise KeyError(f"Extension '{extension_id}' is not installed")
-# Merge new metadata with existing, preserving original installed_at
-existing = extensions[extension_id]
-# Handle corrupted registry entries (e.g., string/list instead of dict)
-if not isinstance(existing, dict):
-existing = {}
-# Merge: existing fields preserved, new fields override (deep copy to prevent caller mutation)
-merged = {**existing, **copy.deepcopy(metadata)}
-# Always preserve original installed_at based on key existence, not truthiness,
-# to handle cases where the field exists but may be falsy (legacy/corruption)
-if "installed_at" in existing:
-merged["installed_at"] = existing["installed_at"]
-else:
-# If not present in existing, explicitly remove from merged if caller provided it
-merged.pop("installed_at", None)
-extensions[extension_id] = merged
-self._save()
-
-def restore(self, extension_id: str, metadata: dict):
-"""Restore extension metadata to registry without modifying timestamps.
-
-Use this method for rollback scenarios where you have a complete backup
-of the registry entry (including installed_at) and want to restore it
-exactly as it was.
-
-Args:
-extension_id: Extension ID
-metadata: Complete extension metadata including installed_at
-
-Raises:
-ValueError: If metadata is None or not a dict
-"""
-if metadata is None or not isinstance(metadata, dict):
-raise ValueError(f"Cannot restore '{extension_id}': metadata must be a dict")
-# Ensure extensions dict exists (handle corrupted registry)
-if not isinstance(self.data.get("extensions"), dict):
-self.data["extensions"] = {}
-self.data["extensions"][extension_id] = copy.deepcopy(metadata)
-self._save()
-
 def remove(self, extension_id: str):
 """Remove extension from registry.

 Args:
 extension_id: Extension ID
 """
-extensions = self.data.get("extensions")
-if not isinstance(extensions, dict):
-return
-if extension_id in extensions:
-del extensions[extension_id]
+if extension_id in self.data["extensions"]:
+del self.data["extensions"][extension_id]
 self._save()

 def get(self, extension_id: str) -> Optional[dict]:
 """Get extension metadata from registry.

-Returns a deep copy to prevent callers from accidentally mutating
-nested internal registry state without going through the write path.
-
 Args:
 extension_id: Extension ID

 Returns:
-Deep copy of extension metadata, or None if not found or corrupted
+Extension metadata or None if not found
 """
-extensions = self.data.get("extensions")
-if not isinstance(extensions, dict):
-return None
-entry = extensions.get(extension_id)
-# Return None for missing or corrupted (non-dict) entries
-if entry is None or not isinstance(entry, dict):
-return None
-return copy.deepcopy(entry)
+return self.data["extensions"].get(extension_id)

 def list(self) -> Dict[str, dict]:
-"""Get all installed extensions with valid metadata.
+"""Get all installed extensions.

-Returns a deep copy of extensions with dict metadata only.
-Corrupted entries (non-dict values) are filtered out.
-
 Returns:
-Dictionary of extension_id -> metadata (deep copies), empty dict if corrupted
+Dictionary of extension_id -> metadata
 """
-extensions = self.data.get("extensions", {}) or {}
-if not isinstance(extensions, dict):
-return {}
-# Filter to only valid dict entries to match type contract
-return {
-ext_id: copy.deepcopy(meta)
-for ext_id, meta in extensions.items()
-if isinstance(meta, dict)
-}
-
-def keys(self) -> set:
-"""Get all extension IDs including corrupted entries.
-
-Lightweight method that returns IDs without deep-copying metadata.
-Use this when you only need to check which extensions are tracked.
-
-Returns:
-Set of extension IDs (includes corrupted entries)
-"""
-extensions = self.data.get("extensions", {}) or {}
-if not isinstance(extensions, dict):
-return set()
-return set(extensions.keys())
+return self.data["extensions"]

 def is_installed(self, extension_id: str) -> bool:
 """Check if extension is installed.
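The removed update() docstring describes a specific merge rule: existing fields win nothing, new fields override, but installed_at is preserved by key existence. A minimal sketch of that rule as a standalone function, with illustrative sample data:

```python
import copy

def merge_metadata(existing: dict, updates: dict) -> dict:
    """Merge updates over existing metadata without losing the original timestamp."""
    if not isinstance(existing, dict):  # tolerate corrupted entries
        existing = {}
    merged = {**existing, **copy.deepcopy(updates)}
    if "installed_at" in existing:
        merged["installed_at"] = existing["installed_at"]
    else:
        merged.pop("installed_at", None)
    return merged

old = {"version": "1.0.0", "enabled": True, "installed_at": "2024-01-01T00:00:00+00:00"}
new = merge_metadata(old, {"enabled": False, "installed_at": "clobbered"})
assert new["enabled"] is False
assert new["installed_at"] == "2024-01-01T00:00:00+00:00"
```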
@@ -393,44 +264,9 @@ class ExtensionRegistry:
 extension_id: Extension ID

 Returns:
-True if extension is installed, False if not or registry corrupted
+True if extension is installed
 """
-extensions = self.data.get("extensions")
-if not isinstance(extensions, dict):
-return False
-return extension_id in extensions
-
-def list_by_priority(self, include_disabled: bool = False) -> List[tuple]:
-"""Get all installed extensions sorted by priority.
-
-Lower priority number = higher precedence (checked first).
-Extensions with equal priority are sorted alphabetically by ID
-for deterministic ordering.
-
-Args:
-include_disabled: If True, include disabled extensions. Default False.
-
-Returns:
-List of (extension_id, metadata_copy) tuples sorted by priority.
-Metadata is deep-copied to prevent accidental mutation.
-"""
-extensions = self.data.get("extensions", {}) or {}
-if not isinstance(extensions, dict):
-extensions = {}
-sortable_extensions = []
-for ext_id, meta in extensions.items():
-if not isinstance(meta, dict):
-continue
-# Skip disabled extensions unless explicitly requested
-if not include_disabled and not meta.get("enabled", True):
-continue
-metadata_copy = copy.deepcopy(meta)
-metadata_copy["priority"] = normalize_priority(metadata_copy.get("priority", 10))
-sortable_extensions.append((ext_id, metadata_copy))
-return sorted(
-sortable_extensions,
-key=lambda item: (item[1]["priority"], item[0]),
-)
+return extension_id in self.data["extensions"]


 class ExtensionManager:
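A small sketch of the ordering the removed list_by_priority used: lower priority number first, alphabetical ID as tie-breaker, disabled entries skipped, corrupted priorities normalized. The registry contents below are invented for illustration.

```python
extensions = {
    "docs": {"priority": 10, "enabled": True},
    "audit": {"priority": 1, "enabled": True},
    "beta": {"priority": 1, "enabled": False},
    "legacy": {"priority": "oops", "enabled": True},  # corrupted value
}

def normalize(value, default=10):
    try:
        p = int(value)
    except (TypeError, ValueError):
        return default
    return p if p >= 1 else default

ordered = sorted(
    ((ext_id, meta) for ext_id, meta in extensions.items() if meta.get("enabled", True)),
    key=lambda item: (normalize(item[1].get("priority")), item[0]),
)
# Lower number wins, ties break alphabetically, disabled entries are skipped:
assert [ext_id for ext_id, _ in ordered] == ["audit", "docs", "legacy"]
```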
@@ -548,8 +384,7 @@ class ExtensionManager:
 self,
 source_dir: Path,
 speckit_version: str,
-register_commands: bool = True,
-priority: int = 10,
+register_commands: bool = True
 ) -> ExtensionManifest:
 """Install extension from a local directory.

@@ -557,19 +392,14 @@ class ExtensionManager:
 source_dir: Path to extension directory
 speckit_version: Current spec-kit version
 register_commands: If True, register commands with AI agents
-priority: Resolution priority (lower = higher precedence, default 10)

 Returns:
 Installed extension manifest

 Raises:
-ValidationError: If manifest is invalid or priority is invalid
+ValidationError: If manifest is invalid
 CompatibilityError: If extension is incompatible
 """
-# Validate priority
-if priority < 1:
-raise ValidationError("Priority must be a positive integer (1 or higher)")
-
 # Load and validate manifest
 manifest_path = source_dir / "extension.yml"
 manifest = ExtensionManifest(manifest_path)
@@ -611,7 +441,6 @@ class ExtensionManager:
 "source": "local",
 "manifest_hash": manifest.get_hash(),
 "enabled": True,
-"priority": priority,
 "registered_commands": registered_commands
 })

@@ -620,27 +449,21 @@ class ExtensionManager:
 def install_from_zip(
 self,
 zip_path: Path,
-speckit_version: str,
-priority: int = 10,
+speckit_version: str
 ) -> ExtensionManifest:
 """Install extension from ZIP file.

 Args:
 zip_path: Path to extension ZIP file
 speckit_version: Current spec-kit version
-priority: Resolution priority (lower = higher precedence, default 10)

 Returns:
 Installed extension manifest

 Raises:
-ValidationError: If manifest is invalid or priority is invalid
+ValidationError: If manifest is invalid
 CompatibilityError: If extension is incompatible
 """
-# Validate priority early
-if priority < 1:
-raise ValidationError("Priority must be a positive integer (1 or higher)")
-
 with tempfile.TemporaryDirectory() as tmpdir:
 temp_path = Path(tmpdir)

@@ -675,7 +498,7 @@ class ExtensionManager:
 raise ValidationError("No extension.yml found in ZIP file")

 # Install from extracted directory
-return self.install_from_directory(extension_dir, speckit_version, priority=priority)
+return self.install_from_directory(extension_dir, speckit_version)

 def remove(self, extension_id: str, keep_config: bool = False) -> bool:
 """Remove an installed extension.
@@ -692,7 +515,7 @@ class ExtensionManager:

 # Get registered commands before removal
 metadata = self.registry.get(extension_id)
-registered_commands = metadata.get("registered_commands", {}) if metadata else {}
+registered_commands = metadata.get("registered_commands", {})

 extension_dir = self.extensions_dir / extension_id

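The one-line change above is the difference between tolerating and crashing on an unregistered extension. A hedged sketch of the guarded lookup, with an invented registry shape:

```python
def registered_commands_for(registry: dict, extension_id: str) -> dict:
    """Return the commands recorded for an extension, or {} if it was never registered."""
    metadata = registry.get(extension_id)
    return metadata.get("registered_commands", {}) if metadata else {}

assert registered_commands_for({}, "missing") == {}
assert registered_commands_for(
    {"docs": {"registered_commands": {"claude": ["speckit.docs"]}}}, "docs"
) == {"claude": ["speckit.docs"]}
```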
@@ -753,9 +576,6 @@ class ExtensionManager:
 result = []

 for ext_id, metadata in self.registry.list().items():
-# Ensure metadata is a dictionary to avoid AttributeError when using .get()
-if not isinstance(metadata, dict):
-metadata = {}
 ext_dir = self.extensions_dir / ext_id
 manifest_path = ext_dir / "extension.yml"

@@ -764,10 +584,9 @@ class ExtensionManager:
 result.append({
 "id": ext_id,
 "name": manifest.name,
-"version": metadata.get("version", "unknown"),
+"version": metadata["version"],
 "description": manifest.description,
 "enabled": metadata.get("enabled", True),
-"priority": normalize_priority(metadata.get("priority")),
 "installed_at": metadata.get("installed_at"),
 "command_count": len(manifest.commands),
 "hook_count": len(manifest.hooks)
@@ -780,7 +599,6 @@ class ExtensionManager:
 "version": metadata.get("version", "unknown"),
 "description": "⚠️ Corrupted extension",
 "enabled": False,
-"priority": normalize_priority(metadata.get("priority")),
 "installed_at": metadata.get("installed_at"),
 "command_count": 0,
 "hook_count": 0
@@ -865,10 +683,7 @@ class CommandRegistrar:
 return self._registrar.render_frontmatter(frontmatter) + "\n" + context_note + body

 def _render_toml_command(self, frontmatter, body, ext_id):
-# Preserve extension-specific context comments for backward compatibility
-base = self._registrar.render_toml_command(frontmatter, body, ext_id)
-context_lines = f"# Extension: {ext_id}\n# Config: .specify/extensions/{ext_id}/\n"
-return base.rstrip("\n") + "\n" + context_lines
+return self._registrar.render_toml_command(frontmatter, body, ext_id)

 def register_commands_for_agent(
 self,
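A minimal sketch of the comment-appending behavior the old side keeps, with a stand-in for the base renderer; the TOML content shown is purely illustrative.

```python
def render_with_context(base_toml: str, ext_id: str) -> str:
    """Append extension-context comments to an already rendered TOML command."""
    context_lines = f"# Extension: {ext_id}\n# Config: .specify/extensions/{ext_id}/\n"
    return base_toml.rstrip("\n") + "\n" + context_lines

print(render_with_context('description = "Sync docs"\nprompt = "..."\n', "docs"))
```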
@@ -964,13 +779,12 @@ class ExtensionCatalog:
 config_path: Path to extension-catalogs.yml

 Returns:
-Ordered list of CatalogEntry objects, or None if file doesn't exist.
+Ordered list of CatalogEntry objects, or None if file doesn't exist
+or contains no valid catalog entries.

 Raises:
 ValidationError: If any catalog entry has an invalid URL,
-the file cannot be parsed, a priority value is invalid,
-or the file exists but contains no valid catalog entries
-(fail-closed for security).
+the file cannot be parsed, or a priority value is invalid.
 """
 if not config_path.exists():
 return None
@@ -982,17 +796,12 @@ class ExtensionCatalog:
 )
 catalogs_data = data.get("catalogs", [])
 if not catalogs_data:
-# File exists but has no catalogs key or empty list - fail closed
-raise ValidationError(
-f"Catalog config {config_path} exists but contains no 'catalogs' entries. "
-f"Remove the file to use built-in defaults, or add valid catalog entries."
-)
+return None
 if not isinstance(catalogs_data, list):
 raise ValidationError(
 f"Invalid catalog config: 'catalogs' must be a list, got {type(catalogs_data).__name__}"
 )
 entries: List[CatalogEntry] = []
-skipped_entries: List[int] = []
 for idx, item in enumerate(catalogs_data):
 if not isinstance(item, dict):
 raise ValidationError(
@@ -1000,7 +809,6 @@ class ExtensionCatalog:
 )
 url = str(item.get("url", "")).strip()
 if not url:
-skipped_entries.append(idx)
 continue
 self._validate_catalog_url(url)
 try:
@@ -1023,14 +831,7 @@ class ExtensionCatalog:
 description=str(item.get("description", "")),
 ))
 entries.sort(key=lambda e: e.priority)
-if not entries:
-# All entries were invalid (missing URLs) - fail closed for security
-raise ValidationError(
-f"Catalog config {config_path} contains {len(catalogs_data)} entries but none have valid URLs "
-f"(entries at indices {skipped_entries} were skipped). "
-f"Each catalog entry must have a 'url' field."
-)
-return entries
+return entries if entries else None

 def get_active_catalogs(self) -> List[CatalogEntry]:
 """Get the ordered list of active catalogs.
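The difference between the two sides is fail-open versus fail-closed when a catalog config exists but is unusable. A hedged sketch of the fail-closed reading, using a locally defined ValidationError as a placeholder for the module's exception:

```python
class ValidationError(Exception):
    pass

def load_catalog_urls(data: dict, config_path: str) -> list[str]:
    """Collect catalog URLs, refusing to fall back silently when the file is unusable."""
    catalogs = data.get("catalogs", [])
    if not catalogs:
        raise ValidationError(
            f"Catalog config {config_path} exists but contains no 'catalogs' entries."
        )
    urls = [str(item.get("url", "")).strip() for item in catalogs if isinstance(item, dict)]
    urls = [u for u in urls if u]
    if not urls:
        raise ValidationError(f"Catalog config {config_path} has entries but no valid URLs.")
    return urls
```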
|
|||||||
@@ -7,7 +7,6 @@ Presets are self-contained, versioned collections of templates
|
|||||||
customize the Spec-Driven Development workflow.
|
customize the Spec-Driven Development workflow.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import copy
|
|
||||||
import json
|
import json
|
||||||
import hashlib
|
import hashlib
|
||||||
import os
|
import os
|
||||||
@@ -24,8 +23,6 @@ import yaml
|
|||||||
from packaging import version as pkg_version
|
from packaging import version as pkg_version
|
||||||
from packaging.specifiers import SpecifierSet, InvalidSpecifier
|
from packaging.specifiers import SpecifierSet, InvalidSpecifier
|
||||||
|
|
||||||
from .extensions import ExtensionRegistry, normalize_priority
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class PresetCatalogEntry:
|
class PresetCatalogEntry:
|
||||||
@@ -142,15 +139,6 @@ class PresetManifest:
|
|||||||
f"must be one of {sorted(VALID_PRESET_TEMPLATE_TYPES)}"
|
f"must be one of {sorted(VALID_PRESET_TEMPLATE_TYPES)}"
|
||||||
)
|
)
|
||||||
|
|
||||||
# Validate file path safety: must be relative, no parent traversal
|
|
||||||
file_path = tmpl["file"]
|
|
||||||
normalized = os.path.normpath(file_path)
|
|
||||||
if os.path.isabs(normalized) or normalized.startswith(".."):
|
|
||||||
raise PresetValidationError(
|
|
||||||
f"Invalid template file path '{file_path}': "
|
|
||||||
"must be a relative path within the preset directory"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Validate template name format
|
# Validate template name format
|
||||||
if tmpl["type"] == "command":
|
if tmpl["type"] == "command":
|
||||||
# Commands use dot notation (e.g. speckit.specify)
|
# Commands use dot notation (e.g. speckit.specify)
|
||||||
@@ -238,17 +226,7 @@ class PresetRegistry:
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
with open(self.registry_path, 'r') as f:
|
with open(self.registry_path, 'r') as f:
|
||||||
data = json.load(f)
|
return json.load(f)
|
||||||
# Validate loaded data is a dict (handles corrupted registry files)
|
|
||||||
if not isinstance(data, dict):
|
|
||||||
return {
|
|
||||||
"schema_version": self.SCHEMA_VERSION,
|
|
||||||
"presets": {}
|
|
||||||
}
|
|
||||||
# Normalize presets field (handles corrupted presets value)
|
|
||||||
if not isinstance(data.get("presets"), dict):
|
|
||||||
data["presets"] = {}
|
|
||||||
return data
|
|
||||||
except (json.JSONDecodeError, FileNotFoundError):
|
except (json.JSONDecodeError, FileNotFoundError):
|
||||||
return {
|
return {
|
||||||
"schema_version": self.SCHEMA_VERSION,
|
"schema_version": self.SCHEMA_VERSION,
|
||||||
@@ -269,7 +247,7 @@ class PresetRegistry:
|
|||||||
metadata: Pack metadata (version, source, etc.)
|
metadata: Pack metadata (version, source, etc.)
|
||||||
"""
|
"""
|
||||||
self.data["presets"][pack_id] = {
|
self.data["presets"][pack_id] = {
|
||||||
**copy.deepcopy(metadata),
|
**metadata,
|
||||||
"installed_at": datetime.now(timezone.utc).isoformat()
|
"installed_at": datetime.now(timezone.utc).isoformat()
|
||||||
}
|
}
|
||||||
self._save()
|
self._save()
|
||||||
@@ -280,152 +258,41 @@ class PresetRegistry:
|
|||||||
Args:
|
Args:
|
||||||
pack_id: Preset ID
|
pack_id: Preset ID
|
||||||
"""
|
"""
|
||||||
packs = self.data.get("presets")
|
if pack_id in self.data["presets"]:
|
||||||
if not isinstance(packs, dict):
|
del self.data["presets"][pack_id]
|
||||||
return
|
|
||||||
if pack_id in packs:
|
|
||||||
del packs[pack_id]
|
|
||||||
self._save()
|
self._save()
|
||||||
|
|
||||||
def update(self, pack_id: str, updates: dict):
|
|
||||||
"""Update preset metadata in registry.
|
|
||||||
|
|
||||||
Merges the provided updates with the existing entry, preserving any
|
|
||||||
fields not specified. The installed_at timestamp is always preserved
|
|
||||||
from the original entry.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
pack_id: Preset ID
|
|
||||||
updates: Partial metadata to merge into existing metadata
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
KeyError: If preset is not installed
|
|
||||||
"""
|
|
||||||
packs = self.data.get("presets")
|
|
||||||
if not isinstance(packs, dict) or pack_id not in packs:
|
|
||||||
raise KeyError(f"Preset '{pack_id}' not found in registry")
|
|
||||||
existing = packs[pack_id]
|
|
||||||
# Handle corrupted registry entries (e.g., string/list instead of dict)
|
|
||||||
if not isinstance(existing, dict):
|
|
||||||
existing = {}
|
|
||||||
# Merge: existing fields preserved, new fields override (deep copy to prevent caller mutation)
|
|
||||||
merged = {**existing, **copy.deepcopy(updates)}
|
|
||||||
# Always preserve original installed_at based on key existence, not truthiness,
|
|
||||||
# to handle cases where the field exists but may be falsy (legacy/corruption)
|
|
||||||
if "installed_at" in existing:
|
|
||||||
merged["installed_at"] = existing["installed_at"]
|
|
||||||
else:
|
|
||||||
# If not present in existing, explicitly remove from merged if caller provided it
|
|
||||||
merged.pop("installed_at", None)
|
|
||||||
packs[pack_id] = merged
|
|
||||||
self._save()
|
|
||||||
|
|
||||||
def restore(self, pack_id: str, metadata: dict):
|
|
||||||
"""Restore preset metadata to registry without modifying timestamps.
|
|
||||||
|
|
||||||
Use this method for rollback scenarios where you have a complete backup
|
|
||||||
of the registry entry (including installed_at) and want to restore it
|
|
||||||
exactly as it was.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
pack_id: Preset ID
|
|
||||||
metadata: Complete preset metadata including installed_at
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
ValueError: If metadata is None or not a dict
|
|
||||||
"""
|
|
||||||
if metadata is None or not isinstance(metadata, dict):
|
|
||||||
raise ValueError(f"Cannot restore '{pack_id}': metadata must be a dict")
|
|
||||||
# Ensure presets dict exists (handle corrupted registry)
|
|
||||||
if not isinstance(self.data.get("presets"), dict):
|
|
||||||
self.data["presets"] = {}
|
|
||||||
self.data["presets"][pack_id] = copy.deepcopy(metadata)
|
|
||||||
self._save()
|
|
||||||
|
|
||||||
def get(self, pack_id: str) -> Optional[dict]:
|
def get(self, pack_id: str) -> Optional[dict]:
|
||||||
"""Get preset metadata from registry.
|
"""Get preset metadata from registry.
|
||||||
|
|
||||||
Returns a deep copy to prevent callers from accidentally mutating
|
|
||||||
nested internal registry state without going through the write path.
|
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
pack_id: Preset ID
|
pack_id: Preset ID
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Deep copy of preset metadata, or None if not found or corrupted
|
Pack metadata or None if not found
|
||||||
"""
|
"""
|
||||||
packs = self.data.get("presets")
|
return self.data["presets"].get(pack_id)
|
||||||
if not isinstance(packs, dict):
|
|
||||||
return None
|
|
||||||
entry = packs.get(pack_id)
|
|
||||||
# Return None for missing or corrupted (non-dict) entries
|
|
||||||
if entry is None or not isinstance(entry, dict):
|
|
||||||
return None
|
|
||||||
return copy.deepcopy(entry)
|
|
||||||
|
|
||||||
def list(self) -> Dict[str, dict]:
|
def list(self) -> Dict[str, dict]:
|
||||||
"""Get all installed presets with valid metadata.
|
"""Get all installed presets.
|
||||||
|
|
||||||
Returns a deep copy of presets with dict metadata only.
|
|
||||||
Corrupted entries (non-dict values) are filtered out.
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Dictionary of pack_id -> metadata (deep copies), empty dict if corrupted
|
Dictionary of pack_id -> metadata
|
||||||
"""
|
"""
|
||||||
packs = self.data.get("presets", {}) or {}
|
return self.data["presets"]
|
||||||
if not isinstance(packs, dict):
|
|
||||||
return {}
|
|
||||||
# Filter to only valid dict entries to match type contract
|
|
||||||
return {
|
|
||||||
pack_id: copy.deepcopy(meta)
|
|
||||||
for pack_id, meta in packs.items()
|
|
||||||
if isinstance(meta, dict)
|
|
||||||
}
|
|
||||||
|
|
||||||
def keys(self) -> set:
|
def list_by_priority(self) -> List[tuple]:
|
||||||
"""Get all preset IDs including corrupted entries.
|
|
||||||
|
|
||||||
Lightweight method that returns IDs without deep-copying metadata.
|
|
||||||
Use this when you only need to check which presets are tracked.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Set of preset IDs (includes corrupted entries)
|
|
||||||
"""
|
|
||||||
packs = self.data.get("presets", {}) or {}
|
|
||||||
if not isinstance(packs, dict):
|
|
||||||
return set()
|
|
||||||
return set(packs.keys())
|
|
||||||
|
|
||||||
def list_by_priority(self, include_disabled: bool = False) -> List[tuple]:
|
|
||||||
"""Get all installed presets sorted by priority.
|
"""Get all installed presets sorted by priority.
|
||||||
|
|
||||||
Lower priority number = higher precedence (checked first).
|
Lower priority number = higher precedence (checked first).
|
||||||
Presets with equal priority are sorted alphabetically by ID
|
|
||||||
for deterministic ordering.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
include_disabled: If True, include disabled presets. Default False.
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
List of (pack_id, metadata_copy) tuples sorted by priority.
|
List of (pack_id, metadata) tuples sorted by priority
|
||||||
Metadata is deep-copied to prevent accidental mutation.
|
|
||||||
"""
|
"""
|
||||||
packs = self.data.get("presets", {}) or {}
|
packs = self.data["presets"]
|
||||||
if not isinstance(packs, dict):
|
|
||||||
packs = {}
|
|
||||||
sortable_packs = []
|
|
||||||
for pack_id, meta in packs.items():
|
|
||||||
if not isinstance(meta, dict):
|
|
||||||
continue
|
|
||||||
# Skip disabled presets unless explicitly requested
|
|
||||||
if not include_disabled and not meta.get("enabled", True):
|
|
||||||
continue
|
|
||||||
metadata_copy = copy.deepcopy(meta)
|
|
||||||
metadata_copy["priority"] = normalize_priority(metadata_copy.get("priority", 10))
|
|
||||||
sortable_packs.append((pack_id, metadata_copy))
|
|
||||||
return sorted(
|
return sorted(
|
||||||
sortable_packs,
|
packs.items(),
|
||||||
key=lambda item: (item[1]["priority"], item[0]),
|
key=lambda item: item[1].get("priority", 10),
|
||||||
)
|
)
|
||||||
|
|
||||||
def is_installed(self, pack_id: str) -> bool:
|
def is_installed(self, pack_id: str) -> bool:
|
||||||
@@ -435,12 +302,9 @@ class PresetRegistry:
|
|||||||
pack_id: Preset ID
|
pack_id: Preset ID
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
True if pack is installed, False if not or registry corrupted
|
True if pack is installed
|
||||||
"""
|
"""
|
||||||
packs = self.data.get("presets")
|
return pack_id in self.data["presets"]
|
||||||
if not isinstance(packs, dict):
|
|
||||||
return False
|
|
||||||
return pack_id in packs
|
|
||||||
|
|
||||||
|
|
||||||
class PresetManager:
|
class PresetManager:
|
||||||
@@ -500,7 +364,7 @@ class PresetManager:
|
|||||||
|
|
||||||
Scans the preset's templates for type "command", reads each command
|
Scans the preset's templates for type "command", reads each command
|
||||||
file, and writes it to every detected agent directory using the
|
file, and writes it to every detected agent directory using the
|
||||||
CommandRegistrar from the agents module.
|
CommandRegistrar from the extensions module.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
manifest: Preset manifest
|
manifest: Preset manifest
|
||||||
@@ -560,7 +424,7 @@ class PresetManager:
|
|||||||
|
|
||||||
Reads ``.specify/init-options.json`` to determine whether skills
|
Reads ``.specify/init-options.json`` to determine whether skills
|
||||||
are enabled and which agent was selected, then delegates to
|
are enabled and which agent was selected, then delegates to
|
||||||
the module-level ``_get_skills_dir()`` helper for the concrete path.
|
``_get_skills_dir()`` for the concrete path.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
The skills directory ``Path``, or ``None`` if skills were not
|
The skills directory ``Path``, or ``None`` if skills were not
|
||||||
@@ -609,33 +473,15 @@ class PresetManager:
|
|||||||
if not command_templates:
|
if not command_templates:
|
||||||
return []
|
return []
|
||||||
|
|
||||||
# Filter out extension command overrides if the extension isn't installed,
|
|
||||||
# matching the same logic used by _register_commands().
|
|
||||||
extensions_dir = self.project_root / ".specify" / "extensions"
|
|
||||||
filtered = []
|
|
||||||
for cmd in command_templates:
|
|
||||||
parts = cmd["name"].split(".")
|
|
||||||
if len(parts) >= 3 and parts[0] == "speckit":
|
|
||||||
ext_id = parts[1]
|
|
||||||
if not (extensions_dir / ext_id).is_dir():
|
|
||||||
continue
|
|
||||||
filtered.append(cmd)
|
|
||||||
|
|
||||||
if not filtered:
|
|
||||||
return []
|
|
||||||
|
|
||||||
skills_dir = self._get_skills_dir()
|
skills_dir = self._get_skills_dir()
|
||||||
if not skills_dir:
|
if not skills_dir:
|
||||||
return []
|
return []
|
||||||
|
|
||||||
from . import SKILL_DESCRIPTIONS, load_init_options
|
from . import SKILL_DESCRIPTIONS
|
||||||
|
|
||||||
opts = load_init_options(self.project_root)
|
|
||||||
selected_ai = opts.get("ai", "")
|
|
||||||
|
|
||||||
written: List[str] = []
|
written: List[str] = []
|
||||||
|
|
||||||
for cmd_tmpl in filtered:
|
for cmd_tmpl in command_templates:
|
||||||
cmd_name = cmd_tmpl["name"]
|
cmd_name = cmd_tmpl["name"]
|
||||||
cmd_file_rel = cmd_tmpl["file"]
|
cmd_file_rel = cmd_tmpl["file"]
|
||||||
source_file = preset_dir / cmd_file_rel
|
source_file = preset_dir / cmd_file_rel
|
||||||
@@ -646,10 +492,7 @@ class PresetManager:
|
|||||||
short_name = cmd_name
|
short_name = cmd_name
|
||||||
if short_name.startswith("speckit."):
|
if short_name.startswith("speckit."):
|
||||||
short_name = short_name[len("speckit."):]
|
short_name = short_name[len("speckit."):]
|
||||||
if selected_ai == "kimi":
|
skill_name = f"speckit-{short_name}"
|
||||||
skill_name = f"speckit.{short_name}"
|
|
||||||
else:
|
|
||||||
skill_name = f"speckit-{short_name}"
|
|
||||||
|
|
||||||
# Only overwrite if the skill already exists (i.e. --ai-skills was used)
|
# Only overwrite if the skill already exists (i.e. --ai-skills was used)
|
||||||
skill_subdir = skills_dir / skill_name
|
skill_subdir = skills_dir / skill_name
|
||||||
@@ -730,8 +573,6 @@ class PresetManager:
|
|||||||
short_name = skill_name
|
short_name = skill_name
|
||||||
if short_name.startswith("speckit-"):
|
if short_name.startswith("speckit-"):
|
||||||
short_name = short_name[len("speckit-"):]
|
short_name = short_name[len("speckit-"):]
|
||||||
elif short_name.startswith("speckit."):
|
|
||||||
short_name = short_name[len("speckit."):]
|
|
||||||
|
|
||||||
skill_subdir = skills_dir / skill_name
|
skill_subdir = skills_dir / skill_name
|
||||||
skill_file = skill_subdir / "SKILL.md"
|
skill_file = skill_subdir / "SKILL.md"
|
||||||
@@ -805,13 +646,9 @@ class PresetManager:
|
|||||||
Installed preset manifest
|
Installed preset manifest
|
||||||
|
|
||||||
Raises:
|
Raises:
|
||||||
PresetValidationError: If manifest is invalid or priority is invalid
|
PresetValidationError: If manifest is invalid
|
||||||
PresetCompatibilityError: If pack is incompatible
|
PresetCompatibilityError: If pack is incompatible
|
||||||
"""
|
"""
|
||||||
# Validate priority
|
|
||||||
if priority < 1:
|
|
||||||
raise PresetValidationError("Priority must be a positive integer (1 or higher)")
|
|
||||||
|
|
||||||
manifest_path = source_dir / "preset.yml"
|
manifest_path = source_dir / "preset.yml"
|
||||||
manifest = PresetManifest(manifest_path)
|
manifest = PresetManifest(manifest_path)
|
||||||
|
|
||||||
@@ -858,19 +695,14 @@ class PresetManager:
|
|||||||
Args:
|
Args:
|
||||||
zip_path: Path to preset ZIP file
|
zip_path: Path to preset ZIP file
|
||||||
speckit_version: Current spec-kit version
|
speckit_version: Current spec-kit version
|
||||||
priority: Resolution priority (lower = higher precedence, default 10)
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Installed preset manifest
|
Installed preset manifest
|
||||||
|
|
||||||
Raises:
|
Raises:
|
||||||
PresetValidationError: If manifest is invalid or priority is invalid
|
PresetValidationError: If manifest is invalid
|
||||||
PresetCompatibilityError: If pack is incompatible
|
PresetCompatibilityError: If pack is incompatible
|
||||||
"""
|
"""
|
||||||
# Validate priority early
|
|
||||||
if priority < 1:
|
|
||||||
raise PresetValidationError("Priority must be a positive integer (1 or higher)")
|
|
||||||
|
|
||||||
with tempfile.TemporaryDirectory() as tmpdir:
|
with tempfile.TemporaryDirectory() as tmpdir:
|
||||||
temp_path = Path(tmpdir)
|
temp_path = Path(tmpdir)
|
||||||
|
|
||||||
@@ -942,9 +774,6 @@ class PresetManager:
|
|||||||
result = []
|
result = []
|
||||||
|
|
||||||
for pack_id, metadata in self.registry.list().items():
|
for pack_id, metadata in self.registry.list().items():
|
||||||
# Ensure metadata is a dictionary to avoid AttributeError when using .get()
|
|
||||||
if not isinstance(metadata, dict):
|
|
||||||
metadata = {}
|
|
||||||
pack_dir = self.presets_dir / pack_id
|
pack_dir = self.presets_dir / pack_id
|
||||||
manifest_path = pack_dir / "preset.yml"
|
manifest_path = pack_dir / "preset.yml"
|
||||||
|
|
||||||
@@ -953,13 +782,13 @@ class PresetManager:
|
|||||||
result.append({
|
result.append({
|
||||||
"id": pack_id,
|
"id": pack_id,
|
||||||
"name": manifest.name,
|
"name": manifest.name,
|
||||||
"version": metadata.get("version", manifest.version),
|
"version": metadata["version"],
|
||||||
"description": manifest.description,
|
"description": manifest.description,
|
||||||
"enabled": metadata.get("enabled", True),
|
"enabled": metadata.get("enabled", True),
|
||||||
"installed_at": metadata.get("installed_at"),
|
"installed_at": metadata.get("installed_at"),
|
||||||
"template_count": len(manifest.templates),
|
"template_count": len(manifest.templates),
|
||||||
"tags": manifest.tags,
|
"tags": manifest.tags,
|
||||||
"priority": normalize_priority(metadata.get("priority")),
|
"priority": metadata.get("priority", 10),
|
||||||
})
|
})
|
||||||
except PresetValidationError:
|
except PresetValidationError:
|
||||||
result.append({
|
result.append({
|
||||||
@@ -971,7 +800,7 @@ class PresetManager:
|
|||||||
"installed_at": metadata.get("installed_at"),
|
"installed_at": metadata.get("installed_at"),
|
||||||
"template_count": 0,
|
"template_count": 0,
|
||||||
"tags": [],
|
"tags": [],
|
||||||
"priority": normalize_priority(metadata.get("priority")),
|
"priority": metadata.get("priority", 10),
|
||||||
})
|
})
|
||||||
|
|
||||||
return result
|
return result
|
||||||
@@ -1067,10 +896,6 @@ class PresetCatalog:
|
|||||||
raise PresetValidationError(
|
raise PresetValidationError(
|
||||||
f"Failed to read catalog config {config_path}: {e}"
|
f"Failed to read catalog config {config_path}: {e}"
|
||||||
)
|
)
|
||||||
if not isinstance(data, dict):
|
|
||||||
raise PresetValidationError(
|
|
||||||
f"Invalid catalog config {config_path}: expected a mapping at root, got {type(data).__name__}"
|
|
||||||
)
|
|
||||||
catalogs_data = data.get("catalogs", [])
|
catalogs_data = data.get("catalogs", [])
|
||||||
if not catalogs_data:
|
if not catalogs_data:
|
||||||
return None
|
return None
|
||||||
@@ -1501,11 +1326,11 @@ class PresetCatalog:
|
|||||||
raise PresetError(f"Failed to save preset ZIP: {e}")
|
raise PresetError(f"Failed to save preset ZIP: {e}")
|
||||||
|
|
||||||
def clear_cache(self):
|
def clear_cache(self):
|
||||||
"""Clear all catalog cache files, including per-URL hashed caches."""
|
"""Clear the catalog cache."""
|
||||||
if self.cache_dir.exists():
|
if self.cache_file.exists():
|
||||||
for f in self.cache_dir.iterdir():
|
self.cache_file.unlink()
|
||||||
if f.is_file() and f.name.startswith("catalog"):
|
if self.cache_metadata_file.exists():
|
||||||
f.unlink(missing_ok=True)
|
self.cache_metadata_file.unlink()
|
||||||
|
|
||||||
|
|
||||||
class PresetResolver:
|
class PresetResolver:
|
||||||
@@ -1530,48 +1355,6 @@ class PresetResolver:
|
|||||||
self.overrides_dir = self.templates_dir / "overrides"
|
self.overrides_dir = self.templates_dir / "overrides"
|
||||||
self.extensions_dir = project_root / ".specify" / "extensions"
|
self.extensions_dir = project_root / ".specify" / "extensions"
|
||||||
|
|
||||||
def _get_all_extensions_by_priority(self) -> list[tuple[int, str, dict | None]]:
|
|
||||||
"""Build unified list of registered and unregistered extensions sorted by priority.
|
|
||||||
|
|
||||||
Registered extensions use their stored priority; unregistered directories
|
|
||||||
get implicit priority=10. Results are sorted by (priority, ext_id) for
|
|
||||||
deterministic ordering.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
List of (priority, ext_id, metadata_or_none) tuples sorted by priority.
|
|
||||||
"""
|
|
||||||
if not self.extensions_dir.exists():
|
|
||||||
return []
|
|
||||||
|
|
||||||
registry = ExtensionRegistry(self.extensions_dir)
|
|
||||||
# Use keys() to track ALL extensions (including corrupted entries) without deep copy
|
|
||||||
# This prevents corrupted entries from being picked up as "unregistered" dirs
|
|
||||||
registered_extension_ids = registry.keys()
|
|
||||||
|
|
||||||
# Get all registered extensions including disabled; we filter disabled manually below
|
|
||||||
all_registered = registry.list_by_priority(include_disabled=True)
|
|
||||||
|
|
||||||
all_extensions: list[tuple[int, str, dict | None]] = []
|
|
||||||
|
|
||||||
# Only include enabled extensions in the result
|
|
||||||
for ext_id, metadata in all_registered:
|
|
||||||
# Skip disabled extensions
|
|
||||||
if not metadata.get("enabled", True):
|
|
||||||
continue
|
|
||||||
priority = normalize_priority(metadata.get("priority") if metadata else None)
|
|
||||||
all_extensions.append((priority, ext_id, metadata))
|
|
||||||
|
|
||||||
# Add unregistered directories with implicit priority=10
|
|
||||||
for ext_dir in self.extensions_dir.iterdir():
|
|
||||||
if not ext_dir.is_dir() or ext_dir.name.startswith("."):
|
|
||||||
continue
|
|
||||||
if ext_dir.name not in registered_extension_ids:
|
|
||||||
all_extensions.append((10, ext_dir.name, None))
|
|
||||||
|
|
||||||
# Sort by (priority, ext_id) for deterministic ordering
|
|
||||||
all_extensions.sort(key=lambda x: (x[0], x[1]))
|
|
||||||
return all_extensions
|
|
||||||
|
|
||||||
def resolve(
|
def resolve(
|
||||||
self,
|
self,
|
||||||
template_name: str,
|
template_name: str,
|
||||||
@@ -1624,18 +1407,18 @@ class PresetResolver:
 if candidate.exists():
 return candidate

-# Priority 3: Extension-provided templates (sorted by priority — lower number wins)
-for _priority, ext_id, _metadata in self._get_all_extensions_by_priority():
-ext_dir = self.extensions_dir / ext_id
-if not ext_dir.is_dir():
+# Priority 3: Extension-provided templates
+if self.extensions_dir.exists():
+for ext_dir in sorted(self.extensions_dir.iterdir()):
+if not ext_dir.is_dir() or ext_dir.name.startswith("."):
 continue
 for subdir in subdirs:
 if subdir:
 candidate = ext_dir / subdir / f"{template_name}{ext}"
 else:
-candidate = ext_dir / f"{template_name}{ext}"
+candidate = ext_dir / "templates" / f"{template_name}{ext}"
 if candidate.exists():
 return candidate

 # Priority 4: Core templates
 if template_type == "template":
@@ -1693,24 +1476,17 @@ class PresetResolver:
 except ValueError:
 continue

-for _priority, ext_id, ext_meta in self._get_all_extensions_by_priority():
-ext_dir = self.extensions_dir / ext_id
-if not ext_dir.is_dir():
+if self.extensions_dir.exists():
+for ext_dir in sorted(self.extensions_dir.iterdir()):
+if not ext_dir.is_dir() or ext_dir.name.startswith("."):
 continue
 try:
 resolved.relative_to(ext_dir)
-if ext_meta:
-version = ext_meta.get("version", "?")
 return {
 "path": resolved_str,
-"source": f"extension:{ext_id} v{version}",
+"source": f"extension:{ext_dir.name}",
 }
-else:
-return {
-"path": resolved_str,
-"source": f"extension:{ext_id} (unregistered)",
-}
-except ValueError:
-continue
+except ValueError:
+continue

 return {"path": resolved_str, "source": "core"}

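A simplified sketch of the lookup order visible in the hunks above: extension-provided template directories are checked before the core templates, with the iteration order of the extension directories supplying the precedence. Directory names and extensions here are assumptions for illustration.

```python
from pathlib import Path

def resolve_template(template_name: str, extension_dirs: list[Path], core_dir: Path) -> Path | None:
    """Check extension template dirs in order before falling back to core templates."""
    for ext_dir in extension_dirs:  # assumed already ordered (e.g. by priority, then id)
        candidate = ext_dir / "templates" / f"{template_name}.md"
        if candidate.exists():
            return candidate
    candidate = core_dir / f"{template_name}.md"
    return candidate if candidate.exists() else None
```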
@@ -19,7 +19,7 @@ You **MUST** consider the user input before proceeding (if not empty).
 - Check if `.specify/extensions.yml` exists in the project root.
 - If it exists, read it and look for entries under the `hooks.before_implement` key
 - If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
-- Filter out hooks where `enabled` is explicitly `false`. Treat hooks without an `enabled` field as enabled by default.
+- Filter to only hooks where `enabled: true`
 - For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
 - If the hook has no `condition` field, or it is null/empty, treat the hook as executable
 - If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
@@ -174,7 +174,7 @@ Note: This command assumes a complete task breakdown exists in tasks.md. If task
 10. **Check for extension hooks**: After completion validation, check if `.specify/extensions.yml` exists in the project root.
 - If it exists, read it and look for entries under the `hooks.after_implement` key
 - If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
-- Filter out hooks where `enabled` is explicitly `false`. Treat hooks without an `enabled` field as enabled by default.
+- Filter to only hooks where `enabled: true`
 - For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
 - If the hook has no `condition` field, or it is null/empty, treat the hook as executable
 - If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation

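The two wordings imply different defaults for hooks that omit `enabled`. A hedged sketch of the "enabled unless explicitly false" reading, assuming PyYAML is available and that hook entries carry `command`, `optional`, and `enabled` fields as the templates suggest; the hook names are invented.

```python
import yaml

doc = yaml.safe_load("""
hooks:
  before_implement:
    - command: speckit.docs.audit
      optional: true          # no 'enabled' key -> treated as enabled
    - command: speckit.legacy.check
      enabled: false          # explicitly disabled -> filtered out
""")

hooks = doc.get("hooks", {}).get("before_implement", []) or []
runnable = [h for h in hooks if h.get("enabled", True)]
assert [h["command"] for h in runnable] == ["speckit.docs.audit"]
```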
@@ -24,40 +24,6 @@ $ARGUMENTS
|
|||||||
|
|
||||||
You **MUST** consider the user input before proceeding (if not empty).
|
You **MUST** consider the user input before proceeding (if not empty).
|
||||||
|
|
||||||
## Pre-Execution Checks
|
|
||||||
|
|
||||||
**Check for extension hooks (before planning)**:
|
|
||||||
- Check if `.specify/extensions.yml` exists in the project root.
|
|
||||||
- If it exists, read it and look for entries under the `hooks.before_plan` key
|
|
||||||
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
|
|
||||||
- Filter out hooks where `enabled` is explicitly `false`. Treat hooks without an `enabled` field as enabled by default.
|
|
||||||
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
|
|
||||||
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
|
|
||||||
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
|
|
||||||
- For each executable hook, output the following based on its `optional` flag:
|
|
||||||
- **Optional hook** (`optional: true`):
|
|
||||||
```
|
|
||||||
## Extension Hooks
|
|
||||||
|
|
||||||
**Optional Pre-Hook**: {extension}
|
|
||||||
Command: `/{command}`
|
|
||||||
Description: {description}
|
|
||||||
|
|
||||||
Prompt: {prompt}
|
|
||||||
To execute: `/{command}`
|
|
||||||
```
|
|
||||||
- **Mandatory hook** (`optional: false`):
|
|
||||||
```
|
|
||||||
## Extension Hooks
|
|
||||||
|
|
||||||
**Automatic Pre-Hook**: {extension}
|
|
||||||
Executing: `/{command}`
|
|
||||||
EXECUTE_COMMAND: {command}
|
|
||||||
|
|
||||||
Wait for the result of the hook command before proceeding to the Outline.
|
|
||||||
```
|
|
||||||
- If no hooks are registered or `.specify/extensions.yml` does not exist, skip silently
|
|
||||||
|
|
||||||
## Outline
|
## Outline
|
||||||
|
|
||||||
1. **Setup**: Run `{SCRIPT}` from repo root and parse JSON for FEATURE_SPEC, IMPL_PLAN, SPECS_DIR, BRANCH. For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
|
1. **Setup**: Run `{SCRIPT}` from repo root and parse JSON for FEATURE_SPEC, IMPL_PLAN, SPECS_DIR, BRANCH. For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||||
@@ -75,35 +41,6 @@ You **MUST** consider the user input before proceeding (if not empty).
|
|||||||
|
|
||||||
4. **Stop and report**: Command ends after Phase 2 planning. Report branch, IMPL_PLAN path, and generated artifacts.
|
4. **Stop and report**: Command ends after Phase 2 planning. Report branch, IMPL_PLAN path, and generated artifacts.
|
||||||
|
|
||||||
5. **Check for extension hooks**: After reporting, check if `.specify/extensions.yml` exists in the project root.
|
|
||||||
- If it exists, read it and look for entries under the `hooks.after_plan` key
|
|
||||||
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
|
|
||||||
- Filter out hooks where `enabled` is explicitly `false`. Treat hooks without an `enabled` field as enabled by default.
|
|
||||||
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
|
|
||||||
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
|
|
||||||
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
|
|
||||||
- For each executable hook, output the following based on its `optional` flag:
|
|
||||||
- **Optional hook** (`optional: true`):
|
|
||||||
```
|
|
||||||
## Extension Hooks
|
|
||||||
|
|
||||||
**Optional Hook**: {extension}
|
|
||||||
Command: `/{command}`
|
|
||||||
Description: {description}
|
|
||||||
|
|
||||||
Prompt: {prompt}
|
|
||||||
To execute: `/{command}`
|
|
||||||
```
|
|
||||||
- **Mandatory hook** (`optional: false`):
|
|
||||||
```
|
|
||||||
## Extension Hooks
|
|
||||||
|
|
||||||
**Automatic Hook**: {extension}
|
|
||||||
Executing: `/{command}`
|
|
||||||
EXECUTE_COMMAND: {command}
|
|
||||||
```
|
|
||||||
- If no hooks are registered or `.specify/extensions.yml` does not exist, skip silently
|
|
||||||
|
|
||||||
## Phases
|
## Phases
|
||||||
|
|
||||||
### Phase 0: Outline & Research
|
### Phase 0: Outline & Research
|
||||||
|
|||||||
@@ -21,40 +21,6 @@ $ARGUMENTS
|
|||||||
|
|
||||||
You **MUST** consider the user input before proceeding (if not empty).
|
You **MUST** consider the user input before proceeding (if not empty).
|
||||||
|
|
||||||
## Pre-Execution Checks
|
|
||||||
|
|
||||||
**Check for extension hooks (before specification)**:
|
|
||||||
- Check if `.specify/extensions.yml` exists in the project root.
|
|
||||||
- If it exists, read it and look for entries under the `hooks.before_specify` key
|
|
||||||
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
|
|
||||||
- Filter out hooks where `enabled` is explicitly `false`. Treat hooks without an `enabled` field as enabled by default.
|
|
||||||
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
|
|
||||||
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
|
|
||||||
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
|
|
||||||
- For each executable hook, output the following based on its `optional` flag:
|
|
||||||
- **Optional hook** (`optional: true`):
|
|
||||||
```
|
|
||||||
## Extension Hooks
|
|
||||||
|
|
||||||
**Optional Pre-Hook**: {extension}
|
|
||||||
Command: `/{command}`
|
|
||||||
Description: {description}
|
|
||||||
|
|
||||||
Prompt: {prompt}
|
|
||||||
To execute: `/{command}`
|
|
||||||
```
|
|
||||||
- **Mandatory hook** (`optional: false`):
|
|
||||||
```
|
|
||||||
## Extension Hooks
|
|
||||||
|
|
||||||
**Automatic Pre-Hook**: {extension}
|
|
||||||
Executing: `/{command}`
|
|
||||||
EXECUTE_COMMAND: {command}
|
|
||||||
|
|
||||||
Wait for the result of the hook command before proceeding to the Outline.
|
|
||||||
```
|
|
||||||
- If no hooks are registered or `.specify/extensions.yml` does not exist, skip silently
|
|
||||||
|
|
||||||
## Outline
|
## Outline
|
||||||
|
|
||||||
The text the user typed after `/speckit.specify` in the triggering message **is** the feature description. Assume you always have it available in this conversation even if `{ARGS}` appears literally below. Do not ask the user to repeat it unless they provided an empty command.
|
The text the user typed after `/speckit.specify` in the triggering message **is** the feature description. Assume you always have it available in this conversation even if `{ARGS}` appears literally below. Do not ask the user to repeat it unless they provided an empty command.
|
||||||
@@ -73,16 +39,10 @@ Given that feature description, do this:

   - "Create a dashboard for analytics" → "analytics-dashboard"
   - "Fix payment processing timeout bug" → "fix-payment-timeout"

2. **Create the feature branch** by running the script with `--short-name` (and `--json`). In sequential mode, do NOT pass `--number` — the script auto-detects the next available number. In timestamp mode, the script generates a `YYYYMMDD-HHMMSS` prefix automatically:
2. **Create the feature branch** by running the script with `--short-name` (and `--json`), and do NOT pass `--number` (the script auto-detects the next globally available number across all branches and spec directories):

   **Branch numbering mode**: Before running the script, check if `.specify/init-options.json` exists and read the `branch_numbering` value (a short sketch of this check follows this step).
   - If `"timestamp"`, add `--timestamp` (Bash) or `-Timestamp` (PowerShell) to the script invocation
   - If `"sequential"` or absent, do not add any extra flag (default behavior)

   - Bash example: `{SCRIPT} --json --short-name "user-auth" "Add user authentication"`
   - Bash (timestamp): `{SCRIPT} --json --timestamp --short-name "user-auth" "Add user authentication"`
   - PowerShell example: `{SCRIPT} -Json -ShortName "user-auth" "Add user authentication"`
   - PowerShell (timestamp): `{SCRIPT} -Json -Timestamp -ShortName "user-auth" "Add user authentication"`

   **IMPORTANT**:
   - Do NOT pass `--number` — the script determines the correct next number automatically
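As a rough illustration of the mode check described in this step, the helper below reads `.specify/init-options.json` and returns the extra flag to append to the script invocation. The helper name and return shape are hypothetical, not part of spec-kit.

```python
# Illustrative sketch only: decide which extra flag to pass to the
# create-feature script, per the branch numbering rules above.
import json
from pathlib import Path

def branch_numbering_flag(project_root: Path, powershell: bool = False) -> list[str]:
    opts_path = project_root / ".specify" / "init-options.json"
    mode = "sequential"
    if opts_path.exists():
        try:
            mode = json.loads(opts_path.read_text()).get("branch_numbering", "sequential")
        except json.JSONDecodeError:
            pass  # unreadable options file: fall back to the default (sequential)
    if mode == "timestamp":
        return ["-Timestamp"] if powershell else ["--timestamp"]
    return []  # sequential (or absent): no extra flag
```

With this shape, `branch_numbering_flag(Path("."))` yields `["--timestamp"]` only when init-options.json explicitly selects timestamp mode.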
@@ -169,7 +129,7 @@ Given that feature description, do this:

   c. **Handle Validation Results**:

      - **If all items pass**: Mark checklist complete and proceed to step 7
      - **If all items pass**: Mark checklist complete and proceed to step 6

      - **If items fail (excluding [NEEDS CLARIFICATION])**:
        1. List the failing items and specific issues
@@ -216,37 +176,10 @@ Given that feature description, do this:

7. Report completion with branch name, spec file path, checklist results, and readiness for the next phase (`/speckit.clarify` or `/speckit.plan`).

8. **Check for extension hooks**: After reporting completion, check if `.specify/extensions.yml` exists in the project root.
   - If it exists, read it and look for entries under the `hooks.after_specify` key
   - If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
   - Filter out hooks where `enabled` is explicitly `false`. Treat hooks without an `enabled` field as enabled by default.
   - For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
     - If the hook has no `condition` field, or it is null/empty, treat the hook as executable
     - If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
   - For each executable hook, output the following based on its `optional` flag:
     - **Optional hook** (`optional: true`):

       ```
       ## Extension Hooks

       **Optional Hook**: {extension}
       Command: `/{command}`
       Description: {description}

       Prompt: {prompt}
       To execute: `/{command}`
       ```

     - **Mandatory hook** (`optional: false`):

       ```
       ## Extension Hooks

       **Automatic Hook**: {extension}
       Executing: `/{command}`
       EXECUTE_COMMAND: {command}
       ```

   - If no hooks are registered or `.specify/extensions.yml` does not exist, skip silently

**NOTE:** The script creates and checks out the new branch and initializes the spec file before writing.

## General Guidelines

## Quick Guidelines

- Focus on **WHAT** users need and **WHY**.

@@ -28,7 +28,7 @@ You **MUST** consider the user input before proceeding (if not empty).

- Check if `.specify/extensions.yml` exists in the project root.
  - If it exists, read it and look for entries under the `hooks.before_tasks` key
  - If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
  - Filter out hooks where `enabled` is explicitly `false`. Treat hooks without an `enabled` field as enabled by default.
  - Filter to only hooks where `enabled: true`
  - For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
    - If the hook has no `condition` field, or it is null/empty, treat the hook as executable
    - If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation

@@ -100,7 +100,7 @@ You **MUST** consider the user input before proceeding (if not empty).

6. **Check for extension hooks**: After tasks.md is generated, check if `.specify/extensions.yml` exists in the project root.
   - If it exists, read it and look for entries under the `hooks.after_tasks` key
   - If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
   - Filter out hooks where `enabled` is explicitly `false`. Treat hooks without an `enabled` field as enabled by default.
   - Filter to only hooks where `enabled: true`
   - For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
     - If the hook has no `condition` field, or it is null/empty, treat the hook as executable
     - If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation

@@ -29,17 +29,11 @@ class TestAgentConfigConsistency:

        assert "q" not in cfg

    def test_extension_registrar_includes_codex(self):
        """Extension command registrar should include codex targeting .agents/skills."""
        """Extension command registrar should include codex targeting .codex/prompts."""
        cfg = CommandRegistrar.AGENT_CONFIGS

        assert "codex" in cfg
        assert cfg["codex"]["dir"] == ".agents/skills"
        assert cfg["codex"]["dir"] == ".codex/prompts"
        assert cfg["codex"]["extension"] == "/SKILL.md"

    def test_runtime_codex_uses_native_skills(self):
        """Codex runtime config should point at .agents/skills."""
        assert AGENT_CONFIG["codex"]["folder"] == ".agents/"
        assert AGENT_CONFIG["codex"]["commands_subdir"] == "skills"

    def test_release_agent_lists_include_kiro_cli_and_exclude_q(self):
        """Bash and PowerShell release scripts should agree on agent key set for Kiro."""

@@ -68,24 +62,7 @@ class TestAgentConfigConsistency:

        ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")

        assert re.search(r"'shai'\s*\{.*?\.shai/commands", ps_text, re.S) is not None
        assert re.search(r"'agy'\s*\{.*?\.agent/commands", ps_text, re.S) is not None
        assert re.search(r"'agy'\s*\{.*?\.agent/workflows", ps_text, re.S) is not None

    def test_release_sh_switch_has_shai_and_agy_generation(self):
        """Bash release builder must generate files for shai and agy agents."""
        sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")

        assert re.search(r"shai\)\s*\n.*?\.shai/commands", sh_text, re.S) is not None
        assert re.search(r"agy\)\s*\n.*?\.agent/commands", sh_text, re.S) is not None

    def test_release_scripts_generate_codex_skills(self):
        """Release scripts should generate Codex skills in .agents/skills."""
        sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
        ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")

        assert ".agents/skills" in sh_text
        assert ".agents/skills" in ps_text
        assert re.search(r"codex\)\s*\n.*?create_skills.*?\.agents/skills.*?\"-\"", sh_text, re.S) is not None
        assert re.search(r"'codex'\s*\{.*?\.agents/skills.*?New-Skills.*?-Separator '-'", ps_text, re.S) is not None

    def test_init_ai_help_includes_roo_and_kiro_alias(self):
        """CLI help text for --ai should stay in sync with agent config and alias guidance."""

@@ -194,276 +171,3 @@ class TestAgentConfigConsistency:

    def test_ai_help_includes_tabnine(self):
        """CLI help text for --ai should include tabnine."""
        assert "tabnine" in AI_ASSISTANT_HELP

# --- Kimi Code CLI consistency checks ---
|
|
||||||
|
|
||||||
def test_kimi_in_agent_config(self):
|
|
||||||
"""AGENT_CONFIG should include kimi with correct folder and commands_subdir."""
|
|
||||||
assert "kimi" in AGENT_CONFIG
|
|
||||||
assert AGENT_CONFIG["kimi"]["folder"] == ".kimi/"
|
|
||||||
assert AGENT_CONFIG["kimi"]["commands_subdir"] == "skills"
|
|
||||||
assert AGENT_CONFIG["kimi"]["requires_cli"] is True
|
|
||||||
|
|
||||||
def test_kimi_in_extension_registrar(self):
|
|
||||||
"""Extension command registrar should include kimi using .kimi/skills and SKILL.md."""
|
|
||||||
cfg = CommandRegistrar.AGENT_CONFIGS
|
|
||||||
|
|
||||||
assert "kimi" in cfg
|
|
||||||
kimi_cfg = cfg["kimi"]
|
|
||||||
assert kimi_cfg["dir"] == ".kimi/skills"
|
|
||||||
assert kimi_cfg["extension"] == "/SKILL.md"
|
|
||||||
|
|
||||||
def test_kimi_in_release_agent_lists(self):
|
|
||||||
"""Bash and PowerShell release scripts should include kimi in agent lists."""
|
|
||||||
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
|
|
||||||
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
sh_match = re.search(r"ALL_AGENTS=\(([^)]*)\)", sh_text)
|
|
||||||
assert sh_match is not None
|
|
||||||
sh_agents = sh_match.group(1).split()
|
|
||||||
|
|
||||||
ps_match = re.search(r"\$AllAgents = @\(([^)]*)\)", ps_text)
|
|
||||||
assert ps_match is not None
|
|
||||||
ps_agents = re.findall(r"'([^']+)'", ps_match.group(1))
|
|
||||||
|
|
||||||
assert "kimi" in sh_agents
|
|
||||||
assert "kimi" in ps_agents
|
|
||||||
|
|
||||||
def test_kimi_in_powershell_validate_set(self):
|
|
||||||
"""PowerShell update-agent-context script should include 'kimi' in ValidateSet."""
|
|
||||||
ps_text = (REPO_ROOT / "scripts" / "powershell" / "update-agent-context.ps1").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
validate_set_match = re.search(r"\[ValidateSet\(([^)]*)\)\]", ps_text)
|
|
||||||
assert validate_set_match is not None
|
|
||||||
validate_set_values = re.findall(r"'([^']+)'", validate_set_match.group(1))
|
|
||||||
|
|
||||||
assert "kimi" in validate_set_values
|
|
||||||
|
|
||||||
def test_kimi_in_github_release_output(self):
|
|
||||||
"""GitHub release script should include kimi template packages."""
|
|
||||||
gh_release_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-github-release.sh").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
assert "spec-kit-template-kimi-sh-" in gh_release_text
|
|
||||||
assert "spec-kit-template-kimi-ps-" in gh_release_text
|
|
||||||
|
|
||||||
def test_ai_help_includes_kimi(self):
|
|
||||||
"""CLI help text for --ai should include kimi."""
|
|
||||||
assert "kimi" in AI_ASSISTANT_HELP
|
|
||||||
|
|
||||||
# --- Trae IDE consistency checks ---
|
|
||||||
|
|
||||||
def test_trae_in_agent_config(self):
|
|
||||||
"""AGENT_CONFIG should include trae with correct folder and commands_subdir."""
|
|
||||||
assert "trae" in AGENT_CONFIG
|
|
||||||
assert AGENT_CONFIG["trae"]["folder"] == ".trae/"
|
|
||||||
assert AGENT_CONFIG["trae"]["commands_subdir"] == "rules"
|
|
||||||
assert AGENT_CONFIG["trae"]["requires_cli"] is False
|
|
||||||
assert AGENT_CONFIG["trae"]["install_url"] is None
|
|
||||||
|
|
||||||
def test_trae_in_extension_registrar(self):
|
|
||||||
"""Extension command registrar should include trae using .trae/rules and markdown, if present."""
|
|
||||||
cfg = CommandRegistrar.AGENT_CONFIGS
|
|
||||||
|
|
||||||
assert "trae" in cfg
|
|
||||||
trae_cfg = cfg["trae"]
|
|
||||||
assert trae_cfg["format"] == "markdown"
|
|
||||||
assert trae_cfg["args"] == "$ARGUMENTS"
|
|
||||||
assert trae_cfg["extension"] == ".md"
|
|
||||||
|
|
||||||
def test_trae_in_release_agent_lists(self):
|
|
||||||
"""Bash and PowerShell release scripts should include trae in agent lists."""
|
|
||||||
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
|
|
||||||
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
sh_match = re.search(r"ALL_AGENTS=\(([^)]*)\)", sh_text)
|
|
||||||
assert sh_match is not None
|
|
||||||
sh_agents = sh_match.group(1).split()
|
|
||||||
|
|
||||||
ps_match = re.search(r"\$AllAgents = @\(([^)]*)\)", ps_text)
|
|
||||||
assert ps_match is not None
|
|
||||||
ps_agents = re.findall(r"'([^']+)'", ps_match.group(1))
|
|
||||||
|
|
||||||
assert "trae" in sh_agents
|
|
||||||
assert "trae" in ps_agents
|
|
||||||
|
|
||||||
def test_trae_in_release_scripts_generate_commands(self):
|
|
||||||
"""Release scripts should generate markdown commands for trae in .trae/rules."""
|
|
||||||
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
|
|
||||||
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
assert ".trae/rules" in sh_text
|
|
||||||
assert ".trae/rules" in ps_text
|
|
||||||
assert re.search(r"'trae'\s*\{.*?\.trae/rules", ps_text, re.S) is not None
|
|
||||||
|
|
||||||
def test_trae_in_github_release_output(self):
|
|
||||||
"""GitHub release script should include trae template packages."""
|
|
||||||
gh_release_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-github-release.sh").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
assert "spec-kit-template-trae-sh-" in gh_release_text
|
|
||||||
assert "spec-kit-template-trae-ps-" in gh_release_text
|
|
||||||
|
|
||||||
def test_trae_in_agent_context_scripts(self):
|
|
||||||
"""Agent context scripts should support trae agent type."""
|
|
||||||
bash_text = (REPO_ROOT / "scripts" / "bash" / "update-agent-context.sh").read_text(encoding="utf-8")
|
|
||||||
pwsh_text = (REPO_ROOT / "scripts" / "powershell" / "update-agent-context.ps1").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
assert "trae" in bash_text
|
|
||||||
assert "TRAE_FILE" in bash_text
|
|
||||||
assert "trae" in pwsh_text
|
|
||||||
assert "TRAE_FILE" in pwsh_text
|
|
||||||
|
|
||||||
def test_trae_in_powershell_validate_set(self):
|
|
||||||
"""PowerShell update-agent-context script should include 'trae' in ValidateSet."""
|
|
||||||
ps_text = (REPO_ROOT / "scripts" / "powershell" / "update-agent-context.ps1").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
validate_set_match = re.search(r"\[ValidateSet\(([^)]*)\)\]", ps_text)
|
|
||||||
assert validate_set_match is not None
|
|
||||||
validate_set_values = re.findall(r"'([^']+)'", validate_set_match.group(1))
|
|
||||||
|
|
||||||
assert "trae" in validate_set_values
|
|
||||||
|
|
||||||
def test_ai_help_includes_trae(self):
|
|
||||||
"""CLI help text for --ai should include trae."""
|
|
||||||
assert "trae" in AI_ASSISTANT_HELP
|
|
||||||
|
|
||||||
# --- Pi Coding Agent consistency checks ---
|
|
||||||
|
|
||||||
def test_pi_in_agent_config(self):
|
|
||||||
"""AGENT_CONFIG should include pi with correct folder and commands_subdir."""
|
|
||||||
assert "pi" in AGENT_CONFIG
|
|
||||||
assert AGENT_CONFIG["pi"]["folder"] == ".pi/"
|
|
||||||
assert AGENT_CONFIG["pi"]["commands_subdir"] == "prompts"
|
|
||||||
assert AGENT_CONFIG["pi"]["requires_cli"] is True
|
|
||||||
assert AGENT_CONFIG["pi"]["install_url"] is not None
|
|
||||||
|
|
||||||
def test_pi_in_extension_registrar(self):
|
|
||||||
"""Extension command registrar should include pi using .pi/prompts."""
|
|
||||||
cfg = CommandRegistrar.AGENT_CONFIGS
|
|
||||||
|
|
||||||
assert "pi" in cfg
|
|
||||||
pi_cfg = cfg["pi"]
|
|
||||||
assert pi_cfg["dir"] == ".pi/prompts"
|
|
||||||
assert pi_cfg["format"] == "markdown"
|
|
||||||
assert pi_cfg["args"] == "$ARGUMENTS"
|
|
||||||
assert pi_cfg["extension"] == ".md"
|
|
||||||
|
|
||||||
def test_pi_in_release_agent_lists(self):
|
|
||||||
"""Bash and PowerShell release scripts should include pi in agent lists."""
|
|
||||||
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
|
|
||||||
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
sh_match = re.search(r"ALL_AGENTS=\(([^)]*)\)", sh_text)
|
|
||||||
assert sh_match is not None
|
|
||||||
sh_agents = sh_match.group(1).split()
|
|
||||||
|
|
||||||
ps_match = re.search(r"\$AllAgents = @\(([^)]*)\)", ps_text)
|
|
||||||
assert ps_match is not None
|
|
||||||
ps_agents = re.findall(r"'([^']+)'", ps_match.group(1))
|
|
||||||
|
|
||||||
assert "pi" in sh_agents
|
|
||||||
assert "pi" in ps_agents
|
|
||||||
|
|
||||||
def test_release_scripts_generate_pi_prompt_templates(self):
|
|
||||||
"""Release scripts should generate Markdown prompt templates for pi in .pi/prompts."""
|
|
||||||
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
|
|
||||||
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
assert ".pi/prompts" in sh_text
|
|
||||||
assert ".pi/prompts" in ps_text
|
|
||||||
assert re.search(r"pi\)\s*\n.*?\.pi/prompts", sh_text, re.S) is not None
|
|
||||||
assert re.search(r"'pi'\s*\{.*?\.pi/prompts", ps_text, re.S) is not None
|
|
||||||
|
|
||||||
def test_pi_in_powershell_validate_set(self):
|
|
||||||
"""PowerShell update-agent-context script should include 'pi' in ValidateSet."""
|
|
||||||
ps_text = (REPO_ROOT / "scripts" / "powershell" / "update-agent-context.ps1").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
validate_set_match = re.search(r"\[ValidateSet\(([^)]*)\)\]", ps_text)
|
|
||||||
assert validate_set_match is not None
|
|
||||||
validate_set_values = re.findall(r"'([^']+)'", validate_set_match.group(1))
|
|
||||||
|
|
||||||
assert "pi" in validate_set_values
|
|
||||||
|
|
||||||
def test_pi_in_github_release_output(self):
|
|
||||||
"""GitHub release script should include pi template packages."""
|
|
||||||
gh_release_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-github-release.sh").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
assert "spec-kit-template-pi-sh-" in gh_release_text
|
|
||||||
assert "spec-kit-template-pi-ps-" in gh_release_text
|
|
||||||
|
|
||||||
def test_agent_context_scripts_include_pi(self):
|
|
||||||
"""Agent context scripts should support pi agent type."""
|
|
||||||
bash_text = (REPO_ROOT / "scripts" / "bash" / "update-agent-context.sh").read_text(encoding="utf-8")
|
|
||||||
pwsh_text = (REPO_ROOT / "scripts" / "powershell" / "update-agent-context.ps1").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
assert "pi" in bash_text
|
|
||||||
assert "Pi Coding Agent" in bash_text
|
|
||||||
assert "pi" in pwsh_text
|
|
||||||
assert "Pi Coding Agent" in pwsh_text
|
|
||||||
|
|
||||||
def test_ai_help_includes_pi(self):
|
|
||||||
"""CLI help text for --ai should include pi."""
|
|
||||||
assert "pi" in AI_ASSISTANT_HELP
|
|
||||||
|
|
||||||
# --- iFlow CLI consistency checks ---
|
|
||||||
|
|
||||||
def test_iflow_in_agent_config(self):
|
|
||||||
"""AGENT_CONFIG should include iflow with correct folder and commands_subdir."""
|
|
||||||
assert "iflow" in AGENT_CONFIG
|
|
||||||
assert AGENT_CONFIG["iflow"]["folder"] == ".iflow/"
|
|
||||||
assert AGENT_CONFIG["iflow"]["commands_subdir"] == "commands"
|
|
||||||
assert AGENT_CONFIG["iflow"]["requires_cli"] is True
|
|
||||||
|
|
||||||
def test_iflow_in_extension_registrar(self):
|
|
||||||
"""Extension command registrar should include iflow targeting .iflow/commands."""
|
|
||||||
cfg = CommandRegistrar.AGENT_CONFIGS
|
|
||||||
|
|
||||||
assert "iflow" in cfg
|
|
||||||
assert cfg["iflow"]["dir"] == ".iflow/commands"
|
|
||||||
assert cfg["iflow"]["format"] == "markdown"
|
|
||||||
assert cfg["iflow"]["args"] == "$ARGUMENTS"
|
|
||||||
|
|
||||||
def test_iflow_in_release_agent_lists(self):
|
|
||||||
"""Bash and PowerShell release scripts should include iflow in agent lists."""
|
|
||||||
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
|
|
||||||
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
sh_match = re.search(r"ALL_AGENTS=\(([^)]*)\)", sh_text)
|
|
||||||
assert sh_match is not None
|
|
||||||
sh_agents = sh_match.group(1).split()
|
|
||||||
|
|
||||||
ps_match = re.search(r"\$AllAgents = @\(([^)]*)\)", ps_text)
|
|
||||||
assert ps_match is not None
|
|
||||||
ps_agents = re.findall(r"'([^']+)'", ps_match.group(1))
|
|
||||||
|
|
||||||
assert "iflow" in sh_agents
|
|
||||||
assert "iflow" in ps_agents
|
|
||||||
|
|
||||||
def test_iflow_in_release_scripts_build_variant(self):
|
|
||||||
"""Release scripts should generate Markdown commands for iflow in .iflow/commands."""
|
|
||||||
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
|
|
||||||
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
assert ".iflow/commands" in sh_text
|
|
||||||
assert ".iflow/commands" in ps_text
|
|
||||||
assert re.search(r"'iflow'\s*\{.*?\.iflow/commands", ps_text, re.S) is not None
|
|
||||||
|
|
||||||
def test_iflow_in_github_release_output(self):
|
|
||||||
"""GitHub release script should include iflow template packages."""
|
|
||||||
gh_release_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-github-release.sh").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
assert "spec-kit-template-iflow-sh-" in gh_release_text
|
|
||||||
assert "spec-kit-template-iflow-ps-" in gh_release_text
|
|
||||||
|
|
||||||
def test_iflow_in_agent_context_scripts(self):
|
|
||||||
"""Agent context scripts should support iflow agent type."""
|
|
||||||
bash_text = (REPO_ROOT / "scripts" / "bash" / "update-agent-context.sh").read_text(encoding="utf-8")
|
|
||||||
pwsh_text = (REPO_ROOT / "scripts" / "powershell" / "update-agent-context.ps1").read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
assert "iflow" in bash_text
|
|
||||||
assert "IFLOW_FILE" in bash_text
|
|
||||||
assert "iflow" in pwsh_text
|
|
||||||
assert "IFLOW_FILE" in pwsh_text
|
|
||||||
|
|
||||||
def test_ai_help_includes_iflow(self):
|
|
||||||
"""CLI help text for --ai should include iflow."""
|
|
||||||
assert "iflow" in AI_ASSISTANT_HELP
|
|
||||||
|
|||||||
@@ -62,7 +62,7 @@ def templates_dir(project_dir):

    tpl_root.mkdir(parents=True, exist_ok=True)

    # Template with valid YAML frontmatter
    (tpl_root / "speckit.specify.md").write_text(
    (tpl_root / "specify.md").write_text(
        "---\n"
        "description: Create or update the feature specification.\n"
        "handoffs:\n"

@@ -79,7 +79,7 @@ def templates_dir(project_dir):

    )

    # Template with minimal frontmatter
    (tpl_root / "speckit.plan.md").write_text(
    (tpl_root / "plan.md").write_text(
        "---\n"
        "description: Generate implementation plan.\n"
        "---\n"

@@ -91,7 +91,7 @@ def templates_dir(project_dir):

    )

    # Template with no frontmatter
    (tpl_root / "speckit.tasks.md").write_text(
    (tpl_root / "tasks.md").write_text(
        "# Tasks Command\n"
        "\n"
        "Body without frontmatter.\n",

@@ -99,7 +99,7 @@ def templates_dir(project_dir):

    )

    # Template with empty YAML frontmatter (yaml.safe_load returns None)
    (tpl_root / "speckit.empty_fm.md").write_text(
    (tpl_root / "empty_fm.md").write_text(
        "---\n"
        "---\n"
        "\n"
@@ -132,16 +132,6 @@ def commands_dir_gemini(project_dir):
|
|||||||
return cmd_dir
|
return cmd_dir
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
|
||||||
def commands_dir_qwen(project_dir):
|
|
||||||
"""Create a populated .qwen/commands directory (Markdown format)."""
|
|
||||||
cmd_dir = project_dir / ".qwen" / "commands"
|
|
||||||
cmd_dir.mkdir(parents=True, exist_ok=True)
|
|
||||||
for name in ["speckit.specify.md", "speckit.plan.md", "speckit.tasks.md"]:
|
|
||||||
(cmd_dir / name).write_text(f"# {name}\nContent here\n")
|
|
||||||
return cmd_dir
|
|
||||||
|
|
||||||
|
|
||||||
# ===== _get_skills_dir Tests =====
|
# ===== _get_skills_dir Tests =====
|
||||||
|
|
||||||
class TestGetSkillsDir:
|
class TestGetSkillsDir:
|
||||||
@@ -182,11 +172,6 @@ class TestGetSkillsDir:
|
|||||||
result = _get_skills_dir(project_dir, "kiro-cli")
|
result = _get_skills_dir(project_dir, "kiro-cli")
|
||||||
assert result == project_dir / ".kiro" / "skills"
|
assert result == project_dir / ".kiro" / "skills"
|
||||||
|
|
||||||
def test_pi_skills_dir(self, project_dir):
|
|
||||||
"""Pi should use .pi/skills/."""
|
|
||||||
result = _get_skills_dir(project_dir, "pi")
|
|
||||||
assert result == project_dir / ".pi" / "skills"
|
|
||||||
|
|
||||||
def test_unknown_agent_uses_default(self, project_dir):
|
def test_unknown_agent_uses_default(self, project_dir):
|
||||||
"""Unknown agents should fall back to DEFAULT_SKILLS_DIR."""
|
"""Unknown agents should fall back to DEFAULT_SKILLS_DIR."""
|
||||||
result = _get_skills_dir(project_dir, "nonexistent-agent")
|
result = _get_skills_dir(project_dir, "nonexistent-agent")
|
||||||
@@ -342,7 +327,7 @@ class TestInstallAiSkills:
|
|||||||
cmds_dir = project_dir / ".claude" / "commands"
|
cmds_dir = project_dir / ".claude" / "commands"
|
||||||
cmds_dir.mkdir(parents=True)
|
cmds_dir.mkdir(parents=True)
|
||||||
|
|
||||||
(cmds_dir / "speckit.broken.md").write_text(
|
(cmds_dir / "broken.md").write_text(
|
||||||
"---\n"
|
"---\n"
|
||||||
"description: [unclosed bracket\n"
|
"description: [unclosed bracket\n"
|
||||||
" invalid: yaml: content: here\n"
|
" invalid: yaml: content: here\n"
|
||||||
@@ -405,49 +390,6 @@ class TestInstallAiSkills:
|
|||||||
# .toml commands should be untouched
|
# .toml commands should be untouched
|
||||||
assert (cmds_dir / "speckit.specify.toml").exists()
|
assert (cmds_dir / "speckit.specify.toml").exists()
|
||||||
|
|
||||||
def test_qwen_md_commands_dir_installs_skills(self, project_dir):
|
|
||||||
"""Qwen now uses Markdown format; skills should install directly from .qwen/commands/."""
|
|
||||||
cmds_dir = project_dir / ".qwen" / "commands"
|
|
||||||
cmds_dir.mkdir(parents=True)
|
|
||||||
(cmds_dir / "speckit.specify.md").write_text(
|
|
||||||
"---\ndescription: Create or update the feature specification.\n---\n\n# Specify\n\nBody.\n"
|
|
||||||
)
|
|
||||||
(cmds_dir / "speckit.plan.md").write_text(
|
|
||||||
"---\ndescription: Generate implementation plan.\n---\n\n# Plan\n\nBody.\n"
|
|
||||||
)
|
|
||||||
|
|
||||||
result = install_ai_skills(project_dir, "qwen")
|
|
||||||
|
|
||||||
assert result is True
|
|
||||||
skills_dir = project_dir / ".qwen" / "skills"
|
|
||||||
assert skills_dir.exists()
|
|
||||||
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
|
|
||||||
assert len(skill_dirs) >= 1
|
|
||||||
# .md commands should be untouched
|
|
||||||
assert (cmds_dir / "speckit.specify.md").exists()
|
|
||||||
assert (cmds_dir / "speckit.plan.md").exists()
|
|
||||||
|
|
||||||
def test_pi_prompt_dir_installs_skills(self, project_dir):
|
|
||||||
"""Pi should install skills directly from .pi/prompts/."""
|
|
||||||
prompts_dir = project_dir / ".pi" / "prompts"
|
|
||||||
prompts_dir.mkdir(parents=True)
|
|
||||||
(prompts_dir / "speckit.specify.md").write_text(
|
|
||||||
"---\ndescription: Create or update the feature specification.\n---\n\n# Specify\n\nBody.\n"
|
|
||||||
)
|
|
||||||
(prompts_dir / "speckit.plan.md").write_text(
|
|
||||||
"---\ndescription: Generate implementation plan.\n---\n\n# Plan\n\nBody.\n"
|
|
||||||
)
|
|
||||||
|
|
||||||
result = install_ai_skills(project_dir, "pi")
|
|
||||||
|
|
||||||
assert result is True
|
|
||||||
skills_dir = project_dir / ".pi" / "skills"
|
|
||||||
assert skills_dir.exists()
|
|
||||||
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
|
|
||||||
assert len(skill_dirs) >= 1
|
|
||||||
assert (prompts_dir / "speckit.specify.md").exists()
|
|
||||||
assert (prompts_dir / "speckit.plan.md").exists()
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent_key", [k for k in AGENT_CONFIG.keys() if k != "generic"])
|
@pytest.mark.parametrize("agent_key", [k for k in AGENT_CONFIG.keys() if k != "generic"])
|
||||||
def test_skills_install_for_all_agents(self, temp_dir, agent_key):
|
def test_skills_install_for_all_agents(self, temp_dir, agent_key):
|
||||||
"""install_ai_skills should produce skills for every configured agent."""
|
"""install_ai_skills should produce skills for every configured agent."""
|
||||||
@@ -456,12 +398,9 @@ class TestInstallAiSkills:
|
|||||||
|
|
||||||
# Place .md templates in the agent's commands directory
|
# Place .md templates in the agent's commands directory
|
||||||
agent_folder = AGENT_CONFIG[agent_key]["folder"]
|
agent_folder = AGENT_CONFIG[agent_key]["folder"]
|
||||||
commands_subdir = AGENT_CONFIG[agent_key].get("commands_subdir", "commands")
|
cmds_dir = proj / agent_folder.rstrip("/") / "commands"
|
||||||
cmds_dir = proj / agent_folder.rstrip("/") / commands_subdir
|
|
||||||
cmds_dir.mkdir(parents=True)
|
cmds_dir.mkdir(parents=True)
|
||||||
# Copilot uses speckit.*.agent.md templates; other agents use speckit.*.md
|
(cmds_dir / "specify.md").write_text(
|
||||||
fname = "speckit.specify.agent.md" if agent_key == "copilot" else "speckit.specify.md"
|
|
||||||
(cmds_dir / fname).write_text(
|
|
||||||
"---\ndescription: Test command\n---\n\n# Test\n\nBody.\n"
|
"---\ndescription: Test command\n---\n\n# Test\n\nBody.\n"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -471,105 +410,10 @@ class TestInstallAiSkills:
|
|||||||
skills_dir = _get_skills_dir(proj, agent_key)
|
skills_dir = _get_skills_dir(proj, agent_key)
|
||||||
assert skills_dir.exists()
|
assert skills_dir.exists()
|
||||||
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
|
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
|
||||||
# Kimi uses dotted skill names; other agents use hyphen-separated names.
|
|
||||||
expected_skill_name = "speckit.specify" if agent_key == "kimi" else "speckit-specify"
|
|
||||||
assert expected_skill_name in skill_dirs
|
|
||||||
assert (skills_dir / expected_skill_name / "SKILL.md").exists()
|
|
||||||
|
|
||||||
def test_copilot_ignores_non_speckit_agents(self, project_dir):
|
|
||||||
"""Non-speckit markdown in .github/agents/ must not produce skills."""
|
|
||||||
agents_dir = project_dir / ".github" / "agents"
|
|
||||||
agents_dir.mkdir(parents=True, exist_ok=True)
|
|
||||||
(agents_dir / "speckit.plan.agent.md").write_text(
|
|
||||||
"---\ndescription: Generate implementation plan.\n---\n\n# Plan\n\nBody.\n"
|
|
||||||
)
|
|
||||||
(agents_dir / "my-custom-agent.agent.md").write_text(
|
|
||||||
"---\ndescription: A user custom agent\n---\n\n# Custom\n\nBody.\n"
|
|
||||||
)
|
|
||||||
|
|
||||||
result = install_ai_skills(project_dir, "copilot")
|
|
||||||
|
|
||||||
assert result is True
|
|
||||||
skills_dir = _get_skills_dir(project_dir, "copilot")
|
|
||||||
assert skills_dir.exists()
|
|
||||||
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
|
|
||||||
assert "speckit-plan" in skill_dirs
|
|
||||||
assert "speckit-my-custom-agent.agent" not in skill_dirs
|
|
||||||
assert "speckit-my-custom-agent" not in skill_dirs
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent_key,custom_file", [
|
|
||||||
("claude", "review.md"),
|
|
||||||
("cursor-agent", "deploy.md"),
|
|
||||||
("qwen", "my-workflow.md"),
|
|
||||||
])
|
|
||||||
def test_non_speckit_commands_ignored_for_all_agents(self, temp_dir, agent_key, custom_file):
|
|
||||||
"""User-authored command files must not produce skills for any agent."""
|
|
||||||
proj = temp_dir / f"proj-{agent_key}"
|
|
||||||
proj.mkdir()
|
|
||||||
|
|
||||||
agent_folder = AGENT_CONFIG[agent_key]["folder"]
|
|
||||||
commands_subdir = AGENT_CONFIG[agent_key].get("commands_subdir", "commands")
|
|
||||||
cmds_dir = proj / agent_folder.rstrip("/") / commands_subdir
|
|
||||||
cmds_dir.mkdir(parents=True)
|
|
||||||
(cmds_dir / "speckit.specify.md").write_text(
|
|
||||||
"---\ndescription: Create spec.\n---\n\n# Specify\n\nBody.\n"
|
|
||||||
)
|
|
||||||
(cmds_dir / custom_file).write_text(
|
|
||||||
"---\ndescription: User custom command\n---\n\n# Custom\n\nBody.\n"
|
|
||||||
)
|
|
||||||
|
|
||||||
result = install_ai_skills(proj, agent_key)
|
|
||||||
|
|
||||||
assert result is True
|
|
||||||
skills_dir = _get_skills_dir(proj, agent_key)
|
|
||||||
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
|
|
||||||
assert "speckit-specify" in skill_dirs
|
assert "speckit-specify" in skill_dirs
|
||||||
custom_stem = Path(custom_file).stem
|
assert (skills_dir / "speckit-specify" / "SKILL.md").exists()
|
||||||
assert f"speckit-{custom_stem}" not in skill_dirs
|
|
||||||
|
|
||||||
def test_copilot_fallback_when_only_non_speckit_agents(self, project_dir):
|
|
||||||
"""Fallback to templates/commands/ when .github/agents/ has no speckit.*.md files."""
|
|
||||||
agents_dir = project_dir / ".github" / "agents"
|
|
||||||
agents_dir.mkdir(parents=True, exist_ok=True)
|
|
||||||
# Only a user-authored agent, no speckit.* templates
|
|
||||||
(agents_dir / "my-custom-agent.agent.md").write_text(
|
|
||||||
"---\ndescription: A user custom agent\n---\n\n# Custom\n\nBody.\n"
|
|
||||||
)
|
|
||||||
|
|
||||||
result = install_ai_skills(project_dir, "copilot")
|
|
||||||
|
|
||||||
# Should succeed via fallback to templates/commands/
|
|
||||||
assert result is True
|
|
||||||
skills_dir = _get_skills_dir(project_dir, "copilot")
|
|
||||||
assert skills_dir.exists()
|
|
||||||
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
|
|
||||||
# Should have skills from fallback templates, not from the custom agent
|
|
||||||
assert "speckit-plan" in skill_dirs
|
|
||||||
assert not any("my-custom" in d for d in skill_dirs)
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent_key", ["claude", "cursor-agent", "qwen"])
|
|
||||||
def test_fallback_when_only_non_speckit_commands(self, temp_dir, agent_key):
|
|
||||||
"""Fallback to templates/commands/ when agent dir has no speckit.*.md files."""
|
|
||||||
proj = temp_dir / f"proj-{agent_key}"
|
|
||||||
proj.mkdir()
|
|
||||||
|
|
||||||
agent_folder = AGENT_CONFIG[agent_key]["folder"]
|
|
||||||
commands_subdir = AGENT_CONFIG[agent_key].get("commands_subdir", "commands")
|
|
||||||
cmds_dir = proj / agent_folder.rstrip("/") / commands_subdir
|
|
||||||
cmds_dir.mkdir(parents=True)
|
|
||||||
# Only a user-authored command, no speckit.* templates
|
|
||||||
(cmds_dir / "my-custom-command.md").write_text(
|
|
||||||
"---\ndescription: User custom command\n---\n\n# Custom\n\nBody.\n"
|
|
||||||
)
|
|
||||||
|
|
||||||
result = install_ai_skills(proj, agent_key)
|
|
||||||
|
|
||||||
# Should succeed via fallback to templates/commands/
|
|
||||||
assert result is True
|
|
||||||
skills_dir = _get_skills_dir(proj, agent_key)
|
|
||||||
assert skills_dir.exists()
|
|
||||||
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
|
|
||||||
assert not any("my-custom" in d for d in skill_dirs)
|
|
||||||
|
|
||||||
class TestCommandCoexistence:
|
class TestCommandCoexistence:
|
||||||
"""Verify install_ai_skills never touches command files.
|
"""Verify install_ai_skills never touches command files.
|
||||||
@@ -581,16 +425,14 @@

    def test_existing_commands_preserved_claude(self, project_dir, templates_dir, commands_dir_claude):
        """install_ai_skills must NOT remove pre-existing .claude/commands files."""
        # Verify commands exist before (templates_dir adds 4 speckit.* files,
        # Verify commands exist before
        # commands_dir_claude overlaps with 3 of them)
        assert len(list(commands_dir_claude.glob("speckit.*"))) == 3
        before = list(commands_dir_claude.glob("speckit.*"))
        assert len(before) >= 3

        install_ai_skills(project_dir, "claude")

        # Commands must still be there — install_ai_skills never touches them
        remaining = list(commands_dir_claude.glob("speckit.*"))
        assert len(remaining) == len(before)
        assert len(remaining) == 3

    def test_existing_commands_preserved_gemini(self, project_dir, templates_dir, commands_dir_gemini):
        """install_ai_skills must NOT remove pre-existing .gemini/commands files."""
@@ -601,15 +443,6 @@ class TestCommandCoexistence:
|
|||||||
remaining = list(commands_dir_gemini.glob("speckit.*"))
|
remaining = list(commands_dir_gemini.glob("speckit.*"))
|
||||||
assert len(remaining) == 3
|
assert len(remaining) == 3
|
||||||
|
|
||||||
def test_existing_commands_preserved_qwen(self, project_dir, templates_dir, commands_dir_qwen):
|
|
||||||
"""install_ai_skills must NOT remove pre-existing .qwen/commands files."""
|
|
||||||
assert len(list(commands_dir_qwen.glob("speckit.*"))) == 3
|
|
||||||
|
|
||||||
install_ai_skills(project_dir, "qwen")
|
|
||||||
|
|
||||||
remaining = list(commands_dir_qwen.glob("speckit.*"))
|
|
||||||
assert len(remaining) == 3
|
|
||||||
|
|
||||||
def test_commands_dir_not_removed(self, project_dir, templates_dir, commands_dir_claude):
|
def test_commands_dir_not_removed(self, project_dir, templates_dir, commands_dir_claude):
|
||||||
"""install_ai_skills must not remove the commands directory."""
|
"""install_ai_skills must not remove the commands directory."""
|
||||||
install_ai_skills(project_dir, "claude")
|
install_ai_skills(project_dir, "claude")
|
||||||
@@ -693,82 +526,6 @@ class TestNewProjectCommandSkip:
|
|||||||
prompts_dir = target / ".kiro" / "prompts"
|
prompts_dir = target / ".kiro" / "prompts"
|
||||||
assert not prompts_dir.exists()
|
assert not prompts_dir.exists()
|
||||||
|
|
||||||
def test_codex_native_skills_preserved_without_conversion(self, tmp_path):
|
|
||||||
"""Codex should keep bundled .agents/skills and skip install_ai_skills conversion."""
|
|
||||||
from typer.testing import CliRunner
|
|
||||||
|
|
||||||
runner = CliRunner()
|
|
||||||
target = tmp_path / "new-codex-proj"
|
|
||||||
|
|
||||||
def fake_download(project_path, *args, **kwargs):
|
|
||||||
skill_dir = project_path / ".agents" / "skills" / "speckit-specify"
|
|
||||||
skill_dir.mkdir(parents=True, exist_ok=True)
|
|
||||||
(skill_dir / "SKILL.md").write_text("---\ndescription: Test skill\n---\n\nBody.\n")
|
|
||||||
|
|
||||||
with patch("specify_cli.download_and_extract_template", side_effect=fake_download), \
|
|
||||||
patch("specify_cli.ensure_executable_scripts"), \
|
|
||||||
patch("specify_cli.ensure_constitution_from_template"), \
|
|
||||||
patch("specify_cli.install_ai_skills") as mock_skills, \
|
|
||||||
patch("specify_cli.is_git_repo", return_value=False), \
|
|
||||||
patch("specify_cli.shutil.which", return_value="/usr/bin/codex"):
|
|
||||||
result = runner.invoke(
|
|
||||||
app,
|
|
||||||
["init", str(target), "--ai", "codex", "--ai-skills", "--script", "sh", "--no-git"],
|
|
||||||
)
|
|
||||||
|
|
||||||
assert result.exit_code == 0
|
|
||||||
mock_skills.assert_not_called()
|
|
||||||
assert (target / ".agents" / "skills" / "speckit-specify" / "SKILL.md").exists()
|
|
||||||
|
|
||||||
def test_codex_native_skills_missing_fails_clearly(self, tmp_path):
|
|
||||||
"""Codex native skills init should fail if bundled skills are missing."""
|
|
||||||
from typer.testing import CliRunner
|
|
||||||
|
|
||||||
runner = CliRunner()
|
|
||||||
target = tmp_path / "missing-codex-skills"
|
|
||||||
|
|
||||||
with patch("specify_cli.download_and_extract_template", lambda *args, **kwargs: None), \
|
|
||||||
patch("specify_cli.ensure_executable_scripts"), \
|
|
||||||
patch("specify_cli.ensure_constitution_from_template"), \
|
|
||||||
patch("specify_cli.install_ai_skills") as mock_skills, \
|
|
||||||
patch("specify_cli.is_git_repo", return_value=False), \
|
|
||||||
patch("specify_cli.shutil.which", return_value="/usr/bin/codex"):
|
|
||||||
result = runner.invoke(
|
|
||||||
app,
|
|
||||||
["init", str(target), "--ai", "codex", "--ai-skills", "--script", "sh", "--no-git"],
|
|
||||||
)
|
|
||||||
|
|
||||||
assert result.exit_code == 1
|
|
||||||
mock_skills.assert_not_called()
|
|
||||||
assert "Expected bundled agent skills" in result.output
|
|
||||||
|
|
||||||
def test_codex_native_skills_ignores_non_speckit_skill_dirs(self, tmp_path):
|
|
||||||
"""Non-spec-kit SKILL.md files should not satisfy Codex bundled-skills validation."""
|
|
||||||
from typer.testing import CliRunner
|
|
||||||
|
|
||||||
runner = CliRunner()
|
|
||||||
target = tmp_path / "foreign-codex-skills"
|
|
||||||
|
|
||||||
def fake_download(project_path, *args, **kwargs):
|
|
||||||
skill_dir = project_path / ".agents" / "skills" / "other-tool"
|
|
||||||
skill_dir.mkdir(parents=True, exist_ok=True)
|
|
||||||
(skill_dir / "SKILL.md").write_text("---\ndescription: Foreign skill\n---\n\nBody.\n")
|
|
||||||
|
|
||||||
with patch("specify_cli.download_and_extract_template", side_effect=fake_download), \
|
|
||||||
patch("specify_cli.ensure_executable_scripts"), \
|
|
||||||
patch("specify_cli.ensure_constitution_from_template"), \
|
|
||||||
patch("specify_cli.install_ai_skills") as mock_skills, \
|
|
||||||
patch("specify_cli.is_git_repo", return_value=False), \
|
|
||||||
patch("specify_cli.shutil.which", return_value="/usr/bin/codex"):
|
|
||||||
result = runner.invoke(
|
|
||||||
app,
|
|
||||||
["init", str(target), "--ai", "codex", "--ai-skills", "--script", "sh", "--no-git"],
|
|
||||||
)
|
|
||||||
|
|
||||||
assert result.exit_code == 1
|
|
||||||
mock_skills.assert_not_called()
|
|
||||||
assert "Expected bundled agent skills" in result.output
|
|
||||||
|
|
||||||
def test_commands_preserved_when_skills_fail(self, tmp_path):
|
def test_commands_preserved_when_skills_fail(self, tmp_path):
|
||||||
"""If skills fail, commands should NOT be removed (safety net)."""
|
"""If skills fail, commands should NOT be removed (safety net)."""
|
||||||
from typer.testing import CliRunner
|
from typer.testing import CliRunner
|
||||||
@@ -901,136 +658,6 @@ class TestCliValidation:
|
|||||||
assert "Usage:" in result.output
|
assert "Usage:" in result.output
|
||||||
assert "--ai" in result.output
|
assert "--ai" in result.output
|
||||||
|
|
||||||
def test_agy_without_ai_skills_fails(self):
|
|
||||||
"""--ai agy without --ai-skills should fail with exit code 1."""
|
|
||||||
from typer.testing import CliRunner
|
|
||||||
|
|
||||||
runner = CliRunner()
|
|
||||||
result = runner.invoke(app, ["init", "test-proj", "--ai", "agy"])
|
|
||||||
|
|
||||||
assert result.exit_code == 1
|
|
||||||
assert "Explicit command support was deprecated in Antigravity version 1.20.5." in result.output
|
|
||||||
assert "--ai-skills" in result.output
|
|
||||||
|
|
||||||
def test_codex_without_ai_skills_fails(self):
|
|
||||||
"""--ai codex without --ai-skills should fail with exit code 1."""
|
|
||||||
from typer.testing import CliRunner
|
|
||||||
|
|
||||||
runner = CliRunner()
|
|
||||||
result = runner.invoke(app, ["init", "test-proj", "--ai", "codex"])
|
|
||||||
|
|
||||||
assert result.exit_code == 1
|
|
||||||
assert "Custom prompt-based spec-kit initialization is deprecated for Codex CLI" in result.output
|
|
||||||
assert "--ai-skills" in result.output
|
|
||||||
|
|
||||||
def test_interactive_agy_without_ai_skills_prompts_skills(self, monkeypatch):
|
|
||||||
"""Interactive selector returning agy without --ai-skills should automatically enable --ai-skills."""
|
|
||||||
from typer.testing import CliRunner
|
|
||||||
|
|
||||||
# Mock select_with_arrows to simulate the user picking 'agy' for AI,
|
|
||||||
# and return a deterministic default for any other prompts to avoid
|
|
||||||
# calling the real interactive implementation.
|
|
||||||
def _fake_select_with_arrows(*args, **kwargs):
|
|
||||||
options = kwargs.get("options")
|
|
||||||
if options is None and len(args) >= 1:
|
|
||||||
options = args[0]
|
|
||||||
|
|
||||||
# If the options include 'agy', simulate selecting it.
|
|
||||||
if isinstance(options, dict) and "agy" in options:
|
|
||||||
return "agy"
|
|
||||||
if isinstance(options, (list, tuple)) and "agy" in options:
|
|
||||||
return "agy"
|
|
||||||
|
|
||||||
# For any other prompt, return a deterministic, non-interactive default:
|
|
||||||
# pick the first option if available.
|
|
||||||
if isinstance(options, dict) and options:
|
|
||||||
return next(iter(options.keys()))
|
|
||||||
if isinstance(options, (list, tuple)) and options:
|
|
||||||
return options[0]
|
|
||||||
|
|
||||||
# If no options are provided, fall back to None (should not occur in normal use).
|
|
||||||
return None
|
|
||||||
|
|
||||||
monkeypatch.setattr("specify_cli.select_with_arrows", _fake_select_with_arrows)
|
|
||||||
|
|
||||||
# Mock download_and_extract_template to prevent real HTTP downloads during testing
|
|
||||||
monkeypatch.setattr("specify_cli.download_and_extract_template", lambda *args, **kwargs: None)
|
|
||||||
# The invocation below passes --no-git, so the `git init` step is skipped.
|
|
||||||
runner = CliRunner()
|
|
||||||
# Run in an isolated filesystem so the target directory does not already exist.
|
|
||||||
with runner.isolated_filesystem():
|
|
||||||
result = runner.invoke(app, ["init", "test-proj", "--no-git"])
|
|
||||||
|
|
||||||
# Interactive selection should NOT raise the deprecation error!
|
|
||||||
assert result.exit_code == 0
|
|
||||||
assert "Explicit command support was deprecated" not in result.output
|
|
||||||
|
|
||||||
def test_interactive_codex_without_ai_skills_enables_skills(self, monkeypatch):
|
|
||||||
"""Interactive selector returning codex without --ai-skills should automatically enable --ai-skills."""
|
|
||||||
from typer.testing import CliRunner
|
|
||||||
|
|
||||||
def _fake_select_with_arrows(*args, **kwargs):
|
|
||||||
options = kwargs.get("options")
|
|
||||||
if options is None and len(args) >= 1:
|
|
||||||
options = args[0]
|
|
||||||
|
|
||||||
if isinstance(options, dict) and "codex" in options:
|
|
||||||
return "codex"
|
|
||||||
if isinstance(options, (list, tuple)) and "codex" in options:
|
|
||||||
return "codex"
|
|
||||||
|
|
||||||
if isinstance(options, dict) and options:
|
|
||||||
return next(iter(options.keys()))
|
|
||||||
if isinstance(options, (list, tuple)) and options:
|
|
||||||
return options[0]
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
monkeypatch.setattr("specify_cli.select_with_arrows", _fake_select_with_arrows)
|
|
||||||
|
|
||||||
def _fake_download(*args, **kwargs):
|
|
||||||
project_path = Path(args[0])
|
|
||||||
skill_dir = project_path / ".agents" / "skills" / "speckit-specify"
|
|
||||||
skill_dir.mkdir(parents=True, exist_ok=True)
|
|
||||||
(skill_dir / "SKILL.md").write_text("---\ndescription: Test skill\n---\n\nBody.\n")
|
|
||||||
|
|
||||||
monkeypatch.setattr("specify_cli.download_and_extract_template", _fake_download)
|
|
||||||
|
|
||||||
runner = CliRunner()
|
|
||||||
with runner.isolated_filesystem():
|
|
||||||
result = runner.invoke(app, ["init", "test-proj", "--no-git", "--ignore-agent-tools"])
|
|
||||||
|
|
||||||
assert result.exit_code == 0
|
|
||||||
assert "Custom prompt-based spec-kit initialization is deprecated for Codex CLI" not in result.output
|
|
||||||
assert ".agents/skills" in result.output
|
|
||||||
assert "$speckit-constitution" in result.output
|
|
||||||
assert "/speckit.constitution" not in result.output
|
|
||||||
assert "Optional skills that you can use for your specs" in result.output
|
|
||||||
|
|
||||||
def test_kimi_next_steps_show_skill_invocation(self, monkeypatch):
|
|
||||||
"""Kimi next-steps guidance should display /skill:speckit.* usage."""
|
|
||||||
from typer.testing import CliRunner
|
|
||||||
|
|
||||||
def _fake_download(*args, **kwargs):
|
|
||||||
project_path = Path(args[0])
|
|
||||||
skill_dir = project_path / ".kimi" / "skills" / "speckit.specify"
|
|
||||||
skill_dir.mkdir(parents=True, exist_ok=True)
|
|
||||||
(skill_dir / "SKILL.md").write_text("---\ndescription: Test skill\n---\n\nBody.\n")
|
|
||||||
|
|
||||||
monkeypatch.setattr("specify_cli.download_and_extract_template", _fake_download)
|
|
||||||
|
|
||||||
runner = CliRunner()
|
|
||||||
with runner.isolated_filesystem():
|
|
||||||
result = runner.invoke(
|
|
||||||
app,
|
|
||||||
["init", "test-proj", "--ai", "kimi", "--no-git", "--ignore-agent-tools"],
|
|
||||||
)
|
|
||||||
|
|
||||||
assert result.exit_code == 0
|
|
||||||
assert "/skill:speckit.constitution" in result.output
|
|
||||||
assert "/speckit.constitution" not in result.output
|
|
||||||
assert "Optional skills that you can use for your specs" in result.output
|
|
||||||
|
|
||||||
def test_ai_skills_flag_appears_in_help(self):
|
def test_ai_skills_flag_appears_in_help(self):
|
||||||
"""--ai-skills should appear in init --help output."""
|
"""--ai-skills should appear in init --help output."""
|
||||||
from typer.testing import CliRunner
|
from typer.testing import CliRunner
|
||||||
@@ -1050,12 +677,10 @@ class TestCliValidation:
|
|||||||
target = tmp_path / "kiro-alias-proj"
|
target = tmp_path / "kiro-alias-proj"
|
||||||
|
|
||||||
with patch("specify_cli.download_and_extract_template") as mock_download, \
|
with patch("specify_cli.download_and_extract_template") as mock_download, \
|
||||||
patch("specify_cli.scaffold_from_core_pack", create=True) as mock_scaffold, \
|
|
||||||
patch("specify_cli.ensure_executable_scripts"), \
|
patch("specify_cli.ensure_executable_scripts"), \
|
||||||
patch("specify_cli.ensure_constitution_from_template"), \
|
patch("specify_cli.ensure_constitution_from_template"), \
|
||||||
patch("specify_cli.is_git_repo", return_value=False), \
|
patch("specify_cli.is_git_repo", return_value=False), \
|
||||||
patch("specify_cli.shutil.which", return_value="/usr/bin/git"):
|
patch("specify_cli.shutil.which", return_value="/usr/bin/git"):
|
||||||
mock_scaffold.return_value = True
|
|
||||||
result = runner.invoke(
|
result = runner.invoke(
|
||||||
app,
|
app,
|
||||||
[
|
[
|
||||||
@@ -1071,14 +696,9 @@ class TestCliValidation:
|
|||||||
)
|
)
|
||||||
|
|
||||||
assert result.exit_code == 0
|
assert result.exit_code == 0
|
||||||
# Without --offline, the download path should be taken.
|
assert mock_download.called
|
||||||
assert mock_download.called, (
|
# download_and_extract_template(project_path, ai_assistant, script_type, ...)
|
||||||
"Expected download_and_extract_template to be called (default non-offline path)"
|
|
||||||
)
|
|
||||||
assert mock_download.call_args.args[1] == "kiro-cli"
|
assert mock_download.call_args.args[1] == "kiro-cli"
|
||||||
assert not mock_scaffold.called, (
|
|
||||||
"scaffold_from_core_pack should not be called without --offline"
|
|
||||||
)
|
|
||||||
|
|
||||||
def test_q_removed_from_agent_config(self):
|
def test_q_removed_from_agent_config(self):
|
||||||
"""Amazon Q legacy key should not remain in AGENT_CONFIG."""
|
"""Amazon Q legacy key should not remain in AGENT_CONFIG."""
|
||||||
|
|||||||
@@ -1,89 +0,0 @@
"""
Unit tests for branch numbering options (sequential vs timestamp).

Tests cover:
- Persisting branch_numbering in init-options.json
- Default value when branch_numbering is None
- Validation of branch_numbering values
"""

import json
from pathlib import Path

from specify_cli import save_init_options
|
|
||||||
|
|
||||||
class TestSaveBranchNumbering:
|
|
||||||
"""Tests for save_init_options with branch_numbering."""
|
|
||||||
|
|
||||||
def test_save_branch_numbering_timestamp(self, tmp_path: Path):
|
|
||||||
opts = {"branch_numbering": "timestamp", "ai": "claude"}
|
|
||||||
save_init_options(tmp_path, opts)
|
|
||||||
|
|
||||||
saved = json.loads((tmp_path / ".specify/init-options.json").read_text())
|
|
||||||
assert saved["branch_numbering"] == "timestamp"
|
|
||||||
|
|
||||||
def test_save_branch_numbering_sequential(self, tmp_path: Path):
|
|
||||||
opts = {"branch_numbering": "sequential", "ai": "claude"}
|
|
||||||
save_init_options(tmp_path, opts)
|
|
||||||
|
|
||||||
saved = json.loads((tmp_path / ".specify/init-options.json").read_text())
|
|
||||||
assert saved["branch_numbering"] == "sequential"
|
|
||||||
|
|
||||||
def test_branch_numbering_defaults_to_sequential(self, tmp_path: Path, monkeypatch):
|
|
||||||
from typer.testing import CliRunner
|
|
||||||
from specify_cli import app
|
|
||||||
|
|
||||||
def _fake_download(project_path, *args, **kwargs):
|
|
||||||
Path(project_path).mkdir(parents=True, exist_ok=True)
|
|
||||||
|
|
||||||
monkeypatch.setattr("specify_cli.download_and_extract_template", _fake_download)
|
|
||||||
|
|
||||||
project_dir = tmp_path / "proj"
|
|
||||||
runner = CliRunner()
|
|
||||||
result = runner.invoke(app, ["init", str(project_dir), "--ai", "claude", "--ignore-agent-tools"])
|
|
||||||
assert result.exit_code == 0
|
|
||||||
|
|
||||||
saved = json.loads((project_dir / ".specify/init-options.json").read_text())
|
|
||||||
assert saved["branch_numbering"] == "sequential"
|
|
||||||
|
|
||||||
|
|
||||||
class TestBranchNumberingValidation:
|
|
||||||
"""Tests for branch_numbering CLI validation via CliRunner."""
|
|
||||||
|
|
||||||
def test_invalid_branch_numbering_rejected(self, tmp_path: Path):
|
|
||||||
from typer.testing import CliRunner
|
|
||||||
from specify_cli import app
|
|
||||||
|
|
||||||
runner = CliRunner()
|
|
||||||
result = runner.invoke(app, ["init", str(tmp_path / "proj"), "--ai", "claude", "--branch-numbering", "foobar"])
|
|
||||||
assert result.exit_code == 1
|
|
||||||
assert "Invalid --branch-numbering" in result.output
|
|
||||||
|
|
||||||
def test_valid_branch_numbering_sequential(self, tmp_path: Path, monkeypatch):
|
|
||||||
from typer.testing import CliRunner
|
|
||||||
from specify_cli import app
|
|
||||||
|
|
||||||
def _fake_download(project_path, *args, **kwargs):
|
|
||||||
Path(project_path).mkdir(parents=True, exist_ok=True)
|
|
||||||
|
|
||||||
monkeypatch.setattr("specify_cli.download_and_extract_template", _fake_download)
|
|
||||||
|
|
||||||
runner = CliRunner()
|
|
||||||
result = runner.invoke(app, ["init", str(tmp_path / "proj"), "--ai", "claude", "--branch-numbering", "sequential", "--ignore-agent-tools"])
|
|
||||||
assert result.exit_code == 0
|
|
||||||
assert "Invalid --branch-numbering" not in (result.output or "")
|
|
||||||
|
|
||||||
def test_valid_branch_numbering_timestamp(self, tmp_path: Path, monkeypatch):
|
|
||||||
from typer.testing import CliRunner
|
|
||||||
from specify_cli import app
|
|
||||||
|
|
||||||
def _fake_download(project_path, *args, **kwargs):
|
|
||||||
Path(project_path).mkdir(parents=True, exist_ok=True)
|
|
||||||
|
|
||||||
monkeypatch.setattr("specify_cli.download_and_extract_template", _fake_download)
|
|
||||||
|
|
||||||
runner = CliRunner()
|
|
||||||
result = runner.invoke(app, ["init", str(tmp_path / "proj"), "--ai", "claude", "--branch-numbering", "timestamp", "--ignore-agent-tools"])
|
|
||||||
assert result.exit_code == 0
|
|
||||||
assert "Invalid --branch-numbering" not in (result.output or "")
|
|
||||||
@@ -1,613 +0,0 @@
|
|||||||
"""
|
|
||||||
Validation tests for offline/air-gapped scaffolding (PR #1803).
|
|
||||||
|
|
||||||
For every supported AI agent (except "generic") the scaffold output is verified
|
|
||||||
against invariants and compared byte-for-byte with the canonical output produced
|
|
||||||
by create-release-packages.sh.
|
|
||||||
|
|
||||||
Since scaffold_from_core_pack() now invokes the release script at runtime, the
|
|
||||||
parity test (section 9) runs the script independently and compares the results
|
|
||||||
to ensure the integration is correct.
|
|
||||||
|
|
||||||
Per-agent invariants verified
|
|
||||||
──────────────────────────────
|
|
||||||
• Command files are written to the directory declared in AGENT_CONFIG
|
|
||||||
• File count matches the number of source templates
|
|
||||||
• Extension is correct: .toml (TOML agents), .agent.md (copilot), .md (rest)
|
|
||||||
• No unresolved placeholders remain ({SCRIPT}, {ARGS}, __AGENT__)
|
|
||||||
• Argument token is correct: {{args}} for TOML agents, $ARGUMENTS for others
|
|
||||||
• Path rewrites applied: scripts/ → .specify/scripts/ etc.
|
|
||||||
• TOML files have "description" and "prompt" fields
|
|
||||||
• Markdown files have parseable YAML frontmatter
|
|
||||||
• Copilot: companion speckit.*.prompt.md files are generated in prompts/
|
|
||||||
• .specify/scripts/ contains at least one script file
|
|
||||||
• .specify/templates/ contains at least one template file
|
|
||||||
|
|
||||||
Parity invariant
|
|
||||||
────────────────
|
|
||||||
Every file produced by scaffold_from_core_pack() must be byte-for-byte
|
|
||||||
identical to the same file in the ZIP produced by the release script.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import shutil
|
|
||||||
import subprocess
|
|
||||||
import tomllib
|
|
||||||
import zipfile
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
import pytest
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
from specify_cli import (
|
|
||||||
AGENT_CONFIG,
|
|
||||||
_TOML_AGENTS,
|
|
||||||
_locate_core_pack,
|
|
||||||
scaffold_from_core_pack,
|
|
||||||
)
|
|
||||||
|
|
||||||
_REPO_ROOT = Path(__file__).parent.parent
|
|
||||||
_RELEASE_SCRIPT = _REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh"
|
|
||||||
|
|
||||||
|
|
||||||
def _find_bash() -> str | None:
|
|
||||||
"""Return the path to a usable bash on this machine, or None."""
|
|
||||||
# Prefer PATH lookup so non-standard install locations (Nix, CI) are found.
|
|
||||||
on_path = shutil.which("bash")
|
|
||||||
if on_path:
|
|
||||||
return on_path
|
|
||||||
candidates = [
|
|
||||||
"/opt/homebrew/bin/bash",
|
|
||||||
"/usr/local/bin/bash",
|
|
||||||
"/bin/bash",
|
|
||||||
"/usr/bin/bash",
|
|
||||||
]
|
|
||||||
for candidate in candidates:
|
|
||||||
try:
|
|
||||||
result = subprocess.run(
|
|
||||||
[candidate, "--version"],
|
|
||||||
capture_output=True, text=True, timeout=5,
|
|
||||||
)
|
|
||||||
if result.returncode == 0:
|
|
||||||
return candidate
|
|
||||||
except (FileNotFoundError, subprocess.TimeoutExpired):
|
|
||||||
continue
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def _run_release_script(agent: str, script_type: str, bash: str, output_dir: Path) -> Path:
|
|
||||||
"""Run create-release-packages.sh for *agent*/*script_type* and return the
|
|
||||||
path to the generated ZIP. *output_dir* receives the build artifacts so
|
|
||||||
the repo working tree stays clean."""
|
|
||||||
env = os.environ.copy()
|
|
||||||
env["AGENTS"] = agent
|
|
||||||
env["SCRIPTS"] = script_type
|
|
||||||
env["GENRELEASES_DIR"] = str(output_dir)
|
|
||||||
|
|
||||||
result = subprocess.run(
|
|
||||||
[bash, str(_RELEASE_SCRIPT), "v0.0.0"],
|
|
||||||
capture_output=True, text=True,
|
|
||||||
cwd=str(_REPO_ROOT),
|
|
||||||
env=env,
|
|
||||||
timeout=300,
|
|
||||||
)
|
|
||||||
|
|
||||||
if result.returncode != 0:
|
|
||||||
pytest.fail(
|
|
||||||
f"Release script failed with exit code {result.returncode}\n"
|
|
||||||
f"stdout:\n{result.stdout}\nstderr:\n{result.stderr}"
|
|
||||||
)
|
|
||||||
|
|
||||||
zip_pattern = f"spec-kit-template-{agent}-{script_type}-v0.0.0.zip"
|
|
||||||
zip_path = output_dir / zip_pattern
|
|
||||||
if not zip_path.exists():
|
|
||||||
pytest.fail(
|
|
||||||
f"Release script did not produce expected ZIP: {zip_path}\n"
|
|
||||||
f"stdout:\n{result.stdout}\nstderr:\n{result.stderr}"
|
|
||||||
)
|
|
||||||
return zip_path
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# Helpers
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
# Number of source command templates (one per .md file in templates/commands/)
|
|
||||||
|
|
||||||
|
|
||||||
def _commands_dir() -> Path:
|
|
||||||
"""Return the command templates directory (source-checkout or core_pack)."""
|
|
||||||
core = _locate_core_pack()
|
|
||||||
if core and (core / "commands").is_dir():
|
|
||||||
return core / "commands"
|
|
||||||
# Source-checkout fallback
|
|
||||||
repo_root = Path(__file__).parent.parent
|
|
||||||
return repo_root / "templates" / "commands"
|
|
||||||
|
|
||||||
|
|
||||||
def _get_source_template_stems() -> list[str]:
|
|
||||||
"""Return the stems of source command template files (e.g. ['specify', 'plan', ...])."""
|
|
||||||
return sorted(p.stem for p in _commands_dir().glob("*.md"))
|
|
||||||
|
|
||||||
|
|
||||||
def _expected_cmd_dir(project_path: Path, agent: str) -> Path:
|
|
||||||
"""Return the expected command-files directory for a given agent."""
|
|
||||||
cfg = AGENT_CONFIG[agent]
|
|
||||||
folder = (cfg.get("folder") or "").rstrip("/")
|
|
||||||
subdir = cfg.get("commands_subdir", "commands")
|
|
||||||
if folder:
|
|
||||||
return project_path / folder / subdir
|
|
||||||
return project_path / ".speckit" / subdir
|
|
||||||
|
|
||||||
|
|
||||||
# Agents whose commands are laid out as <skills_dir>/<name>/SKILL.md.
|
|
||||||
# Maps agent -> separator used in skill directory names.
|
|
||||||
_SKILL_AGENTS: dict[str, str] = {"codex": "-", "kimi": "."}
|
|
||||||
|
|
||||||
|
|
||||||
def _expected_ext(agent: str) -> str:
|
|
||||||
if agent in _TOML_AGENTS:
|
|
||||||
return "toml"
|
|
||||||
if agent == "copilot":
|
|
||||||
return "agent.md"
|
|
||||||
if agent in _SKILL_AGENTS:
|
|
||||||
return "SKILL.md"
|
|
||||||
return "md"
|
|
||||||
|
|
||||||
|
|
||||||
def _list_command_files(cmd_dir: Path, agent: str) -> list[Path]:
|
|
||||||
"""List generated command files, handling skills-based directory layouts."""
|
|
||||||
if agent in _SKILL_AGENTS:
|
|
||||||
sep = _SKILL_AGENTS[agent]
|
|
||||||
return sorted(cmd_dir.glob(f"speckit{sep}*/SKILL.md"))
|
|
||||||
ext = _expected_ext(agent)
|
|
||||||
return sorted(cmd_dir.glob(f"speckit.*.{ext}"))
|
|
||||||
|
|
||||||
|
|
||||||
def _collect_relative_files(root: Path) -> dict[str, bytes]:
|
|
||||||
"""Walk *root* and return {relative_posix_path: file_bytes}."""
|
|
||||||
result: dict[str, bytes] = {}
|
|
||||||
for p in root.rglob("*"):
|
|
||||||
if p.is_file():
|
|
||||||
result[p.relative_to(root).as_posix()] = p.read_bytes()
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# Fixtures
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
|
||||||
def source_template_stems() -> list[str]:
|
|
||||||
return _get_source_template_stems()
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
|
||||||
def scaffolded_sh(tmp_path_factory):
|
|
||||||
"""Session-scoped cache: scaffold once per agent with script_type='sh'."""
|
|
||||||
cache = {}
|
|
||||||
def _get(agent: str) -> Path:
|
|
||||||
if agent not in cache:
|
|
||||||
project = tmp_path_factory.mktemp(f"scaffold_sh_{agent}")
|
|
||||||
ok = scaffold_from_core_pack(project, agent, "sh")
|
|
||||||
assert ok, f"scaffold_from_core_pack returned False for agent '{agent}'"
|
|
||||||
cache[agent] = project
|
|
||||||
return cache[agent]
|
|
||||||
return _get
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
|
||||||
def scaffolded_ps(tmp_path_factory):
|
|
||||||
"""Session-scoped cache: scaffold once per agent with script_type='ps'."""
|
|
||||||
cache = {}
|
|
||||||
def _get(agent: str) -> Path:
|
|
||||||
if agent not in cache:
|
|
||||||
project = tmp_path_factory.mktemp(f"scaffold_ps_{agent}")
|
|
||||||
ok = scaffold_from_core_pack(project, agent, "ps")
|
|
||||||
assert ok, f"scaffold_from_core_pack returned False for agent '{agent}'"
|
|
||||||
cache[agent] = project
|
|
||||||
return cache[agent]
|
|
||||||
return _get
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# Parametrize over all agents except "generic"
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
_TESTABLE_AGENTS = [a for a in AGENT_CONFIG if a != "generic"]
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# 1. Bundled scaffold — directory structure
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
|
|
||||||
def test_scaffold_creates_specify_scripts(agent, scaffolded_sh):
|
|
||||||
"""scaffold_from_core_pack copies at least one script into .specify/scripts/."""
|
|
||||||
project = scaffolded_sh(agent)
|
|
||||||
|
|
||||||
scripts_dir = project / ".specify" / "scripts" / "bash"
|
|
||||||
assert scripts_dir.is_dir(), f".specify/scripts/bash/ missing for agent '{agent}'"
|
|
||||||
assert any(scripts_dir.iterdir()), f".specify/scripts/bash/ is empty for agent '{agent}'"
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
|
|
||||||
def test_scaffold_creates_specify_templates(agent, scaffolded_sh):
|
|
||||||
"""scaffold_from_core_pack copies at least one page template into .specify/templates/."""
|
|
||||||
project = scaffolded_sh(agent)
|
|
||||||
|
|
||||||
tpl_dir = project / ".specify" / "templates"
|
|
||||||
assert tpl_dir.is_dir(), f".specify/templates/ missing for agent '{agent}'"
|
|
||||||
assert any(tpl_dir.iterdir()), ".specify/templates/ is empty"
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
|
|
||||||
def test_scaffold_command_dir_location(agent, scaffolded_sh):
|
|
||||||
"""Command files land in the directory declared by AGENT_CONFIG."""
|
|
||||||
project = scaffolded_sh(agent)
|
|
||||||
|
|
||||||
cmd_dir = _expected_cmd_dir(project, agent)
|
|
||||||
assert cmd_dir.is_dir(), (
|
|
||||||
f"Command dir '{cmd_dir.relative_to(project)}' not created for agent '{agent}'"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# 2. Bundled scaffold — file count
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
|
|
||||||
def test_scaffold_command_file_count(agent, scaffolded_sh, source_template_stems):
|
|
||||||
"""One command file is generated per source template for every agent."""
|
|
||||||
project = scaffolded_sh(agent)
|
|
||||||
|
|
||||||
cmd_dir = _expected_cmd_dir(project, agent)
|
|
||||||
generated = _list_command_files(cmd_dir, agent)
|
|
||||||
|
|
||||||
if cmd_dir.is_dir():
|
|
||||||
dir_listing = list(cmd_dir.iterdir())
|
|
||||||
else:
|
|
||||||
dir_listing = f"<command dir missing: {cmd_dir}>"
|
|
||||||
|
|
||||||
assert len(generated) == len(source_template_stems), (
|
|
||||||
f"Agent '{agent}': expected {len(source_template_stems)} command files "
|
|
||||||
f"({_expected_ext(agent)}), found {len(generated)}. Dir: {dir_listing}"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
|
|
||||||
def test_scaffold_command_file_names(agent, scaffolded_sh, source_template_stems):
|
|
||||||
"""Each source template stem maps to a corresponding speckit.<stem>.<ext> file."""
|
|
||||||
project = scaffolded_sh(agent)
|
|
||||||
|
|
||||||
cmd_dir = _expected_cmd_dir(project, agent)
|
|
||||||
for stem in source_template_stems:
|
|
||||||
if agent in _SKILL_AGENTS:
|
|
||||||
sep = _SKILL_AGENTS[agent]
|
|
||||||
expected = cmd_dir / f"speckit{sep}{stem}" / "SKILL.md"
|
|
||||||
else:
|
|
||||||
ext = _expected_ext(agent)
|
|
||||||
expected = cmd_dir / f"speckit.{stem}.{ext}"
|
|
||||||
assert expected.is_file(), (
|
|
||||||
f"Agent '{agent}': expected file '{expected.name}' not found in '{cmd_dir}'"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# 3. Bundled scaffold — content invariants
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
|
|
||||||
def test_no_unresolved_script_placeholder(agent, scaffolded_sh):
|
|
||||||
"""{SCRIPT} must not appear in any generated command file."""
|
|
||||||
project = scaffolded_sh(agent)
|
|
||||||
|
|
||||||
cmd_dir = _expected_cmd_dir(project, agent)
|
|
||||||
for f in cmd_dir.rglob("*"):
|
|
||||||
if f.is_file():
|
|
||||||
content = f.read_text(encoding="utf-8")
|
|
||||||
assert "{SCRIPT}" not in content, (
|
|
||||||
f"Unresolved {{SCRIPT}} in '{f.relative_to(project)}' for agent '{agent}'"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
|
|
||||||
def test_no_unresolved_agent_placeholder(agent, scaffolded_sh):
|
|
||||||
"""__AGENT__ must not appear in any generated command file."""
|
|
||||||
project = scaffolded_sh(agent)
|
|
||||||
|
|
||||||
cmd_dir = _expected_cmd_dir(project, agent)
|
|
||||||
for f in cmd_dir.rglob("*"):
|
|
||||||
if f.is_file():
|
|
||||||
content = f.read_text(encoding="utf-8")
|
|
||||||
assert "__AGENT__" not in content, (
|
|
||||||
f"Unresolved __AGENT__ in '{f.relative_to(project)}' for agent '{agent}'"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
|
|
||||||
def test_no_unresolved_args_placeholder(agent, scaffolded_sh):
|
|
||||||
"""{ARGS} must not appear in any generated command file (replaced with agent-specific token)."""
|
|
||||||
project = scaffolded_sh(agent)
|
|
||||||
|
|
||||||
cmd_dir = _expected_cmd_dir(project, agent)
|
|
||||||
for f in cmd_dir.rglob("*"):
|
|
||||||
if f.is_file():
|
|
||||||
content = f.read_text(encoding="utf-8")
|
|
||||||
assert "{ARGS}" not in content, (
|
|
||||||
f"Unresolved {{ARGS}} in '{f.relative_to(project)}' for agent '{agent}'"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# Build a set of template stems that actually contain {ARGS} in their source.
|
|
||||||
_TEMPLATES_WITH_ARGS: frozenset[str] = frozenset(
|
|
||||||
p.stem
|
|
||||||
for p in _commands_dir().glob("*.md")
|
|
||||||
if "{ARGS}" in p.read_text(encoding="utf-8")
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
|
|
||||||
def test_argument_token_format(agent, scaffolded_sh):
|
|
||||||
"""For templates that carry an {ARGS} token:
|
|
||||||
- TOML agents must emit {{args}}
|
|
||||||
- Markdown agents must emit $ARGUMENTS
|
|
||||||
Templates without {ARGS} (e.g. implement, plan) are skipped.
|
|
||||||
"""
|
|
||||||
project = scaffolded_sh(agent)
|
|
||||||
|
|
||||||
cmd_dir = _expected_cmd_dir(project, agent)
|
|
||||||
|
|
||||||
for f in _list_command_files(cmd_dir, agent):
|
|
||||||
# Recover the stem from the file path
|
|
||||||
if agent in _SKILL_AGENTS:
|
|
||||||
sep = _SKILL_AGENTS[agent]
|
|
||||||
stem = f.parent.name.removeprefix(f"speckit{sep}")
|
|
||||||
else:
|
|
||||||
ext = _expected_ext(agent)
|
|
||||||
stem = f.name.removeprefix("speckit.").removesuffix(f".{ext}")
|
|
||||||
if stem not in _TEMPLATES_WITH_ARGS:
|
|
||||||
continue # this template has no argument token
|
|
||||||
|
|
||||||
content = f.read_text(encoding="utf-8")
|
|
||||||
if agent in _TOML_AGENTS:
|
|
||||||
assert "{{args}}" in content, (
|
|
||||||
f"TOML agent '{agent}': expected '{{{{args}}}}' in '{f.name}'"
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
assert "$ARGUMENTS" in content, (
|
|
||||||
f"Markdown agent '{agent}': expected '$ARGUMENTS' in '{f.name}'"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
|
|
||||||
def test_path_rewrites_applied(agent, scaffolded_sh):
|
|
||||||
"""Bare scripts/ and templates/ paths must be rewritten to .specify/ variants.
|
|
||||||
|
|
||||||
YAML frontmatter 'source:' metadata fields are excluded — they reference
|
|
||||||
the original template path for provenance, not a runtime path.
|
|
||||||
"""
|
|
||||||
project = scaffolded_sh(agent)
|
|
||||||
|
|
||||||
cmd_dir = _expected_cmd_dir(project, agent)
|
|
||||||
for f in cmd_dir.rglob("*"):
|
|
||||||
if not f.is_file():
|
|
||||||
continue
|
|
||||||
content = f.read_text(encoding="utf-8")
|
|
||||||
|
|
||||||
# Strip YAML frontmatter before checking — source: metadata is not a runtime path
|
|
||||||
body = content
|
|
||||||
if content.startswith("---"):
|
|
||||||
parts = content.split("---", 2)
|
|
||||||
if len(parts) >= 3:
|
|
||||||
body = parts[2]
|
|
||||||
|
|
||||||
# Should not contain bare (non-.specify/) script paths
|
|
||||||
assert not re.search(r'(?<!\.specify/)scripts/', body), (
|
|
||||||
f"Bare scripts/ path found in '{f.relative_to(project)}' for agent '{agent}'"
|
|
||||||
)
|
|
||||||
assert not re.search(r'(?<!\.specify/)templates/', body), (
|
|
||||||
f"Bare templates/ path found in '{f.relative_to(project)}' for agent '{agent}'"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# 4. TOML format checks
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent", sorted(_TOML_AGENTS))
|
|
||||||
def test_toml_format_valid(agent, scaffolded_sh):
|
|
||||||
"""TOML agents: every command file must have description and prompt fields."""
|
|
||||||
project = scaffolded_sh(agent)
|
|
||||||
|
|
||||||
cmd_dir = _expected_cmd_dir(project, agent)
|
|
||||||
for f in cmd_dir.glob("speckit.*.toml"):
|
|
||||||
content = f.read_text(encoding="utf-8")
|
|
||||||
assert 'description = "' in content, (
|
|
||||||
f"Missing 'description' in '{f.name}' for agent '{agent}'"
|
|
||||||
)
|
|
||||||
assert 'prompt = """' in content, (
|
|
||||||
f"Missing 'prompt' block in '{f.name}' for agent '{agent}'"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# 5. Markdown frontmatter checks
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
_MARKDOWN_AGENTS = [a for a in _TESTABLE_AGENTS if a not in _TOML_AGENTS]
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent", _MARKDOWN_AGENTS)
|
|
||||||
def test_markdown_has_frontmatter(agent, scaffolded_sh):
|
|
||||||
"""Markdown agents: every command file must start with valid YAML frontmatter."""
|
|
||||||
project = scaffolded_sh(agent)
|
|
||||||
|
|
||||||
cmd_dir = _expected_cmd_dir(project, agent)
|
|
||||||
for f in _list_command_files(cmd_dir, agent):
|
|
||||||
content = f.read_text(encoding="utf-8")
|
|
||||||
assert content.startswith("---"), (
|
|
||||||
f"No YAML frontmatter in '{f.name}' for agent '{agent}'"
|
|
||||||
)
|
|
||||||
parts = content.split("---", 2)
|
|
||||||
assert len(parts) >= 3, f"Incomplete frontmatter in '{f.name}'"
|
|
||||||
fm = yaml.safe_load(parts[1])
|
|
||||||
assert fm is not None, f"Empty frontmatter in '{f.name}'"
|
|
||||||
assert "description" in fm, (
|
|
||||||
f"'description' key missing from frontmatter in '{f.name}' for agent '{agent}'"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# 6. Copilot-specific: companion .prompt.md files
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
def test_copilot_companion_prompt_files(scaffolded_sh, source_template_stems):
|
|
||||||
"""Copilot: a speckit.<stem>.prompt.md companion is created for every .agent.md file."""
|
|
||||||
project = scaffolded_sh("copilot")
|
|
||||||
|
|
||||||
prompts_dir = project / ".github" / "prompts"
|
|
||||||
assert prompts_dir.is_dir(), ".github/prompts/ not created for copilot"
|
|
||||||
|
|
||||||
for stem in source_template_stems:
|
|
||||||
prompt_file = prompts_dir / f"speckit.{stem}.prompt.md"
|
|
||||||
assert prompt_file.is_file(), (
|
|
||||||
f"Companion prompt file '{prompt_file.name}' missing for copilot"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def test_copilot_prompt_file_content(scaffolded_sh, source_template_stems):
|
|
||||||
"""Copilot companion .prompt.md files must reference their parent .agent.md."""
|
|
||||||
project = scaffolded_sh("copilot")
|
|
||||||
|
|
||||||
prompts_dir = project / ".github" / "prompts"
|
|
||||||
for stem in source_template_stems:
|
|
||||||
f = prompts_dir / f"speckit.{stem}.prompt.md"
|
|
||||||
content = f.read_text(encoding="utf-8")
|
|
||||||
assert f"agent: speckit.{stem}" in content, (
|
|
||||||
f"Companion '{f.name}' does not reference 'speckit.{stem}'"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# 7. PowerShell script variant
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
|
|
||||||
def test_scaffold_powershell_variant(agent, scaffolded_ps, source_template_stems):
|
|
||||||
"""scaffold_from_core_pack with script_type='ps' creates correct files."""
|
|
||||||
project = scaffolded_ps(agent)
|
|
||||||
|
|
||||||
scripts_dir = project / ".specify" / "scripts" / "powershell"
|
|
||||||
assert scripts_dir.is_dir(), f".specify/scripts/powershell/ missing for '{agent}'"
|
|
||||||
assert any(scripts_dir.iterdir()), ".specify/scripts/powershell/ is empty"
|
|
||||||
|
|
||||||
cmd_dir = _expected_cmd_dir(project, agent)
|
|
||||||
generated = _list_command_files(cmd_dir, agent)
|
|
||||||
assert len(generated) == len(source_template_stems)
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# 8. Parity: bundled vs. real create-release-packages.sh ZIP
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
|
||||||
def release_script_trees(tmp_path_factory):
|
|
||||||
"""Session-scoped cache: run release script once per (agent, script_type)."""
|
|
||||||
cache: dict[tuple[str, str], dict[str, bytes]] = {}
|
|
||||||
bash = _find_bash()
|
|
||||||
|
|
||||||
def _get(agent: str, script_type: str) -> dict[str, bytes] | None:
|
|
||||||
if bash is None:
|
|
||||||
return None
|
|
||||||
key = (agent, script_type)
|
|
||||||
if key not in cache:
|
|
||||||
tmp = tmp_path_factory.mktemp(f"release_{agent}_{script_type}")
|
|
||||||
gen_dir = tmp / "genreleases"
|
|
||||||
gen_dir.mkdir()
|
|
||||||
zip_path = _run_release_script(agent, script_type, bash, gen_dir)
|
|
||||||
extracted = tmp / "extracted"
|
|
||||||
extracted.mkdir()
|
|
||||||
with zipfile.ZipFile(zip_path) as zf:
|
|
||||||
zf.extractall(extracted)
|
|
||||||
cache[key] = _collect_relative_files(extracted)
|
|
||||||
return cache[key]
|
|
||||||
return _get
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("script_type", ["sh", "ps"])
|
|
||||||
@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
|
|
||||||
def test_parity_bundled_vs_release_script(agent, script_type, scaffolded_sh, scaffolded_ps, release_script_trees):
|
|
||||||
"""scaffold_from_core_pack() file tree is identical to the ZIP produced by
|
|
||||||
create-release-packages.sh for every agent and script type.
|
|
||||||
|
|
||||||
This is the true end-to-end parity check: the Python offline path must
|
|
||||||
produce exactly the same artifacts as the canonical shell release script.
|
|
||||||
|
|
||||||
Both sides are session-cached: each agent/script_type combination is
|
|
||||||
scaffolded and release-scripted only once across all tests.
|
|
||||||
"""
|
|
||||||
script_tree = release_script_trees(agent, script_type)
|
|
||||||
if script_tree is None:
|
|
||||||
pytest.skip("bash required to run create-release-packages.sh")
|
|
||||||
|
|
||||||
# Reuse session-cached scaffold output
|
|
||||||
if script_type == "sh":
|
|
||||||
bundled_dir = scaffolded_sh(agent)
|
|
||||||
else:
|
|
||||||
bundled_dir = scaffolded_ps(agent)
|
|
||||||
|
|
||||||
bundled_tree = _collect_relative_files(bundled_dir)
|
|
||||||
|
|
||||||
only_bundled = set(bundled_tree) - set(script_tree)
|
|
||||||
only_script = set(script_tree) - set(bundled_tree)
|
|
||||||
|
|
||||||
assert not only_bundled, (
|
|
||||||
f"Agent '{agent}' ({script_type}): files only in bundled output (not in release ZIP):\n "
|
|
||||||
+ "\n ".join(sorted(only_bundled))
|
|
||||||
)
|
|
||||||
assert not only_script, (
|
|
||||||
f"Agent '{agent}' ({script_type}): files only in release ZIP (not in bundled output):\n "
|
|
||||||
+ "\n ".join(sorted(only_script))
|
|
||||||
)
|
|
||||||
|
|
||||||
for name in bundled_tree:
|
|
||||||
assert bundled_tree[name] == script_tree[name], (
|
|
||||||
f"Agent '{agent}' ({script_type}): file '{name}' content differs between "
|
|
||||||
f"bundled output and release script ZIP"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# Section 10 – pyproject.toml force-include covers all template files
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
def test_pyproject_force_include_covers_all_templates():
|
|
||||||
"""Every file in templates/ (excluding commands/) must be listed in
|
|
||||||
pyproject.toml's [tool.hatch.build.targets.wheel.force-include] section.
|
|
||||||
|
|
||||||
This prevents new template files from being silently omitted from the
|
|
||||||
wheel, which would break ``specify init --offline``.
|
|
||||||
"""
|
|
||||||
templates_dir = _REPO_ROOT / "templates"
|
|
||||||
# Collect all files directly in templates/ (not in subdirectories like commands/)
|
|
||||||
repo_template_files = sorted(
|
|
||||||
f.name for f in templates_dir.iterdir()
|
|
||||||
if f.is_file()
|
|
||||||
)
|
|
||||||
assert repo_template_files, "Expected at least one template file in templates/"
|
|
||||||
|
|
||||||
pyproject_path = _REPO_ROOT / "pyproject.toml"
|
|
||||||
with open(pyproject_path, "rb") as f:
|
|
||||||
pyproject = tomllib.load(f)
|
|
||||||
force_include = pyproject.get("tool", {}).get("hatch", {}).get("build", {}).get("targets", {}).get("wheel", {}).get("force-include", {})
|
|
||||||
|
|
||||||
missing = [
|
|
||||||
name for name in repo_template_files
|
|
||||||
if f"templates/{name}" not in force_include
|
|
||||||
]
|
|
||||||
assert not missing, (
|
|
||||||
"Template files not listed in pyproject.toml force-include "
|
|
||||||
"(offline scaffolding will miss them):\n "
|
|
||||||
+ "\n ".join(missing)
|
|
||||||
)
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,190 +0,0 @@
|
|||||||
import stat
|
|
||||||
|
|
||||||
from specify_cli import merge_json_files
|
|
||||||
from specify_cli import handle_vscode_settings
|
|
||||||
|
|
||||||
# --- Dimension 2: Polite Deep Merge Strategy ---
|
|
||||||
|
|
||||||
def test_merge_json_files_type_mismatch_preservation(tmp_path):
|
|
||||||
"""If user has a string but template wants a dict, PRESERVE user's string."""
|
|
||||||
existing_file = tmp_path / "settings.json"
|
|
||||||
# User might have overridden a setting with a simple string or different type
|
|
||||||
existing_file.write_text('{"chat.editor.fontFamily": "CustomFont"}')
|
|
||||||
|
|
||||||
# Template might expect a dict for the same key (hypothetically)
|
|
||||||
new_settings = {
|
|
||||||
"chat.editor.fontFamily": {"font": "TemplateFont"}
|
|
||||||
}
|
|
||||||
|
|
||||||
merged = merge_json_files(existing_file, new_settings)
|
|
||||||
# Result is None because user settings were preserved and nothing else changed
|
|
||||||
assert merged is None
|
|
||||||
|
|
||||||
def test_merge_json_files_deep_nesting(tmp_path):
|
|
||||||
"""Verify deep recursive merging of new keys."""
|
|
||||||
existing_file = tmp_path / "settings.json"
|
|
||||||
existing_file.write_text("""
|
|
||||||
{
|
|
||||||
"a": {
|
|
||||||
"b": {
|
|
||||||
"c": 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
""")
|
|
||||||
|
|
||||||
new_settings = {
|
|
||||||
"a": {
|
|
||||||
"b": {
|
|
||||||
"d": 2 # New nested key
|
|
||||||
},
|
|
||||||
"e": 3 # New mid-level key
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
merged = merge_json_files(existing_file, new_settings)
|
|
||||||
assert merged["a"]["b"]["c"] == 1
|
|
||||||
assert merged["a"]["b"]["d"] == 2
|
|
||||||
assert merged["a"]["e"] == 3
|
|
||||||
|
|
||||||
def test_merge_json_files_empty_existing(tmp_path):
|
|
||||||
"""Merging into an empty/new file."""
|
|
||||||
existing_file = tmp_path / "empty.json"
|
|
||||||
existing_file.write_text("{}")
|
|
||||||
|
|
||||||
new_settings = {"a": 1}
|
|
||||||
merged = merge_json_files(existing_file, new_settings)
|
|
||||||
assert merged == {"a": 1}
|
|
||||||
|
|
||||||
# --- Dimension 3: Real-world Simulation ---
|
|
||||||
|
|
||||||
def test_merge_vscode_realistic_scenario(tmp_path):
|
|
||||||
"""A realistic VSCode settings.json with many existing preferences, comments, and trailing commas."""
|
|
||||||
existing_file = tmp_path / "vscode_settings.json"
|
|
||||||
existing_file.write_text("""
|
|
||||||
{
|
|
||||||
"editor.fontSize": 12,
|
|
||||||
"editor.formatOnSave": true, /* block comment */
|
|
||||||
"files.exclude": {
|
|
||||||
"**/.git": true,
|
|
||||||
"**/node_modules": true,
|
|
||||||
},
|
|
||||||
"chat.promptFilesRecommendations": {
|
|
||||||
"existing.tool": true,
|
|
||||||
} // User comment
|
|
||||||
}
|
|
||||||
""")
|
|
||||||
|
|
||||||
template_settings = {
|
|
||||||
"chat.promptFilesRecommendations": {
|
|
||||||
"speckit.specify": True,
|
|
||||||
"speckit.plan": True
|
|
||||||
},
|
|
||||||
"chat.tools.terminal.autoApprove": {
|
|
||||||
".specify/scripts/bash/": True
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
merged = merge_json_files(existing_file, template_settings)
|
|
||||||
|
|
||||||
# Check preservation
|
|
||||||
assert merged["editor.fontSize"] == 12
|
|
||||||
assert merged["files.exclude"]["**/.git"] is True
|
|
||||||
assert merged["chat.promptFilesRecommendations"]["existing.tool"] is True
|
|
||||||
|
|
||||||
# Check additions
|
|
||||||
assert merged["chat.promptFilesRecommendations"]["speckit.specify"] is True
|
|
||||||
assert merged["chat.tools.terminal.autoApprove"][".specify/scripts/bash/"] is True
|
|
||||||
|
|
||||||
# --- Dimension 4: Error Handling & Robustness ---
|
|
||||||
|
|
||||||
def test_merge_json_files_with_bom(tmp_path):
|
|
||||||
"""Test files with UTF-8 BOM (sometimes created on Windows)."""
|
|
||||||
existing_file = tmp_path / "bom.json"
|
|
||||||
content = '{"a": 1}'
|
|
||||||
# Prepend UTF-8 BOM
|
|
||||||
existing_file.write_bytes(b'\xef\xbb\xbf' + content.encode('utf-8'))
|
|
||||||
|
|
||||||
new_settings = {"b": 2}
|
|
||||||
merged = merge_json_files(existing_file, new_settings)
|
|
||||||
assert merged == {"a": 1, "b": 2}
|
|
||||||
|
|
||||||
def test_merge_json_files_not_a_dictionary_template(tmp_path):
|
|
||||||
"""If for some reason new_content is not a dict, PRESERVE existing settings by returning None."""
|
|
||||||
existing_file = tmp_path / "ok.json"
|
|
||||||
existing_file.write_text('{"a": 1}')
|
|
||||||
|
|
||||||
# Secure fallback: return None to skip writing and avoid clobbering
|
|
||||||
assert merge_json_files(existing_file, ["not", "a", "dict"]) is None
|
|
||||||
|
|
||||||
def test_merge_json_files_unparseable_existing(tmp_path):
|
|
||||||
"""If the existing file is unparseable JSON, return None to avoid overwriting it."""
|
|
||||||
bad_file = tmp_path / "bad.json"
|
|
||||||
bad_file.write_text('{"a": 1, missing_value}') # Invalid JSON
|
|
||||||
|
|
||||||
assert merge_json_files(bad_file, {"b": 2}) is None
|
|
||||||
|
|
||||||
|
|
||||||
def test_merge_json_files_list_preservation(tmp_path):
|
|
||||||
"""Verify that existing list values are preserved and NOT merged or overwritten."""
|
|
||||||
existing_file = tmp_path / "list.json"
|
|
||||||
existing_file.write_text('{"my.list": ["user_item"]}')
|
|
||||||
|
|
||||||
template_settings = {
|
|
||||||
"my.list": ["template_item"]
|
|
||||||
}
|
|
||||||
|
|
||||||
merged = merge_json_files(existing_file, template_settings)
|
|
||||||
# The polite merge policy says: keep existing values if they exist and aren't both dicts.
|
|
||||||
# Since nothing changed, it returns None.
|
|
||||||
assert merged is None
|
|
||||||
|
|
||||||
def test_merge_json_files_no_changes(tmp_path):
|
|
||||||
"""If the merge doesn't introduce any new keys or changes, return None to skip rewrite."""
|
|
||||||
existing_file = tmp_path / "no_change.json"
|
|
||||||
existing_file.write_text('{"a": 1, "b": {"c": 2}}')
|
|
||||||
|
|
||||||
template_settings = {
|
|
||||||
"a": 1, # Already exists
|
|
||||||
"b": {"c": 2} # Already exists nested
|
|
||||||
}
|
|
||||||
|
|
||||||
# Should return None because result == existing
|
|
||||||
assert merge_json_files(existing_file, template_settings) is None
|
|
||||||
|
|
||||||
def test_merge_json_files_type_mismatch_no_op(tmp_path):
|
|
||||||
"""If a key exists with different type and we preserve it, it might still result in no change."""
|
|
||||||
existing_file = tmp_path / "mismatch_no_op.json"
|
|
||||||
existing_file.write_text('{"a": "user_string"}')
|
|
||||||
|
|
||||||
template_settings = {
|
|
||||||
"a": {"key": "template_dict"} # Mismatch, will be ignored
|
|
||||||
}
|
|
||||||
|
|
||||||
# Should return None because we preserved the user's string and nothing else changed
|
|
||||||
assert merge_json_files(existing_file, template_settings) is None
|
|
||||||
|
|
||||||
|
|
||||||
def test_handle_vscode_settings_preserves_mode_on_atomic_write(tmp_path):
|
|
||||||
"""Atomic rewrite should preserve existing file mode bits."""
|
|
||||||
vscode_dir = tmp_path / ".vscode"
|
|
||||||
vscode_dir.mkdir()
|
|
||||||
dest_file = vscode_dir / "settings.json"
|
|
||||||
template_file = tmp_path / "template_settings.json"
|
|
||||||
|
|
||||||
dest_file.write_text('{"a": 1}\n', encoding="utf-8")
|
|
||||||
dest_file.chmod(0o640)
|
|
||||||
before_mode = stat.S_IMODE(dest_file.stat().st_mode)
|
|
||||||
|
|
||||||
template_file.write_text('{"b": 2}\n', encoding="utf-8")
|
|
||||||
|
|
||||||
handle_vscode_settings(
|
|
||||||
template_file,
|
|
||||||
dest_file,
|
|
||||||
"settings.json",
|
|
||||||
verbose=False,
|
|
||||||
tracker=None,
|
|
||||||
)
|
|
||||||
|
|
||||||
after_mode = stat.S_IMODE(dest_file.stat().st_mode)
|
|
||||||
assert after_mode == before_mode
|
|
||||||
@@ -32,7 +32,6 @@ from specify_cli.presets import (
|
|||||||
PresetCompatibilityError,
|
PresetCompatibilityError,
|
||||||
VALID_PRESET_TEMPLATE_TYPES,
|
VALID_PRESET_TEMPLATE_TYPES,
|
||||||
)
|
)
|
||||||
from specify_cli.extensions import ExtensionRegistry
|
|
||||||
|
|
||||||
|
|
||||||
# ===== Fixtures =====
|
# ===== Fixtures =====
|
||||||
@@ -369,172 +368,6 @@ class TestPresetRegistry:
|
|||||||
registry = PresetRegistry(packs_dir)
|
registry = PresetRegistry(packs_dir)
|
||||||
assert registry.get("nonexistent") is None
|
assert registry.get("nonexistent") is None
|
||||||
|
|
||||||
def test_restore(self, temp_dir):
|
|
||||||
"""Test restore() preserves timestamps exactly."""
|
|
||||||
packs_dir = temp_dir / "packs"
|
|
||||||
packs_dir.mkdir()
|
|
||||||
registry = PresetRegistry(packs_dir)
|
|
||||||
|
|
||||||
# Create original entry with a specific timestamp
|
|
||||||
original_metadata = {
|
|
||||||
"version": "1.0.0",
|
|
||||||
"source": "local",
|
|
||||||
"installed_at": "2025-01-15T10:30:00+00:00",
|
|
||||||
"enabled": True,
|
|
||||||
}
|
|
||||||
registry.restore("test-pack", original_metadata)
|
|
||||||
|
|
||||||
# Verify exact restoration
|
|
||||||
restored = registry.get("test-pack")
|
|
||||||
assert restored["installed_at"] == "2025-01-15T10:30:00+00:00"
|
|
||||||
assert restored["version"] == "1.0.0"
|
|
||||||
assert restored["enabled"] is True
|
|
||||||
|
|
||||||
def test_restore_rejects_none_metadata(self, temp_dir):
|
|
||||||
"""Test restore() raises ValueError for None metadata."""
|
|
||||||
packs_dir = temp_dir / "packs"
|
|
||||||
packs_dir.mkdir()
|
|
||||||
registry = PresetRegistry(packs_dir)
|
|
||||||
|
|
||||||
with pytest.raises(ValueError, match="metadata must be a dict"):
|
|
||||||
registry.restore("test-pack", None)
|
|
||||||
|
|
||||||
def test_restore_rejects_non_dict_metadata(self, temp_dir):
|
|
||||||
"""Test restore() raises ValueError for non-dict metadata."""
|
|
||||||
packs_dir = temp_dir / "packs"
|
|
||||||
packs_dir.mkdir()
|
|
||||||
registry = PresetRegistry(packs_dir)
|
|
||||||
|
|
||||||
with pytest.raises(ValueError, match="metadata must be a dict"):
|
|
||||||
registry.restore("test-pack", "not-a-dict")
|
|
||||||
|
|
||||||
with pytest.raises(ValueError, match="metadata must be a dict"):
|
|
||||||
registry.restore("test-pack", ["list", "not", "dict"])
|
|
||||||
|
|
||||||
def test_restore_uses_deep_copy(self, temp_dir):
|
|
||||||
"""Test restore() deep copies metadata to prevent mutation."""
|
|
||||||
packs_dir = temp_dir / "packs"
|
|
||||||
packs_dir.mkdir()
|
|
||||||
registry = PresetRegistry(packs_dir)
|
|
||||||
|
|
||||||
original_metadata = {
|
|
||||||
"version": "1.0.0",
|
|
||||||
"nested": {"key": "original"},
|
|
||||||
}
|
|
||||||
registry.restore("test-pack", original_metadata)
|
|
||||||
|
|
||||||
# Mutate the original metadata after restore
|
|
||||||
original_metadata["version"] = "MUTATED"
|
|
||||||
original_metadata["nested"]["key"] = "MUTATED"
|
|
||||||
|
|
||||||
# Registry should have the original values
|
|
||||||
stored = registry.get("test-pack")
|
|
||||||
assert stored["version"] == "1.0.0"
|
|
||||||
assert stored["nested"]["key"] == "original"
|
|
||||||
|
|
||||||
def test_get_returns_deep_copy(self, temp_dir):
|
|
||||||
"""Test that get() returns a deep copy to prevent mutation."""
|
|
||||||
packs_dir = temp_dir / "packs"
|
|
||||||
packs_dir.mkdir()
|
|
||||||
registry = PresetRegistry(packs_dir)
|
|
||||||
|
|
||||||
registry.add("test-pack", {"version": "1.0.0", "nested": {"key": "original"}})
|
|
||||||
|
|
||||||
# Get and mutate the returned copy
|
|
||||||
metadata = registry.get("test-pack")
|
|
||||||
metadata["version"] = "MUTATED"
|
|
||||||
metadata["nested"]["key"] = "MUTATED"
|
|
||||||
|
|
||||||
# Original should be unchanged
|
|
||||||
fresh = registry.get("test-pack")
|
|
||||||
assert fresh["version"] == "1.0.0"
|
|
||||||
assert fresh["nested"]["key"] == "original"
|
|
||||||
|
|
||||||
def test_get_returns_none_for_corrupted_entry(self, temp_dir):
|
|
||||||
"""Test that get() returns None for corrupted (non-dict) entries."""
|
|
||||||
packs_dir = temp_dir / "packs"
|
|
||||||
packs_dir.mkdir()
|
|
||||||
registry = PresetRegistry(packs_dir)
|
|
||||||
|
|
||||||
# Directly corrupt the registry with non-dict entries
|
|
||||||
registry.data["presets"]["corrupted-string"] = "not a dict"
|
|
||||||
registry.data["presets"]["corrupted-list"] = ["not", "a", "dict"]
|
|
||||||
registry.data["presets"]["corrupted-int"] = 42
|
|
||||||
registry._save()
|
|
||||||
|
|
||||||
# All corrupted entries should return None
|
|
||||||
assert registry.get("corrupted-string") is None
|
|
||||||
assert registry.get("corrupted-list") is None
|
|
||||||
assert registry.get("corrupted-int") is None
|
|
||||||
# Non-existent should also return None
|
|
||||||
assert registry.get("nonexistent") is None
|
|
||||||
|
|
||||||
def test_list_returns_deep_copy(self, temp_dir):
|
|
||||||
"""Test that list() returns deep copies to prevent mutation."""
|
|
||||||
packs_dir = temp_dir / "packs"
|
|
||||||
packs_dir.mkdir()
|
|
||||||
registry = PresetRegistry(packs_dir)
|
|
||||||
|
|
||||||
registry.add("test-pack", {"version": "1.0.0", "nested": {"key": "original"}})
|
|
||||||
|
|
||||||
# Get list and mutate
|
|
||||||
all_packs = registry.list()
|
|
||||||
all_packs["test-pack"]["version"] = "MUTATED"
|
|
||||||
all_packs["test-pack"]["nested"]["key"] = "MUTATED"
|
|
||||||
|
|
||||||
# Original should be unchanged
|
|
||||||
fresh = registry.get("test-pack")
|
|
||||||
assert fresh["version"] == "1.0.0"
|
|
||||||
assert fresh["nested"]["key"] == "original"
|
|
||||||
|
|
||||||
def test_list_returns_empty_dict_for_corrupted_registry(self, temp_dir):
|
|
||||||
"""Test that list() returns empty dict when presets is not a dict."""
|
|
||||||
packs_dir = temp_dir / "packs"
|
|
||||||
packs_dir.mkdir()
|
|
||||||
registry = PresetRegistry(packs_dir)
|
|
||||||
|
|
||||||
# Corrupt the registry - presets is a list instead of dict
|
|
||||||
registry.data["presets"] = ["not", "a", "dict"]
|
|
||||||
registry._save()
|
|
||||||
|
|
||||||
# list() should return empty dict, not crash
|
|
||||||
result = registry.list()
|
|
||||||
assert result == {}
|
|
||||||
|
|
||||||
def test_list_by_priority_excludes_disabled(self, temp_dir):
|
|
||||||
"""Test that list_by_priority excludes disabled presets by default."""
|
|
||||||
packs_dir = temp_dir / "packs"
|
|
||||||
packs_dir.mkdir()
|
|
||||||
registry = PresetRegistry(packs_dir)
|
|
||||||
|
|
||||||
registry.add("pack-enabled", {"version": "1.0.0", "enabled": True, "priority": 5})
|
|
||||||
registry.add("pack-disabled", {"version": "1.0.0", "enabled": False, "priority": 1})
|
|
||||||
registry.add("pack-default", {"version": "1.0.0", "priority": 10}) # no enabled field = True
|
|
||||||
|
|
||||||
# Default: exclude disabled
|
|
||||||
by_priority = registry.list_by_priority()
|
|
||||||
pack_ids = [p[0] for p in by_priority]
|
|
||||||
assert "pack-enabled" in pack_ids
|
|
||||||
assert "pack-default" in pack_ids
|
|
||||||
assert "pack-disabled" not in pack_ids
|
|
||||||
|
|
||||||
def test_list_by_priority_includes_disabled_when_requested(self, temp_dir):
|
|
||||||
"""Test that list_by_priority includes disabled presets when requested."""
|
|
||||||
packs_dir = temp_dir / "packs"
|
|
||||||
packs_dir.mkdir()
|
|
||||||
registry = PresetRegistry(packs_dir)
|
|
||||||
|
|
||||||
registry.add("pack-enabled", {"version": "1.0.0", "enabled": True, "priority": 5})
|
|
||||||
registry.add("pack-disabled", {"version": "1.0.0", "enabled": False, "priority": 1})
|
|
||||||
|
|
||||||
# Include disabled
|
|
||||||
by_priority = registry.list_by_priority(include_disabled=True)
|
|
||||||
pack_ids = [p[0] for p in by_priority]
|
|
||||||
assert "pack-enabled" in pack_ids
|
|
||||||
assert "pack-disabled" in pack_ids
|
|
||||||
# Disabled pack has lower priority number, so it comes first when included
|
|
||||||
assert pack_ids[0] == "pack-disabled"
|
|
||||||
|
|
||||||
|
|
||||||
# ===== PresetManager Tests =====
|
# ===== PresetManager Tests =====
|
||||||
|
|
||||||
@@ -740,24 +573,6 @@ class TestRegistryPriority:
|
|||||||
assert sorted_packs[0][0] == "pack-b"
|
assert sorted_packs[0][0] == "pack-b"
|
||||||
assert sorted_packs[1][0] == "pack-a"
|
assert sorted_packs[1][0] == "pack-a"
|
||||||
|
|
||||||
def test_list_by_priority_invalid_priority_defaults(self, temp_dir):
|
|
||||||
"""Malformed priority values fall back to the default priority."""
|
|
||||||
packs_dir = temp_dir / "packs"
|
|
||||||
packs_dir.mkdir()
|
|
||||||
registry = PresetRegistry(packs_dir)
|
|
||||||
|
|
||||||
registry.add("pack-high", {"version": "1.0.0", "priority": 1})
|
|
||||||
registry.data["presets"]["pack-invalid"] = {
|
|
||||||
"version": "1.0.0",
|
|
||||||
"priority": "high",
|
|
||||||
}
|
|
||||||
registry._save()
|
|
||||||
|
|
||||||
sorted_packs = registry.list_by_priority()
|
|
||||||
|
|
||||||
assert [item[0] for item in sorted_packs] == ["pack-high", "pack-invalid"]
|
|
||||||
assert sorted_packs[1][1]["priority"] == 10
|
|
||||||
|
|
||||||
|
|
||||||
# ===== PresetResolver Tests =====
|
# ===== PresetResolver Tests =====
|
||||||
|
|
||||||
@@ -863,54 +678,11 @@ class TestPresetResolver:
|
|||||||
ext_template = ext_templates_dir / "custom-template.md"
|
ext_template = ext_templates_dir / "custom-template.md"
|
||||||
ext_template.write_text("# Extension Custom Template\n")
|
ext_template.write_text("# Extension Custom Template\n")
|
||||||
|
|
||||||
# Register extension in registry
|
|
||||||
extensions_dir = project_dir / ".specify" / "extensions"
|
|
||||||
ext_registry = ExtensionRegistry(extensions_dir)
|
|
||||||
ext_registry.add("my-ext", {"version": "1.0.0", "priority": 10})
|
|
||||||
|
|
||||||
resolver = PresetResolver(project_dir)
|
resolver = PresetResolver(project_dir)
|
||||||
result = resolver.resolve("custom-template")
|
result = resolver.resolve("custom-template")
|
||||||
assert result is not None
|
assert result is not None
|
||||||
assert "Extension Custom Template" in result.read_text()
|
assert "Extension Custom Template" in result.read_text()
|
||||||
|
|
||||||
def test_resolve_disabled_extension_templates_skipped(self, project_dir):
|
|
||||||
"""Test that disabled extension templates are not resolved."""
|
|
||||||
# Create extension with templates
|
|
||||||
ext_dir = project_dir / ".specify" / "extensions" / "disabled-ext"
|
|
||||||
ext_templates_dir = ext_dir / "templates"
|
|
||||||
ext_templates_dir.mkdir(parents=True)
|
|
||||||
ext_template = ext_templates_dir / "disabled-template.md"
|
|
||||||
ext_template.write_text("# Disabled Extension Template\n")
|
|
||||||
|
|
||||||
# Register extension as disabled
|
|
||||||
extensions_dir = project_dir / ".specify" / "extensions"
|
|
||||||
ext_registry = ExtensionRegistry(extensions_dir)
|
|
||||||
ext_registry.add("disabled-ext", {"version": "1.0.0", "priority": 1, "enabled": False})
|
|
||||||
|
|
||||||
# Template should NOT be resolved because extension is disabled
|
|
||||||
resolver = PresetResolver(project_dir)
|
|
||||||
result = resolver.resolve("disabled-template")
|
|
||||||
assert result is None, "Disabled extension template should not be resolved"
|
|
||||||
|
|
||||||
def test_resolve_disabled_extension_not_picked_up_as_unregistered(self, project_dir):
|
|
||||||
"""Test that disabled extensions are not picked up via unregistered dir scan."""
|
|
||||||
# Create extension directory with templates
|
|
||||||
ext_dir = project_dir / ".specify" / "extensions" / "test-disabled-ext"
|
|
||||||
ext_templates_dir = ext_dir / "templates"
|
|
||||||
ext_templates_dir.mkdir(parents=True)
|
|
||||||
ext_template = ext_templates_dir / "unique-disabled-template.md"
|
|
||||||
ext_template.write_text("# Should Not Resolve\n")
|
|
||||||
|
|
||||||
# Register the extension but disable it
|
|
||||||
extensions_dir = project_dir / ".specify" / "extensions"
|
|
||||||
ext_registry = ExtensionRegistry(extensions_dir)
|
|
||||||
ext_registry.add("test-disabled-ext", {"version": "1.0.0", "enabled": False})
|
|
||||||
|
|
||||||
# Verify the template is NOT resolved (even though the directory exists)
|
|
||||||
resolver = PresetResolver(project_dir)
|
|
||||||
result = resolver.resolve("unique-disabled-template")
|
|
||||||
assert result is None, "Disabled extension should not be picked up as unregistered"
|
|
||||||
|
|
||||||
def test_resolve_pack_over_extension(self, project_dir, pack_dir, temp_dir, valid_pack_data):
|
def test_resolve_pack_over_extension(self, project_dir, pack_dir, temp_dir, valid_pack_data):
|
||||||
"""Test that pack templates take priority over extension templates."""
|
"""Test that pack templates take priority over extension templates."""
|
||||||
# Create extension with templates
|
# Create extension with templates
|
||||||
@@ -969,15 +741,10 @@ class TestPresetResolver:
|
|||||||
ext_template = ext_templates_dir / "unique-template.md"
|
ext_template = ext_templates_dir / "unique-template.md"
|
||||||
ext_template.write_text("# Unique\n")
|
ext_template.write_text("# Unique\n")
|
||||||
|
|
||||||
# Register extension in registry
|
|
||||||
extensions_dir = project_dir / ".specify" / "extensions"
|
|
||||||
ext_registry = ExtensionRegistry(extensions_dir)
|
|
||||||
ext_registry.add("my-ext", {"version": "1.0.0", "priority": 10})
|
|
||||||
|
|
||||||
resolver = PresetResolver(project_dir)
|
resolver = PresetResolver(project_dir)
|
||||||
result = resolver.resolve_with_source("unique-template")
|
result = resolver.resolve_with_source("unique-template")
|
||||||
assert result is not None
|
assert result is not None
|
||||||
assert result["source"] == "extension:my-ext v1.0.0"
|
assert result["source"] == "extension:my-ext"
|
||||||
|
|
||||||
def test_resolve_with_source_not_found(self, project_dir):
|
def test_resolve_with_source_not_found(self, project_dir):
|
||||||
"""Test resolve_with_source for nonexistent template."""
|
"""Test resolve_with_source for nonexistent template."""
|
||||||
@@ -998,104 +765,6 @@ class TestPresetResolver:
|
|||||||
assert result is None
|
assert result is None
|
||||||
|
|
||||||
|
|
||||||
class TestExtensionPriorityResolution:
|
|
||||||
"""Test extension priority resolution with registered and unregistered extensions."""
|
|
||||||
|
|
||||||
def test_unregistered_beats_registered_with_lower_precedence(self, project_dir):
|
|
||||||
"""Unregistered extension (implicit priority 10) beats registered with priority 20."""
|
|
||||||
extensions_dir = project_dir / ".specify" / "extensions"
|
|
||||||
extensions_dir.mkdir(parents=True, exist_ok=True)
|
|
||||||
|
|
||||||
# Create registered extension with priority 20 (lower precedence than 10)
|
|
||||||
registered_dir = extensions_dir / "registered-ext"
|
|
||||||
(registered_dir / "templates").mkdir(parents=True)
|
|
||||||
        (registered_dir / "templates" / "test-template.md").write_text("# From Registered\n")

        ext_registry = ExtensionRegistry(extensions_dir)
        ext_registry.add("registered-ext", {"version": "1.0.0", "priority": 20})

        # Create unregistered extension directory (implicit priority 10)
        unregistered_dir = extensions_dir / "unregistered-ext"
        (unregistered_dir / "templates").mkdir(parents=True)
        (unregistered_dir / "templates" / "test-template.md").write_text("# From Unregistered\n")

        # Unregistered (priority 10) should beat registered (priority 20)
        resolver = PresetResolver(project_dir)
        result = resolver.resolve("test-template")
        assert result is not None
        assert "From Unregistered" in result.read_text()

    def test_registered_with_higher_precedence_beats_unregistered(self, project_dir):
        """Registered extension with priority 5 beats unregistered (implicit priority 10)."""
        extensions_dir = project_dir / ".specify" / "extensions"
        extensions_dir.mkdir(parents=True, exist_ok=True)

        # Create registered extension with priority 5 (higher precedence than 10)
        registered_dir = extensions_dir / "registered-ext"
        (registered_dir / "templates").mkdir(parents=True)
        (registered_dir / "templates" / "test-template.md").write_text("# From Registered\n")

        ext_registry = ExtensionRegistry(extensions_dir)
        ext_registry.add("registered-ext", {"version": "1.0.0", "priority": 5})

        # Create unregistered extension directory (implicit priority 10)
        unregistered_dir = extensions_dir / "unregistered-ext"
        (unregistered_dir / "templates").mkdir(parents=True)
        (unregistered_dir / "templates" / "test-template.md").write_text("# From Unregistered\n")

        # Registered (priority 5) should beat unregistered (priority 10)
        resolver = PresetResolver(project_dir)
        result = resolver.resolve("test-template")
        assert result is not None
        assert "From Registered" in result.read_text()

    def test_unregistered_attribution_with_priority_ordering(self, project_dir):
        """Test resolve_with_source correctly attributes unregistered extension."""
        extensions_dir = project_dir / ".specify" / "extensions"
        extensions_dir.mkdir(parents=True, exist_ok=True)

        # Create registered extension with priority 20
        registered_dir = extensions_dir / "registered-ext"
        (registered_dir / "templates").mkdir(parents=True)
        (registered_dir / "templates" / "test-template.md").write_text("# From Registered\n")

        ext_registry = ExtensionRegistry(extensions_dir)
        ext_registry.add("registered-ext", {"version": "1.0.0", "priority": 20})

        # Create unregistered extension (implicit priority 10)
        unregistered_dir = extensions_dir / "unregistered-ext"
        (unregistered_dir / "templates").mkdir(parents=True)
        (unregistered_dir / "templates" / "test-template.md").write_text("# From Unregistered\n")

        # Attribution should show unregistered extension
        resolver = PresetResolver(project_dir)
        result = resolver.resolve_with_source("test-template")
        assert result is not None
        assert "unregistered-ext" in result["source"]
        assert "(unregistered)" in result["source"]

    def test_same_priority_sorted_alphabetically(self, project_dir):
        """Extensions with same priority are sorted alphabetically by ID."""
        extensions_dir = project_dir / ".specify" / "extensions"
        extensions_dir.mkdir(parents=True, exist_ok=True)

        # Create two unregistered extensions (both implicit priority 10)
        # "aaa-ext" should come before "zzz-ext" alphabetically
        zzz_dir = extensions_dir / "zzz-ext"
        (zzz_dir / "templates").mkdir(parents=True)
        (zzz_dir / "templates" / "test-template.md").write_text("# From ZZZ\n")

        aaa_dir = extensions_dir / "aaa-ext"
        (aaa_dir / "templates").mkdir(parents=True)
        (aaa_dir / "templates" / "test-template.md").write_text("# From AAA\n")

        # AAA should win due to alphabetical ordering at same priority
        resolver = PresetResolver(project_dir)
        result = resolver.resolve("test-template")
        assert result is not None
        assert "From AAA" in result.read_text()

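# Editorial sketch, not part of the original file: the contract the tests
# above pin down is "lower priority number wins; ties break alphabetically
# by ID", i.e. a composite sort key over (priority, extension_id):
def _sketch_priority_order(candidates: list[tuple[str, int]]) -> list[str]:
    return [ext_id for ext_id, _ in sorted(candidates, key=lambda c: (c[1], c[0]))]
# _sketch_priority_order([("zzz-ext", 10), ("aaa-ext", 10), ("registered-ext", 20)])
# == ["aaa-ext", "zzz-ext", "registered-ext"]
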

# ===== PresetCatalog Tests =====

@@ -1310,13 +979,8 @@ class TestIntegration:
        ext_templates_dir.mkdir(parents=True)
        (ext_templates_dir / "spec-template.md").write_text("# Extension\n")

        # Register extension in registry
        extensions_dir = project_dir / ".specify" / "extensions"
        ext_registry = ExtensionRegistry(extensions_dir)
        ext_registry.add("my-ext", {"version": "1.0.0", "priority": 10})

        result = resolver.resolve_with_source("spec-template")
        assert result["source"] == "extension:my-ext v1.0.0"
        assert result["source"] == "extension:my-ext"

        # Install pack — should win over extension
        manager = PresetManager(project_dir)
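# For reference, the source-attribution strings asserted in this file (exact
# formats beyond the asserted substrings are assumptions):
#   registered extension   -> "extension:<id>"
#   unregistered extension -> contains the directory name and "(unregistered)"
#   installed pack         -> wins over both when present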
@@ -2046,348 +1710,3 @@ class TestPresetSkills:
        metadata = manager.registry.get("self-test")
        assert metadata.get("registered_skills", []) == []


class TestPresetSetPriority:
    """Test preset set-priority CLI command."""

    def test_set_priority_changes_priority(self, project_dir, pack_dir):
        """Test set-priority command changes preset priority."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset with default priority
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")

        # Verify default priority
        assert manager.registry.get("test-pack")["priority"] == 10

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "set-priority", "test-pack", "5"])

        assert result.exit_code == 0, result.output
        assert "priority changed: 10 → 5" in result.output

        # Reload registry to see updated value
        manager2 = PresetManager(project_dir)
        assert manager2.registry.get("test-pack")["priority"] == 5

    def test_set_priority_same_value_no_change(self, project_dir, pack_dir):
        """Test set-priority with same value shows already set message."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset with priority 5
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5", priority=5)

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "set-priority", "test-pack", "5"])

        assert result.exit_code == 0, result.output
        assert "already has priority 5" in result.output

    def test_set_priority_invalid_value(self, project_dir, pack_dir):
        """Test set-priority rejects invalid priority values."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "set-priority", "test-pack", "0"])

        assert result.exit_code == 1, result.output
        assert "Priority must be a positive integer" in result.output

    def test_set_priority_not_installed(self, project_dir):
        """Test set-priority fails for non-installed preset."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "set-priority", "nonexistent", "5"])

        assert result.exit_code == 1, result.output
        assert "not installed" in result.output.lower()

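# Editorial sketch: the command surface exercised above, as it would look from
# a shell (the `specify` entry point and exact wording beyond the asserted
# substrings are assumptions):
#
#   $ specify preset set-priority test-pack 5
#   test-pack priority changed: 10 → 5      (lower number = higher precedence)
#
#   $ specify preset set-priority test-pack 0
#   Priority must be a positive integer     (exits 1)
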
class TestPresetPriorityBackwardsCompatibility:
    """Test backwards compatibility for presets installed before priority feature."""

    def test_legacy_preset_without_priority_field(self, temp_dir):
        """Presets installed before priority feature should default to 10."""
        presets_dir = temp_dir / ".specify" / "presets"
        presets_dir.mkdir(parents=True)

        # Simulate legacy registry entry without priority field
        registry = PresetRegistry(presets_dir)
        registry.data["presets"]["legacy-pack"] = {
            "version": "1.0.0",
            "source": "local",
            "enabled": True,
            "installed_at": "2025-01-01T00:00:00Z",
            # No "priority" field - simulates pre-feature preset
        }
        registry._save()

        # Reload registry
        registry2 = PresetRegistry(presets_dir)

        # list_by_priority should use default of 10
        result = registry2.list_by_priority()
        assert len(result) == 1
        assert result[0][0] == "legacy-pack"
        # Priority defaults to 10 and is normalized in returned metadata
        assert result[0][1]["priority"] == 10

    def test_legacy_preset_in_list_installed(self, project_dir, pack_dir):
        """list_installed returns priority=10 for legacy presets without priority field."""
        manager = PresetManager(project_dir)

        # Install preset normally
        manager.install_from_directory(pack_dir, "0.1.5")

        # Manually remove priority to simulate legacy preset
        pack_data = manager.registry.data["presets"]["test-pack"]
        del pack_data["priority"]
        manager.registry._save()

        # list_installed should still return priority=10
        installed = manager.list_installed()
        assert len(installed) == 1
        assert installed[0]["priority"] == 10

    def test_mixed_legacy_and_new_presets_ordering(self, temp_dir):
        """Legacy presets (no priority) sort with default=10 among prioritized presets."""
        presets_dir = temp_dir / ".specify" / "presets"
        presets_dir.mkdir(parents=True)

        registry = PresetRegistry(presets_dir)

        # Add preset with explicit priority=5
        registry.add("pack-with-priority", {"version": "1.0.0", "priority": 5})

        # Add legacy preset without priority (manually)
        registry.data["presets"]["legacy-pack"] = {
            "version": "1.0.0",
            "source": "local",
            "enabled": True,
            # No priority field
        }

        # Add another preset with priority=15
        registry.add("low-priority-pack", {"version": "1.0.0", "priority": 15})
        registry._save()

        # Reload and check ordering
        registry2 = PresetRegistry(presets_dir)
        sorted_presets = registry2.list_by_priority()

        # Should be: pack-with-priority (5), legacy-pack (default 10), low-priority-pack (15)
        assert [p[0] for p in sorted_presets] == [
            "pack-with-priority",
            "legacy-pack",
            "low-priority-pack",
        ]

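# Editorial sketch (assumed implementation, inferred from the assertions
# above): entries written before the priority feature simply lack the key,
# and every read path defaults it to 10:
def _sketch_list_by_priority(presets: dict) -> list:
    return sorted(presets.items(), key=lambda kv: (kv[1].get("priority", 10), kv[0]))
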
class TestPresetEnableDisable:
    """Test preset enable/disable CLI commands."""

    def test_disable_preset(self, project_dir, pack_dir):
        """Test disable command sets enabled=False."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")

        # Verify initially enabled
        assert manager.registry.get("test-pack").get("enabled", True) is True

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "disable", "test-pack"])

        assert result.exit_code == 0, result.output
        assert "disabled" in result.output.lower()

        # Reload registry to see updated value
        manager2 = PresetManager(project_dir)
        assert manager2.registry.get("test-pack")["enabled"] is False

    def test_enable_preset(self, project_dir, pack_dir):
        """Test enable command sets enabled=True."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset and disable it
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")
        manager.registry.update("test-pack", {"enabled": False})

        # Verify disabled
        assert manager.registry.get("test-pack")["enabled"] is False

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "enable", "test-pack"])

        assert result.exit_code == 0, result.output
        assert "enabled" in result.output.lower()

        # Reload registry to see updated value
        manager2 = PresetManager(project_dir)
        assert manager2.registry.get("test-pack")["enabled"] is True

    def test_disable_already_disabled(self, project_dir, pack_dir):
        """Test disable on already disabled preset shows warning."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset and disable it
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")
        manager.registry.update("test-pack", {"enabled": False})

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "disable", "test-pack"])

        assert result.exit_code == 0, result.output
        assert "already disabled" in result.output.lower()

    def test_enable_already_enabled(self, project_dir, pack_dir):
        """Test enable on already enabled preset shows warning."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset (enabled by default)
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "enable", "test-pack"])

        assert result.exit_code == 0, result.output
        assert "already enabled" in result.output.lower()

    def test_disable_not_installed(self, project_dir):
        """Test disable fails for non-installed preset."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "disable", "nonexistent"])

        assert result.exit_code == 1, result.output
        assert "not installed" in result.output.lower()

    def test_enable_not_installed(self, project_dir):
        """Test enable fails for non-installed preset."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "enable", "nonexistent"])

        assert result.exit_code == 1, result.output
        assert "not installed" in result.output.lower()

    def test_disabled_preset_excluded_from_resolution(self, project_dir, pack_dir):
        """Test that disabled presets are excluded from template resolution."""
        # Install preset with a template
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")

        # Create a template in the preset directory
        preset_template = project_dir / ".specify" / "presets" / "test-pack" / "templates" / "test-template.md"
        preset_template.parent.mkdir(parents=True, exist_ok=True)
        preset_template.write_text("# Template from test-pack")

        resolver = PresetResolver(project_dir)

        # Template should be found when enabled
        result = resolver.resolve("test-template", "template")
        assert result is not None
        assert "test-pack" in str(result)

        # Disable the preset
        manager.registry.update("test-pack", {"enabled": False})

        # Template should NOT be found when disabled
        resolver2 = PresetResolver(project_dir)
        result2 = resolver2.resolve("test-template", "template")
        assert result2 is None

    def test_enable_corrupted_registry_entry(self, project_dir, pack_dir):
        """Test enable fails gracefully for corrupted registry entry."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset then corrupt the registry entry
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")
        manager.registry.data["presets"]["test-pack"] = "corrupted-string"
        manager.registry._save()

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "enable", "test-pack"])

        assert result.exit_code == 1
        assert "corrupted state" in result.output.lower()

    def test_disable_corrupted_registry_entry(self, project_dir, pack_dir):
        """Test disable fails gracefully for corrupted registry entry."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset then corrupt the registry entry
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")
        manager.registry.data["presets"]["test-pack"] = "corrupted-string"
        manager.registry._save()

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "disable", "test-pack"])

        assert result.exit_code == 1
        assert "corrupted state" in result.output.lower()

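# Editorial sketch: test_disabled_preset_excluded_from_resolution implies the
# resolver consults the registry flag before searching a preset's files,
# roughly (assumed, simplified):
#
#   if not metadata.get("enabled", True):
#       continue  # disabled presets contribute nothing to resolution
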
@@ -1,252 +0,0 @@
"""
Pytest tests for timestamp-based branch naming in create-new-feature.sh and common.sh.

Converted from tests/test_timestamp_branches.sh so they are discovered by `uv run pytest`.
"""

import os
import re
import shutil
import subprocess
from pathlib import Path

import pytest

PROJECT_ROOT = Path(__file__).resolve().parent.parent
CREATE_FEATURE = PROJECT_ROOT / "scripts" / "bash" / "create-new-feature.sh"
COMMON_SH = PROJECT_ROOT / "scripts" / "bash" / "common.sh"


@pytest.fixture
def git_repo(tmp_path: Path) -> Path:
    """Create a temp git repo with scripts and .specify dir."""
    subprocess.run(["git", "init", "-q"], cwd=tmp_path, check=True)
    subprocess.run(
        ["git", "config", "user.email", "test@example.com"], cwd=tmp_path, check=True
    )
    subprocess.run(
        ["git", "config", "user.name", "Test User"], cwd=tmp_path, check=True
    )
    subprocess.run(
        ["git", "commit", "--allow-empty", "-m", "init", "-q"],
        cwd=tmp_path,
        check=True,
    )
    scripts_dir = tmp_path / "scripts" / "bash"
    scripts_dir.mkdir(parents=True)
    shutil.copy(CREATE_FEATURE, scripts_dir / "create-new-feature.sh")
    shutil.copy(COMMON_SH, scripts_dir / "common.sh")
    (tmp_path / ".specify" / "templates").mkdir(parents=True)
    return tmp_path


@pytest.fixture
def no_git_dir(tmp_path: Path) -> Path:
    """Create a temp directory without git, but with scripts."""
    scripts_dir = tmp_path / "scripts" / "bash"
    scripts_dir.mkdir(parents=True)
    shutil.copy(CREATE_FEATURE, scripts_dir / "create-new-feature.sh")
    shutil.copy(COMMON_SH, scripts_dir / "common.sh")
    (tmp_path / ".specify" / "templates").mkdir(parents=True)
    return tmp_path


def run_script(cwd: Path, *args: str) -> subprocess.CompletedProcess:
    """Run create-new-feature.sh with given args."""
    cmd = ["bash", "scripts/bash/create-new-feature.sh", *args]
    return subprocess.run(
        cmd,
        cwd=cwd,
        capture_output=True,
        text=True,
    )


def source_and_call(func_call: str, env: dict | None = None) -> subprocess.CompletedProcess:
    """Source common.sh and call a function."""
    cmd = f'source "{COMMON_SH}" && {func_call}'
    return subprocess.run(
        ["bash", "-c", cmd],
        capture_output=True,
        text=True,
        env={**os.environ, **(env or {})},
    )

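# Editorial sketch, not part of the original file: the stdout-parsing loop
# repeated in the tests below is equivalent to this helper (it keeps the
# value of the last "BRANCH_NAME:" line, as the loops do):
def _sketch_branch_from_output(stdout: str) -> str | None:
    branch = None
    for line in stdout.splitlines():
        if line.startswith("BRANCH_NAME:"):
            branch = line.split(":", 1)[1].strip()
    return branch
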
# ── Timestamp Branch Tests ───────────────────────────────────────────────────


class TestTimestampBranch:
    def test_timestamp_creates_branch(self, git_repo: Path):
        """Test 1: --timestamp creates branch with YYYYMMDD-HHMMSS prefix."""
        result = run_script(git_repo, "--timestamp", "--short-name", "user-auth", "Add user auth")
        assert result.returncode == 0, result.stderr
        branch = None
        for line in result.stdout.splitlines():
            if line.startswith("BRANCH_NAME:"):
                branch = line.split(":", 1)[1].strip()
        assert branch is not None
        assert re.match(r"^\d{8}-\d{6}-user-auth$", branch), f"unexpected branch: {branch}"

    def test_number_and_timestamp_warns(self, git_repo: Path):
        """Test 3: --number + --timestamp warns and uses timestamp."""
        result = run_script(git_repo, "--timestamp", "--number", "42", "--short-name", "feat", "Feature")
        assert result.returncode == 0, result.stderr
        assert "Warning" in result.stderr and "--number" in result.stderr

    def test_json_output_keys(self, git_repo: Path):
        """Test 4: JSON output contains expected keys."""
        import json
        result = run_script(git_repo, "--json", "--timestamp", "--short-name", "api", "API feature")
        assert result.returncode == 0, result.stderr
        data = json.loads(result.stdout)
        for key in ("BRANCH_NAME", "SPEC_FILE", "FEATURE_NUM"):
            assert key in data, f"missing {key} in JSON: {data}"
        assert re.match(r"^\d{8}-\d{6}$", data["FEATURE_NUM"])

    def test_long_name_truncation(self, git_repo: Path):
        """Test 5: Long branch name is truncated to <= 244 chars."""
        long_name = "a-" * 150 + "end"
        result = run_script(git_repo, "--timestamp", "--short-name", long_name, "Long feature")
        assert result.returncode == 0, result.stderr
        branch = None
        for line in result.stdout.splitlines():
            if line.startswith("BRANCH_NAME:"):
                branch = line.split(":", 1)[1].strip()
        assert branch is not None
        assert len(branch) <= 244
        assert re.match(r"^\d{8}-\d{6}-", branch)

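# Editorial sketch: the asserted YYYYMMDD-HHMMSS prefix corresponds to a
# format like datetime.now().strftime("%Y%m%d-%H%M%S"), e.g. "20260319-143022";
# the script itself presumably builds it in bash via date +%Y%m%d-%H%M%S.
# Both forms are assumptions.
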
# ── Sequential Branch Tests ──────────────────────────────────────────────────


class TestSequentialBranch:
    def test_sequential_default_with_existing_specs(self, git_repo: Path):
        """Test 2: Sequential default with existing specs."""
        (git_repo / "specs" / "001-first-feat").mkdir(parents=True)
        (git_repo / "specs" / "002-second-feat").mkdir(parents=True)
        result = run_script(git_repo, "--short-name", "new-feat", "New feature")
        assert result.returncode == 0, result.stderr
        branch = None
        for line in result.stdout.splitlines():
            if line.startswith("BRANCH_NAME:"):
                branch = line.split(":", 1)[1].strip()
        assert branch is not None
        assert re.match(r"^\d{3}-new-feat$", branch), f"unexpected branch: {branch}"

    def test_sequential_ignores_timestamp_dirs(self, git_repo: Path):
        """Sequential numbering skips timestamp dirs when computing next number."""
        (git_repo / "specs" / "002-first-feat").mkdir(parents=True)
        (git_repo / "specs" / "20260319-143022-ts-feat").mkdir(parents=True)
        result = run_script(git_repo, "--short-name", "next-feat", "Next feature")
        assert result.returncode == 0, result.stderr
        branch = None
        for line in result.stdout.splitlines():
            if line.startswith("BRANCH_NAME:"):
                branch = line.split(":", 1)[1].strip()
        assert branch == "003-next-feat", f"expected 003-next-feat, got: {branch}"

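# Editorial sketch (assumed numbering rule, inferred from the two tests above):
# only NNN- spec directories feed the next sequential number, so a
# timestamp-named directory such as "20260319-143022-ts-feat" never matches.
def _sketch_next_number(spec_dirs: list[str]) -> str:
    nums = [int(m.group(1)) for d in spec_dirs if (m := re.match(r"^(\d{3})-", d))]
    return f"{max(nums, default=0) + 1:03d}"
# _sketch_next_number(["002-first-feat", "20260319-143022-ts-feat"]) == "003"
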
# ── check_feature_branch Tests ───────────────────────────────────────────────


class TestCheckFeatureBranch:
    def test_accepts_timestamp_branch(self):
        """Test 6: check_feature_branch accepts timestamp branch."""
        result = source_and_call('check_feature_branch "20260319-143022-feat" "true"')
        assert result.returncode == 0

    def test_accepts_sequential_branch(self):
        """Test 7: check_feature_branch accepts sequential branch."""
        result = source_and_call('check_feature_branch "004-feat" "true"')
        assert result.returncode == 0

    def test_rejects_main(self):
        """Test 8: check_feature_branch rejects main."""
        result = source_and_call('check_feature_branch "main" "true"')
        assert result.returncode != 0

    def test_rejects_partial_timestamp(self):
        """Test 9: check_feature_branch rejects 7-digit date."""
        result = source_and_call('check_feature_branch "2026031-143022-feat" "true"')
        assert result.returncode != 0

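# Editorial sketch: the four accept/reject cases above are consistent with a
# branch-name prefix pattern like this (an assumption; the real check lives
# in scripts/bash/common.sh):
_SKETCH_BRANCH_RE = re.compile(r"^(\d{3}-|\d{8}-\d{6}-)")
# accepts "004-feat" and "20260319-143022-feat";
# rejects "main" and the 7-digit date "2026031-143022-feat".
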
# ── find_feature_dir_by_prefix Tests ─────────────────────────────────────────


class TestFindFeatureDirByPrefix:
    def test_timestamp_branch(self, tmp_path: Path):
        """Test 10: find_feature_dir_by_prefix with timestamp branch."""
        (tmp_path / "specs" / "20260319-143022-user-auth").mkdir(parents=True)
        result = source_and_call(
            f'find_feature_dir_by_prefix "{tmp_path}" "20260319-143022-user-auth"'
        )
        assert result.returncode == 0
        assert result.stdout.strip() == f"{tmp_path}/specs/20260319-143022-user-auth"

    def test_cross_branch_prefix(self, tmp_path: Path):
        """Test 11: find_feature_dir_by_prefix cross-branch (different suffix, same timestamp)."""
        (tmp_path / "specs" / "20260319-143022-original-feat").mkdir(parents=True)
        result = source_and_call(
            f'find_feature_dir_by_prefix "{tmp_path}" "20260319-143022-different-name"'
        )
        assert result.returncode == 0
        assert result.stdout.strip() == f"{tmp_path}/specs/20260319-143022-original-feat"

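# Editorial sketch: Test 11 implies find_feature_dir_by_prefix matches on the
# timestamp portion only, i.e. roughly the glob specs/<YYYYMMDD-HHMMSS>-*
# (assumed; the trailing short-name is not compared).
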
# ── get_current_branch Tests ─────────────────────────────────────────────────


class TestGetCurrentBranch:
    def test_env_var(self):
        """Test 12: get_current_branch returns SPECIFY_FEATURE env var."""
        result = source_and_call("get_current_branch", env={"SPECIFY_FEATURE": "my-custom-branch"})
        assert result.stdout.strip() == "my-custom-branch"


# ── No-git Tests ─────────────────────────────────────────────────────────────


class TestNoGitTimestamp:
    def test_no_git_timestamp(self, no_git_dir: Path):
        """Test 13: No-git repo + timestamp creates spec dir with warning."""
        result = run_script(no_git_dir, "--timestamp", "--short-name", "no-git-feat", "No git feature")
        assert result.returncode == 0, result.stderr
        spec_dirs = list((no_git_dir / "specs").iterdir()) if (no_git_dir / "specs").exists() else []
        assert len(spec_dirs) > 0, "spec dir not created"
        assert "git" in result.stderr.lower() or "warning" in result.stderr.lower()

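# Editorial sketch: Test 12 implies an env-first lookup, roughly
# "${SPECIFY_FEATURE:-$(git rev-parse --abbrev-ref HEAD)}" in common.sh
# (assumed; only the env override is pinned down here).
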
# ── E2E Flow Tests ───────────────────────────────────────────────────────────


class TestE2EFlow:
    def test_e2e_timestamp(self, git_repo: Path):
        """Test 14: E2E timestamp flow — branch, dir, validation."""
        run_script(git_repo, "--timestamp", "--short-name", "e2e-ts", "E2E timestamp test")
        branch = subprocess.run(
            ["git", "rev-parse", "--abbrev-ref", "HEAD"],
            cwd=git_repo,
            capture_output=True,
            text=True,
        ).stdout.strip()
        assert re.match(r"^\d{8}-\d{6}-e2e-ts$", branch), f"branch: {branch}"
        assert (git_repo / "specs" / branch).is_dir()
        val = source_and_call(f'check_feature_branch "{branch}" "true"')
        assert val.returncode == 0

    def test_e2e_sequential(self, git_repo: Path):
        """Test 15: E2E sequential flow (regression guard)."""
        run_script(git_repo, "--short-name", "seq-feat", "Sequential feature")
        branch = subprocess.run(
            ["git", "rev-parse", "--abbrev-ref", "HEAD"],
            cwd=git_repo,
            capture_output=True,
            text=True,
        ).stdout.strip()
        assert re.match(r"^\d{3}-seq-feat$", branch), f"branch: {branch}"
        assert (git_repo / "specs" / branch).is_dir()
        val = source_and_call(f'check_feature_branch "{branch}" "true"')
        assert val.returncode == 0