Compare commits


1 Commit

Author: github-actions[bot]
SHA1: f4fe2d35a9
Message: chore: bump version to 0.1.13
Date: 2026-03-03 22:06:42 +00:00
32 changed files with 234 additions and 2905 deletions

View File

@@ -8,15 +8,15 @@ run_command() {
local command_to_run="$*"
local output
local exit_code
# Capture all output (stdout and stderr)
output=$(eval "$command_to_run" 2>&1) || exit_code=$?
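# '|| exit_code=$?' records a failing status without aborting here; on
# success, exit_code stays unset and is defaulted on the next line.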
exit_code=${exit_code:-0}
if [ $exit_code -ne 0 ]; then
echo -e "\033[0;31m[ERROR] Command failed (Exit Code $exit_code): $command_to_run\033[0m" >&2
echo -e "\033[0;31m$output\033[0m" >&2
exit $exit_code
fi
}
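# Hypothetical usage of the helper above (URL and path variables come
# from this script); on failure it prints the captured output in red
# and exits with the command's status:
#   run_command "curl -fsSL \"$KIRO_INSTALLER_URL\" -o \"$KIRO_INSTALLER_PATH\""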
@@ -53,7 +53,7 @@ echo "✅ Done"
echo -e "\n🤖 Installing Kiro CLI..."
# https://kiro.dev/docs/cli/
KIRO_INSTALLER_URL="https://kiro.dev/install.sh"
KIRO_INSTALLER_URL="https://cli.kiro.dev/install"
KIRO_INSTALLER_SHA256="7487a65cf310b7fb59b357c4b5e6e3f3259d383f4394ecedb39acf70f307cffb"
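# Pinned checksum for the installer fetched above; it must be refreshed
# whenever the hosted script changes.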
KIRO_INSTALLER_PATH="$(mktemp)"
@@ -80,11 +80,6 @@ fi
run_command "$kiro_binary --help > /dev/null"
echo "✅ Done"
echo -e "\n🤖 Installing Kimi CLI..."
# https://code.kimi.com
run_command "pipx install kimi-cli"
echo "✅ Done"
echo -e "\n🤖 Installing CodeBuddy CLI..."
run_command "npm install -g @tencent-ai/codebuddy-code@latest"
echo "✅ Done"

.github/workflows/scripts/create-github-release.sh (vendored; mode changed: Executable file → Normal file)
View File

@@ -46,18 +46,12 @@ gh release create "$VERSION" \
.genreleases/spec-kit-template-amp-ps-"$VERSION".zip \
.genreleases/spec-kit-template-shai-sh-"$VERSION".zip \
.genreleases/spec-kit-template-shai-ps-"$VERSION".zip \
.genreleases/spec-kit-template-tabnine-sh-"$VERSION".zip \
.genreleases/spec-kit-template-tabnine-ps-"$VERSION".zip \
.genreleases/spec-kit-template-kiro-cli-sh-"$VERSION".zip \
.genreleases/spec-kit-template-kiro-cli-ps-"$VERSION".zip \
.genreleases/spec-kit-template-agy-sh-"$VERSION".zip \
.genreleases/spec-kit-template-agy-ps-"$VERSION".zip \
.genreleases/spec-kit-template-bob-sh-"$VERSION".zip \
.genreleases/spec-kit-template-bob-ps-"$VERSION".zip \
.genreleases/spec-kit-template-vibe-sh-"$VERSION".zip \
.genreleases/spec-kit-template-vibe-ps-"$VERSION".zip \
.genreleases/spec-kit-template-kimi-sh-"$VERSION".zip \
.genreleases/spec-kit-template-kimi-ps-"$VERSION".zip \
.genreleases/spec-kit-template-generic-sh-"$VERSION".zip \
.genreleases/spec-kit-template-generic-ps-"$VERSION".zip \
--title "Spec Kit Templates - $VERSION_NO_V" \

View File

@@ -8,13 +8,13 @@
.DESCRIPTION
create-release-packages.ps1 (workflow-local)
Build Spec Kit template release archives for each supported AI assistant and script type.
.PARAMETER Version
Version string with leading 'v' (e.g., v0.2.0)
.PARAMETER Agents
Comma or space separated subset of agents to build (default: all)
Valid agents: claude, gemini, copilot, cursor-agent, qwen, opencode, windsurf, codex, kilocode, auggie, roo, codebuddy, amp, kiro-cli, bob, qodercli, shai, tabnine, agy, vibe, kimi, generic
Valid agents: claude, gemini, copilot, cursor-agent, qwen, opencode, windsurf, codex, kilocode, auggie, roo, codebuddy, amp, kiro-cli, bob, qodercli, shai, agy, generic
.PARAMETER Scripts
Comma or space separated subset of script types to build (default: both)
@@ -33,10 +33,10 @@
param(
[Parameter(Mandatory=$true, Position=0)]
[string]$Version,
[Parameter(Mandatory=$false)]
[string]$Agents = "",
[Parameter(Mandatory=$false)]
[string]$Scripts = ""
)
@@ -60,7 +60,7 @@ New-Item -ItemType Directory -Path $GenReleasesDir -Force | Out-Null
function Rewrite-Paths {
param([string]$Content)
$Content = $Content -replace '(/?)\bmemory/', '.specify/memory/'
$Content = $Content -replace '(/?)\bscripts/', '.specify/scripts/'
$Content = $Content -replace '(/?)\btemplates/', '.specify/templates/'
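# Illustration (not part of the script): after these substitutions,
#   "Run scripts/bash/setup-plan.sh with templates/plan-template.md"
# becomes
#   "Run .specify/scripts/bash/setup-plan.sh with .specify/templates/plan-template.md"
# (the file names in this example are assumed).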
@@ -75,55 +75,55 @@ function Generate-Commands {
[string]$OutputDir,
[string]$ScriptVariant
)
New-Item -ItemType Directory -Path $OutputDir -Force | Out-Null
$templates = Get-ChildItem -Path "templates/commands/*.md" -File -ErrorAction SilentlyContinue
foreach ($template in $templates) {
$name = [System.IO.Path]::GetFileNameWithoutExtension($template.Name)
# Read file content and normalize line endings
$fileContent = (Get-Content -Path $template.FullName -Raw) -replace "`r`n", "`n"
# Extract description from YAML frontmatter
$description = ""
if ($fileContent -match '(?m)^description:\s*(.+)$') {
$description = $matches[1]
}
# Extract script command from YAML frontmatter
$scriptCommand = ""
if ($fileContent -match "(?m)^\s*${ScriptVariant}:\s*(.+)$") {
$scriptCommand = $matches[1]
}
if ([string]::IsNullOrEmpty($scriptCommand)) {
Write-Warning "No script command found for $ScriptVariant in $($template.Name)"
$scriptCommand = "(Missing script command for $ScriptVariant)"
}
# Extract agent_script command from YAML frontmatter if present
$agentScriptCommand = ""
if ($fileContent -match "(?ms)agent_scripts:.*?^\s*${ScriptVariant}:\s*(.+?)$") {
$agentScriptCommand = $matches[1].Trim()
}
# Replace {SCRIPT} placeholder with the script command
$body = $fileContent -replace '\{SCRIPT\}', $scriptCommand
# Replace {AGENT_SCRIPT} placeholder with the agent script command if found
if (-not [string]::IsNullOrEmpty($agentScriptCommand)) {
$body = $body -replace '\{AGENT_SCRIPT\}', $agentScriptCommand
}
# Remove the scripts: and agent_scripts: sections from frontmatter
$lines = $body -split "`n"
$outputLines = @()
$inFrontmatter = $false
$skipScripts = $false
$dashCount = 0
foreach ($line in $lines) {
if ($line -match '^---$') {
$outputLines += $line
@@ -135,7 +135,7 @@ function Generate-Commands {
}
continue
}
if ($inFrontmatter) {
if ($line -match '^(scripts|agent_scripts):$') {
$skipScripts = $true
@@ -148,20 +148,20 @@ function Generate-Commands {
continue
}
}
$outputLines += $line
}
$body = $outputLines -join "`n"
# Apply other substitutions
$body = $body -replace '\{ARGS\}', $ArgFormat
$body = $body -replace '__AGENT__', $Agent
$body = Rewrite-Paths -Content $body
# Generate output file based on extension
$outputFile = Join-Path $OutputDir "speckit.$name.$Extension"
switch ($Extension) {
'toml' {
$body = $body -replace '\\', '\\'
@@ -183,15 +183,15 @@ function Generate-CopilotPrompts {
[string]$AgentsDir,
[string]$PromptsDir
)
New-Item -ItemType Directory -Path $PromptsDir -Force | Out-Null
$agentFiles = Get-ChildItem -Path "$AgentsDir/speckit.*.agent.md" -File -ErrorAction SilentlyContinue
foreach ($agentFile in $agentFiles) {
$basename = $agentFile.Name -replace '\.agent\.md$', ''
$promptFile = Join-Path $PromptsDir "$basename.prompt.md"
$content = @"
---
agent: $basename
@@ -201,118 +201,31 @@ agent: $basename
}
}
# Create Kimi Code skills in .kimi/skills/<name>/SKILL.md format.
# Kimi CLI discovers skills as directories containing a SKILL.md file,
# invoked with /skill:<name> (e.g. /skill:speckit.specify).
function New-KimiSkills {
param(
[string]$SkillsDir,
[string]$ScriptVariant
)
$templates = Get-ChildItem -Path "templates/commands/*.md" -File -ErrorAction SilentlyContinue
foreach ($template in $templates) {
$name = [System.IO.Path]::GetFileNameWithoutExtension($template.Name)
$skillName = "speckit.$name"
$skillDir = Join-Path $SkillsDir $skillName
New-Item -ItemType Directory -Force -Path $skillDir | Out-Null
$fileContent = (Get-Content -Path $template.FullName -Raw) -replace "`r`n", "`n"
# Extract description
$description = "Spec Kit: $name workflow"
if ($fileContent -match '(?m)^description:\s*(.+)$') {
$description = $matches[1]
}
# Extract script command
$scriptCommand = "(Missing script command for $ScriptVariant)"
if ($fileContent -match "(?m)^\s*${ScriptVariant}:\s*(.+)$") {
$scriptCommand = $matches[1]
}
# Extract agent_script command from frontmatter if present
$agentScriptCommand = ""
if ($fileContent -match "(?ms)agent_scripts:.*?^\s*${ScriptVariant}:\s*(.+?)$") {
$agentScriptCommand = $matches[1].Trim()
}
# Replace {SCRIPT}, strip scripts sections, rewrite paths
$body = $fileContent -replace '\{SCRIPT\}', $scriptCommand
if (-not [string]::IsNullOrEmpty($agentScriptCommand)) {
$body = $body -replace '\{AGENT_SCRIPT\}', $agentScriptCommand
}
$lines = $body -split "`n"
$outputLines = @()
$inFrontmatter = $false
$skipScripts = $false
$dashCount = 0
foreach ($line in $lines) {
if ($line -match '^---$') {
$outputLines += $line
$dashCount++
$inFrontmatter = ($dashCount -eq 1)
continue
}
if ($inFrontmatter) {
if ($line -match '^(scripts|agent_scripts):$') { $skipScripts = $true; continue }
if ($line -match '^[a-zA-Z].*:' -and $skipScripts) { $skipScripts = $false }
if ($skipScripts -and $line -match '^\s+') { continue }
}
$outputLines += $line
}
$body = $outputLines -join "`n"
$body = $body -replace '\{ARGS\}', '$ARGUMENTS'
$body = $body -replace '__AGENT__', 'kimi'
$body = Rewrite-Paths -Content $body
# Strip existing frontmatter, keep only body
$templateBody = ""
$fmCount = 0
$inBody = $false
foreach ($line in ($body -split "`n")) {
if ($line -match '^---$') {
$fmCount++
if ($fmCount -eq 2) { $inBody = $true }
continue
}
if ($inBody) { $templateBody += "$line`n" }
}
$skillContent = "---`nname: `"$skillName`"`ndescription: `"$description`"`n---`n`n$templateBody"
Set-Content -Path (Join-Path $skillDir "SKILL.md") -Value $skillContent -NoNewline
}
}
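# Illustrative result (the skill name follows the /skill:<name> convention;
# the description comes from the template's frontmatter):
#   .kimi/skills/speckit.specify/SKILL.md
#     ---
#     name: "speckit.specify"
#     description: "..."
#     ---
#     <template body with placeholders substituted>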
function Build-Variant {
param(
[string]$Agent,
[string]$Script
)
$baseDir = Join-Path $GenReleasesDir "sdd-${Agent}-package-${Script}"
Write-Host "Building $Agent ($Script) package..."
New-Item -ItemType Directory -Path $baseDir -Force | Out-Null
# Copy base structure but filter scripts by variant
$specDir = Join-Path $baseDir ".specify"
New-Item -ItemType Directory -Path $specDir -Force | Out-Null
# Copy memory directory
if (Test-Path "memory") {
Copy-Item -Path "memory" -Destination $specDir -Recurse -Force
Write-Host "Copied memory -> .specify"
}
# Only copy the relevant script variant directory
if (Test-Path "scripts") {
$scriptsDestDir = Join-Path $specDir "scripts"
New-Item -ItemType Directory -Path $scriptsDestDir -Force | Out-Null
switch ($Script) {
'sh' {
if (Test-Path "scripts/bash") {
@@ -327,17 +240,18 @@ function Build-Variant {
}
}
}
# Copy any script files that aren't in variant-specific directories
Get-ChildItem -Path "scripts" -File -ErrorAction SilentlyContinue | ForEach-Object {
Copy-Item -Path $_.FullName -Destination $scriptsDestDir -Force
}
}
# Copy templates (excluding commands directory and vscode-settings.json)
if (Test-Path "templates") {
$templatesDestDir = Join-Path $specDir "templates"
New-Item -ItemType Directory -Path $templatesDestDir -Force | Out-Null
Get-ChildItem -Path "templates" -Recurse -File | Where-Object {
$_.FullName -notmatch 'templates[/\\]commands[/\\]' -and $_.Name -ne 'vscode-settings.json'
} | ForEach-Object {
@@ -349,7 +263,7 @@ function Build-Variant {
}
Write-Host "Copied templates -> .specify/templates"
}
# Generate agent-specific command files
switch ($Agent) {
'claude' {
@@ -366,10 +280,12 @@ function Build-Variant {
'copilot' {
$agentsDir = Join-Path $baseDir ".github/agents"
Generate-Commands -Agent 'copilot' -Extension 'agent.md' -ArgFormat '$ARGUMENTS' -OutputDir $agentsDir -ScriptVariant $Script
# Generate companion prompt files
$promptsDir = Join-Path $baseDir ".github/prompts"
Generate-CopilotPrompts -AgentsDir $agentsDir -PromptsDir $promptsDir
# Create VS Code workspace settings
$vscodeDir = Join-Path $baseDir ".vscode"
New-Item -ItemType Directory -Path $vscodeDir -Force | Out-Null
if (Test-Path "templates/vscode-settings.json") {
@@ -435,25 +351,10 @@ function Build-Variant {
$cmdDir = Join-Path $baseDir ".shai/commands"
Generate-Commands -Agent 'shai' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'tabnine' {
$cmdDir = Join-Path $baseDir ".tabnine/agent/commands"
Generate-Commands -Agent 'tabnine' -Extension 'toml' -ArgFormat '{{args}}' -OutputDir $cmdDir -ScriptVariant $Script
$tabnineTemplate = Join-Path 'agent_templates' 'tabnine/TABNINE.md'
if (Test-Path $tabnineTemplate) { Copy-Item $tabnineTemplate (Join-Path $baseDir 'TABNINE.md') }
}
'agy' {
$cmdDir = Join-Path $baseDir ".agent/workflows"
Generate-Commands -Agent 'agy' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'vibe' {
$cmdDir = Join-Path $baseDir ".vibe/prompts"
Generate-Commands -Agent 'vibe' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
}
'kimi' {
$skillsDir = Join-Path $baseDir ".kimi/skills"
New-Item -ItemType Directory -Force -Path $skillsDir | Out-Null
New-KimiSkills -SkillsDir $skillsDir -ScriptVariant $Script
}
'generic' {
$cmdDir = Join-Path $baseDir ".speckit/commands"
Generate-Commands -Agent 'generic' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
@@ -462,7 +363,7 @@ function Build-Variant {
throw "Unsupported agent '$Agent'."
}
}
# Create zip archive
$zipFile = Join-Path $GenReleasesDir "spec-kit-template-${Agent}-${Script}-${Version}.zip"
Compress-Archive -Path "$baseDir/*" -DestinationPath $zipFile -Force
@@ -470,16 +371,17 @@ function Build-Variant {
}
# Define all agents and scripts
$AllAgents = @('claude', 'gemini', 'copilot', 'cursor-agent', 'qwen', 'opencode', 'windsurf', 'codex', 'kilocode', 'auggie', 'roo', 'codebuddy', 'amp', 'kiro-cli', 'bob', 'qodercli', 'shai', 'tabnine', 'agy', 'vibe', 'kimi', 'generic')
$AllAgents = @('claude', 'gemini', 'copilot', 'cursor-agent', 'qwen', 'opencode', 'windsurf', 'codex', 'kilocode', 'auggie', 'roo', 'codebuddy', 'amp', 'kiro-cli', 'bob', 'qodercli', 'shai', 'agy', 'generic')
$AllScripts = @('sh', 'ps')
function Normalize-List {
param([string]$Input)
if ([string]::IsNullOrEmpty($Input)) {
return @()
}
# Split by comma or space and remove duplicates while preserving order
$items = $Input -split '[,\s]+' | Where-Object { $_ } | Select-Object -Unique
return $items
}
@@ -490,7 +392,7 @@ function Validate-Subset {
[string[]]$Allowed,
[string[]]$Items
)
$ok = $true
foreach ($item in $Items) {
if ($item -notin $Allowed) {

View File

@@ -6,7 +6,7 @@ set -euo pipefail
# Usage: .github/workflows/scripts/create-release-packages.sh <version>
# Version argument should include leading 'v'.
# Optionally set AGENTS and/or SCRIPTS env vars to limit what gets built.
# AGENTS : space or comma separated subset of: claude gemini copilot cursor-agent qwen opencode windsurf codex kilocode auggie roo codebuddy amp shai tabnine kiro-cli agy bob vibe qodercli kimi generic (default: all)
# AGENTS : space or comma separated subset of: claude gemini copilot cursor-agent qwen opencode windsurf codex kilocode auggie roo codebuddy amp shai kiro-cli agy bob qodercli generic (default: all)
# SCRIPTS : space or comma separated subset of: sh ps (default: both)
# Examples:
# AGENTS=claude SCRIPTS=sh $0 v0.2.0
@@ -45,19 +45,19 @@ generate_commands() {
[[ -f "$template" ]] || continue
local name description script_command agent_script_command body
name=$(basename "$template" .md)
# Normalize line endings
file_content=$(tr -d '\r' < "$template")
# Extract description and script command from YAML frontmatter
description=$(printf '%s\n' "$file_content" | awk '/^description:/ {sub(/^description:[[:space:]]*/, ""); print; exit}')
script_command=$(printf '%s\n' "$file_content" | awk -v sv="$script_variant" '/^[[:space:]]*'"$script_variant"':[[:space:]]*/ {sub(/^[[:space:]]*'"$script_variant"':[[:space:]]*/, ""); print; exit}')
if [[ -z $script_command ]]; then
echo "Warning: no script command found for $script_variant in $template" >&2
script_command="(Missing script command for $script_variant)"
fi
# Extract agent_script command from YAML frontmatter if present
agent_script_command=$(printf '%s\n' "$file_content" | awk '
/^agent_scripts:$/ { in_agent_scripts=1; next }
@@ -68,15 +68,15 @@ generate_commands() {
}
in_agent_scripts && /^[a-zA-Z]/ { in_agent_scripts=0 }
')
# Replace {SCRIPT} placeholder with the script command
body=$(printf '%s\n' "$file_content" | sed "s|{SCRIPT}|${script_command}|g")
# Replace {AGENT_SCRIPT} placeholder with the agent script command if found
if [[ -n $agent_script_command ]]; then
body=$(printf '%s\n' "$body" | sed "s|{AGENT_SCRIPT}|${agent_script_command}|g")
fi
# Remove the scripts: and agent_scripts: sections from frontmatter while preserving YAML structure
body=$(printf '%s\n' "$body" | awk '
/^---$/ { print; if (++dash_count == 1) in_frontmatter=1; else in_frontmatter=0; next }
@@ -86,10 +86,10 @@ generate_commands() {
in_frontmatter && skip_scripts && /^[[:space:]]/ { next }
{ print }
')
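# Illustration (not part of the script): frontmatter such as
#   description: Plan the feature
#   scripts:
#     sh: scripts/bash/setup-plan.sh
# is reduced to the description line only; the scripts:/agent_scripts:
# keys and their indented children are dropped (values here are assumed).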
# Apply other substitutions
body=$(printf '%s\n' "$body" | sed "s/{ARGS}/$arg_format/g" | sed "s/__AGENT__/$agent/g" | rewrite_paths)
case $ext in
toml)
body=$(printf '%s\n' "$body" | sed 's/\\/\\\\/g')
@@ -105,14 +105,15 @@ generate_commands() {
generate_copilot_prompts() {
local agents_dir=$1 prompts_dir=$2
mkdir -p "$prompts_dir"
# Generate a .prompt.md file for each .agent.md file
for agent_file in "$agents_dir"/speckit.*.agent.md; do
[[ -f "$agent_file" ]] || continue
local basename=$(basename "$agent_file" .agent.md)
local prompt_file="$prompts_dir/${basename}.prompt.md"
# Create prompt file with agent frontmatter
cat > "$prompt_file" <<EOF
---
agent: ${basename}
@@ -121,104 +122,41 @@ EOF
done
}
# Create Kimi Code skills in .kimi/skills/<name>/SKILL.md format.
# Kimi CLI discovers skills as directories containing a SKILL.md file,
# invoked with /skill:<name> (e.g. /skill:speckit.specify).
create_kimi_skills() {
local skills_dir="$1"
local script_variant="$2"
for template in templates/commands/*.md; do
[[ -f "$template" ]] || continue
local name
name=$(basename "$template" .md)
local skill_name="speckit.${name}"
local skill_dir="${skills_dir}/${skill_name}"
mkdir -p "$skill_dir"
local file_content
file_content=$(tr -d '\r' < "$template")
# Extract description from frontmatter
local description
description=$(printf '%s\n' "$file_content" | awk '/^description:/ {sub(/^description:[[:space:]]*/, ""); print; exit}')
[[ -z "$description" ]] && description="Spec Kit: ${name} workflow"
# Extract script command
local script_command
script_command=$(printf '%s\n' "$file_content" | awk -v sv="$script_variant" '/^[[:space:]]*'"$script_variant"':[[:space:]]*/ {sub(/^[[:space:]]*'"$script_variant"':[[:space:]]*/, ""); print; exit}')
[[ -z "$script_command" ]] && script_command="(Missing script command for $script_variant)"
# Extract agent_script command from frontmatter if present
local agent_script_command
agent_script_command=$(printf '%s\n' "$file_content" | awk '
/^agent_scripts:$/ { in_agent_scripts=1; next }
in_agent_scripts && /^[[:space:]]*'"$script_variant"':[[:space:]]*/ {
sub(/^[[:space:]]*'"$script_variant"':[[:space:]]*/, "")
print
exit
}
in_agent_scripts && /^[a-zA-Z]/ { in_agent_scripts=0 }
')
# Build body: replace placeholders, strip scripts sections, rewrite paths
local body
body=$(printf '%s\n' "$file_content" | sed "s|{SCRIPT}|${script_command}|g")
if [[ -n $agent_script_command ]]; then
body=$(printf '%s\n' "$body" | sed "s|{AGENT_SCRIPT}|${agent_script_command}|g")
fi
body=$(printf '%s\n' "$body" | awk '
/^---$/ { print; if (++dash_count == 1) in_frontmatter=1; else in_frontmatter=0; next }
in_frontmatter && /^scripts:$/ { skip_scripts=1; next }
in_frontmatter && /^agent_scripts:$/ { skip_scripts=1; next }
in_frontmatter && /^[a-zA-Z].*:/ && skip_scripts { skip_scripts=0 }
in_frontmatter && skip_scripts && /^[[:space:]]/ { next }
{ print }
')
body=$(printf '%s\n' "$body" | sed 's/{ARGS}/\$ARGUMENTS/g' | sed 's/__AGENT__/kimi/g' | rewrite_paths)
# Strip existing frontmatter and prepend Kimi frontmatter
local template_body
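# awk: count '---' fences; after the second fence, print every remaining
# line (the body), dropping the frontmatter.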
template_body=$(printf '%s\n' "$body" | awk '/^---/{p++; if(p==2){found=1; next}} found')
{
printf -- '---\n'
printf 'name: "%s"\n' "$skill_name"
printf 'description: "%s"\n' "$description"
printf -- '---\n\n'
printf '%s\n' "$template_body"
} > "$skill_dir/SKILL.md"
done
}
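# Hypothetical post-build sanity check (illustrative, not part of this
# script): each generated skill directory should carry a SKILL.md whose
# frontmatter names the skill, e.g.
#   for skill in .kimi/skills/speckit.*/; do
#     grep -q '^name: ' "${skill}SKILL.md" || echo "bad skill: $skill" >&2
#   done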
build_variant() {
local agent=$1 script=$2
local base_dir="$GENRELEASES_DIR/sdd-${agent}-package-${script}"
echo "Building $agent ($script) package..."
mkdir -p "$base_dir"
# Copy base structure but filter scripts by variant
SPEC_DIR="$base_dir/.specify"
mkdir -p "$SPEC_DIR"
[[ -d memory ]] && { cp -r memory "$SPEC_DIR/"; echo "Copied memory -> .specify"; }
# Only copy the relevant script variant directory
if [[ -d scripts ]]; then
mkdir -p "$SPEC_DIR/scripts"
case $script in
sh)
[[ -d scripts/bash ]] && { cp -r scripts/bash "$SPEC_DIR/scripts/"; echo "Copied scripts/bash -> .specify/scripts"; }
# Copy any script files that aren't in variant-specific directories
find scripts -maxdepth 1 -type f -exec cp {} "$SPEC_DIR/scripts/" \; 2>/dev/null || true
;;
ps)
[[ -d scripts/powershell ]] && { cp -r scripts/powershell "$SPEC_DIR/scripts/"; echo "Copied scripts/powershell -> .specify/scripts"; }
# Copy any script files that aren't in variant-specific directories
find scripts -maxdepth 1 -type f -exec cp {} "$SPEC_DIR/scripts/" \; 2>/dev/null || true
;;
esac
fi
[[ -d templates ]] && { mkdir -p "$SPEC_DIR/templates"; find templates -type f -not -path "templates/commands/*" -not -name "vscode-settings.json" -exec cp --parents {} "$SPEC_DIR"/ \; ; echo "Copied templates -> .specify/templates"; }
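# Note: cp --parents is a GNU coreutils extension; BSD/macOS cp lacks it.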
# NOTE: We substitute {ARGS} internally. Outward tokens differ intentionally:
# * Markdown/prompt (claude, copilot, cursor-agent, opencode): $ARGUMENTS
# * TOML (gemini, qwen): {{args}}
# This keeps formats readable without extra abstraction.
case $agent in
claude)
@@ -231,7 +169,9 @@ build_variant() {
copilot)
mkdir -p "$base_dir/.github/agents"
generate_commands copilot agent.md "\$ARGUMENTS" "$base_dir/.github/agents" "$script"
# Generate companion prompt files
generate_copilot_prompts "$base_dir/.github/agents" "$base_dir/.github/prompts"
# Create VS Code workspace settings
mkdir -p "$base_dir/.vscode"
[[ -f templates/vscode-settings.json ]] && cp templates/vscode-settings.json "$base_dir/.vscode/settings.json"
;;
@@ -272,10 +212,6 @@ build_variant() {
shai)
mkdir -p "$base_dir/.shai/commands"
generate_commands shai md "\$ARGUMENTS" "$base_dir/.shai/commands" "$script" ;;
tabnine)
mkdir -p "$base_dir/.tabnine/agent/commands"
generate_commands tabnine toml "{{args}}" "$base_dir/.tabnine/agent/commands" "$script"
[[ -f agent_templates/tabnine/TABNINE.md ]] && cp agent_templates/tabnine/TABNINE.md "$base_dir/TABNINE.md" ;;
kiro-cli)
mkdir -p "$base_dir/.kiro/prompts"
generate_commands kiro-cli md "\$ARGUMENTS" "$base_dir/.kiro/prompts" "$script" ;;
@@ -285,12 +221,6 @@ build_variant() {
bob)
mkdir -p "$base_dir/.bob/commands"
generate_commands bob md "\$ARGUMENTS" "$base_dir/.bob/commands" "$script" ;;
vibe)
mkdir -p "$base_dir/.vibe/prompts"
generate_commands vibe md "\$ARGUMENTS" "$base_dir/.vibe/prompts" "$script" ;;
kimi)
mkdir -p "$base_dir/.kimi/skills"
create_kimi_skills "$base_dir/.kimi/skills" "$script" ;;
generic)
mkdir -p "$base_dir/.speckit/commands"
generate_commands generic md "\$ARGUMENTS" "$base_dir/.speckit/commands" "$script" ;;
@@ -300,10 +230,11 @@ build_variant() {
}
# Determine agent list
ALL_AGENTS=(claude gemini copilot cursor-agent qwen opencode windsurf codex kilocode auggie roo codebuddy amp shai tabnine kiro-cli agy bob vibe qodercli kimi generic)
ALL_AGENTS=(claude gemini copilot cursor-agent qwen opencode windsurf codex kilocode auggie roo codebuddy amp shai kiro-cli agy bob qodercli generic)
ALL_SCRIPTS=(sh ps)
norm_list() {
# convert comma+space separated -> line separated unique while preserving order of first occurrence
tr ',\n' ' ' | awk '{for(i=1;i<=NF;i++){if(!seen[$i]++){printf((out?"\n":"") $i);out=1}}}END{printf("\n")}'
}
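# Illustration: printf 'claude,gemini claude copilot' | norm_list
# prints claude, gemini, copilot on separate lines -- duplicates removed,
# first-occurrence order preserved.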

View File

@@ -47,8 +47,6 @@ Specify supports multiple AI agents by generating agent-specific command files a
| **Kiro CLI** | `.kiro/prompts/` | Markdown | `kiro-cli` | Kiro CLI |
| **Amp** | `.agents/commands/` | Markdown | `amp` | Amp CLI |
| **SHAI** | `.shai/commands/` | Markdown | `shai` | SHAI CLI |
| **Tabnine CLI** | `.tabnine/agent/commands/` | TOML | `tabnine` | Tabnine CLI |
| **Kimi Code** | `.kimi/skills/` | Markdown | `kimi` | Kimi Code CLI (Moonshot AI) |
| **IBM Bob** | `.bob/commands/` | Markdown | N/A (IDE-based) | IBM Bob IDE |
| **Generic** | User-specified via `--ai-commands-dir` | Markdown | N/A | Bring your own agent |
@@ -324,8 +322,6 @@ Require a command-line tool to be installed:
- **Qoder CLI**: `qodercli` CLI
- **Amp**: `amp` CLI
- **SHAI**: `shai` CLI
- **Tabnine CLI**: `tabnine` CLI
- **Kimi Code**: `kimi` CLI
### IDE-Based Agents
@@ -339,7 +335,7 @@ Work within integrated development environments:
### Markdown Format
Used by: Claude, Cursor, opencode, Windsurf, Kiro CLI, Amp, SHAI, IBM Bob, Kimi Code
Used by: Claude, Cursor, opencode, Windsurf, Kiro CLI, Amp, SHAI, IBM Bob
**Standard format:**
@@ -364,7 +360,7 @@ Command content with {SCRIPT} and $ARGUMENTS placeholders.
### TOML Format
Used by: Gemini, Qwen, Tabnine
Used by: Gemini, Qwen
```toml
description = "Command description"

View File

@@ -7,114 +7,6 @@ Recent changes to the Specify CLI and templates are documented here.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.2.1] - 2026-03-11
### Changed
- Added February 2026 newsletter (#1812)
- feat: add Kimi Code CLI agent support (#1790)
- docs: fix broken links in quickstart guide (#1759) (#1797)
- docs: add catalog cli help documentation (#1793) (#1794)
- fix: use quiet checkout to avoid exception on git checkout (#1792)
- feat(extensions): support .extensionignore to exclude files during install (#1781)
- feat: add Codex support for extension command registration (#1767)
- chore: bump version to 0.2.0 (#1786)
- fix: sync agent list comments with actual supported agents (#1785)
- feat(extensions): support multiple active catalogs simultaneously (#1720)
- Pavel/add tabnine cli support (#1503)
- Add Understanding extension to community catalog (#1778)
- Add ralph extension to community catalog (#1780)
- Update README with project initialization instructions (#1772)
- feat: add review extension to community catalog (#1775)
- Add fleet extension to community catalog (#1771)
- Integration of Mistral vibe support into speckit (#1725)
- fix: Remove duplicate options in specify.md (#1765)
- fix: use global branch numbering instead of per-short-name detection (#1757)
- Add Community Walkthroughs section to README (#1766)
- feat(extensions): add Jira Integration to community catalog (#1764)
- Add Azure DevOps Integration extension to community catalog (#1734)
- Fix docs: update Antigravity link and add initialization example (#1748)
- fix: wire after_tasks and after_implement hook events into command templates (#1702)
- make c ignores consistent with c++ (#1747)
- chore: bump version to 0.1.13 (#1746)
- feat: add kiro-cli and AGENT_CONFIG consistency coverage (#1690)
- feat: add verify extension to community catalog (#1726)
- Add Retrospective Extension to community catalog README table (#1741)
- fix(scripts): add empty description validation and branch checkout error handling (#1559)
- fix: correct Copilot extension command registration (#1724)
- fix(implement): remove Makefile from C ignore patterns (#1558)
- Add sync extension to community catalog (#1728)
- fix(checklist): clarify file handling behavior for append vs create (#1556)
- fix(clarify): correct conflicting question limit from 10 to 5 (#1557)
- chore: bump version to 0.1.12 (#1737)
- fix: use RELEASE_PAT so tag push triggers release workflow (#1736)
- fix: release-trigger uses release branch + PR instead of direct push to main (#1733)
- fix: Split release process to sync pyproject.toml version with git tags (#1732)
## [Unreleased]
### Added
- feat(extensions): support `.extensionignore` to exclude files/folders during `specify extension add` (#1781)
## [0.2.0] - 2026-03-09
### Changed
- feat: add Kimi Code CLI agent support
- fix: sync agent list comments with actual supported agents (#1785)
- feat(extensions): support multiple active catalogs simultaneously (#1720)
- Pavel/add tabnine cli support (#1503)
- Add Understanding extension to community catalog (#1778)
- Add ralph extension to community catalog (#1780)
- Update README with project initialization instructions (#1772)
- feat: add review extension to community catalog (#1775)
- Add fleet extension to community catalog (#1771)
- Integration of Mistral vibe support into speckit (#1725)
- fix: Remove duplicate options in specify.md (#1765)
- fix: use global branch numbering instead of per-short-name detection (#1757)
- Add Community Walkthroughs section to README (#1766)
- feat(extensions): add Jira Integration to community catalog (#1764)
- Add Azure DevOps Integration extension to community catalog (#1734)
- Fix docs: update Antigravity link and add initialization example (#1748)
- fix: wire after_tasks and after_implement hook events into command templates (#1702)
- make c ignores consistent with c++ (#1747)
- chore: bump version to 0.1.13 (#1746)
- feat: add kiro-cli and AGENT_CONFIG consistency coverage (#1690)
- feat: add verify extension to community catalog (#1726)
- Add Retrospective Extension to community catalog README table (#1741)
- fix(scripts): add empty description validation and branch checkout error handling (#1559)
- fix: correct Copilot extension command registration (#1724)
- fix(implement): remove Makefile from C ignore patterns (#1558)
- Add sync extension to community catalog (#1728)
- fix(checklist): clarify file handling behavior for append vs create (#1556)
- fix(clarify): correct conflicting question limit from 10 to 5 (#1557)
- chore: bump version to 0.1.12 (#1737)
- fix: use RELEASE_PAT so tag push triggers release workflow (#1736)
- fix: release-trigger uses release branch + PR instead of direct push to main (#1733)
- fix: Split release process to sync pyproject.toml version with git tags (#1732)
## [0.1.14] - 2026-03-09
### Added
- feat: add Tabnine CLI agent support
- **Multi-Catalog Support (#1707)**: Extension catalog system now supports multiple active catalogs simultaneously via a catalog stack
- New `specify extension catalog list` command lists all active catalogs with name, URL, priority, and `install_allowed` status
- New `specify extension catalog add` and `specify extension catalog remove` commands for project-scoped catalog management
- Default built-in stack includes `catalog.json` (default, installable) and `catalog.community.json` (community, discovery only) — community extensions are now surfaced in search results out of the box
- `specify extension search` aggregates results across all active catalogs, annotating each result with source catalog
- `specify extension add` enforces `install_allowed` policy — extensions from discovery-only catalogs cannot be installed directly
- Project-level `.specify/extension-catalogs.yml` and user-level `~/.specify/extension-catalogs.yml` config files supported, with project-level taking precedence
- `SPECKIT_CATALOG_URL` environment variable still works for backward compatibility (replaces full stack with single catalog)
- All catalog URLs require HTTPS (HTTP allowed for localhost development)
- New `CatalogEntry` dataclass in `extensions.py` for catalog stack representation
- Per-URL hash-based caching for non-default catalogs; legacy cache preserved for default catalog
- Higher-priority catalogs win on merge conflicts (same extension id in multiple catalogs)
- 13 new tests covering catalog stack resolution, merge conflicts, URL validation, and `install_allowed` enforcement
- Updated RFC, Extension User Guide, and Extension API Reference documentation
## [0.1.13] - 2026-03-03
### Changed

View File

@@ -22,7 +22,6 @@
- [🤔 What is Spec-Driven Development?](#-what-is-spec-driven-development)
- [⚡ Get Started](#-get-started)
- [📽️ Video Overview](#-video-overview)
- [🚶 Community Walkthroughs](#-community-walkthroughs)
- [🤖 Supported AI Agents](#-supported-ai-agents)
- [🔧 Specify CLI Reference](#-specify-cli-reference)
- [📚 Core Philosophy](#-core-philosophy)
@@ -80,13 +79,7 @@ uv tool install specify-cli --force --from git+https://github.com/github/spec-ki
Run directly without installing:
```bash
# Create new project
uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME>
# Or initialize in existing project
uvx --from git+https://github.com/github/spec-kit.git specify init . --ai claude
# or
uvx --from git+https://github.com/github/spec-kit.git specify init --here --ai claude
```
**Benefits of persistent installation:**
@@ -146,16 +139,6 @@ Want to see Spec Kit in action? Watch our [video overview](https://www.youtube.c
[![Spec Kit video header](/media/spec-kit-video-header.jpg)](https://www.youtube.com/watch?v=a9eR1xsfvHg&pp=0gcJCckJAYcqIYzv)
## 🚶 Community Walkthroughs
See Spec-Driven Development in action across different scenarios with these community-contributed walkthroughs:
- **[Greenfield .NET CLI tool](https://github.com/mnriem/spec-kit-dotnet-cli-demo)** — Builds a Timezone Utility as a .NET single-binary CLI tool from a blank directory, covering the full spec-kit workflow: constitution, specify, plan, tasks, and multi-pass implement using GitHub Copilot agents.
- **[Greenfield Spring Boot + React platform](https://github.com/mnriem/spec-kit-spring-react-demo)** — Builds an LLM performance analytics platform (REST API, graphs, iteration tracking) from scratch using Spring Boot, embedded React, PostgreSQL, and Docker Compose, with a clarify step and a cross-artifact consistency analysis pass included.
- **[Brownfield ASP.NET CMS extension](https://github.com/mnriem/spec-kit-aspnet-brownfield-demo)** — Extends an existing open-source .NET CMS (CarrotCakeCMS-Core) with two new features — cross-platform Docker Compose infrastructure and a token-authenticated headless REST API — demonstrating how spec-kit fits into existing codebases without prior specs or a constitution.
## 🤖 Supported AI Agents
| Agent | Support | Notes |
@@ -177,11 +160,8 @@ See Spec-Driven Development in action across different scenarios with these comm
| [Qwen Code](https://github.com/QwenLM/qwen-code) | ✅ | |
| [Roo Code](https://roocode.com/) | ✅ | |
| [SHAI (OVHcloud)](https://github.com/ovh/shai) | ✅ | |
| [Tabnine CLI](https://docs.tabnine.com/main/getting-started/tabnine-cli) | ✅ | |
| [Mistral Vibe](https://github.com/mistralai/mistral-vibe) | ✅ | |
| [Kimi Code](https://code.kimi.com/) | ✅ | |
| [Windsurf](https://windsurf.com/) | ✅ | |
| [Antigravity (agy)](https://antigravity.google/) | ✅ | |
| [Antigravity (agy)](https://agy.ai/) | ✅ | |
| Generic | ✅ | Bring your own agent — use `--ai generic --ai-commands-dir <path>` for unsupported agents |
## 🔧 Specify CLI Reference
@@ -193,14 +173,14 @@ The `specify` command supports the following options:
| Command | Description |
| ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `init` | Initialize a new Specify project from the latest template |
| `check` | Check for installed tools (`git`, `claude`, `gemini`, `code`/`code-insiders`, `cursor-agent`, `windsurf`, `qwen`, `opencode`, `codex`, `kiro-cli`, `shai`, `qodercli`, `vibe`, `kimi`) |
| `check` | Check for installed tools (`git`, `claude`, `gemini`, `code`/`code-insiders`, `cursor-agent`, `windsurf`, `qwen`, `opencode`, `codex`, `kiro-cli`, `shai`, `qodercli`) |
### `specify init` Arguments & Options
| Argument/Option | Type | Description |
| ---------------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `<project-name>` | Argument | Name for your new project directory (optional if using `--here`, or use `.` for current directory) |
| `--ai` | Option | AI assistant to use: `claude`, `gemini`, `copilot`, `cursor-agent`, `qwen`, `opencode`, `codex`, `windsurf`, `kilocode`, `auggie`, `roo`, `codebuddy`, `amp`, `shai`, `kiro-cli` (`kiro` alias), `agy`, `bob`, `qodercli`, `vibe`, `kimi`, or `generic` (requires `--ai-commands-dir`) |
| `--ai` | Option | AI assistant to use: `claude`, `gemini`, `copilot`, `cursor-agent`, `qwen`, `opencode`, `codex`, `windsurf`, `kilocode`, `auggie`, `roo`, `codebuddy`, `amp`, `shai`, `kiro-cli` (`kiro` alias), `agy`, `bob`, `qodercli`, or `generic` (requires `--ai-commands-dir`) |
| `--ai-commands-dir` | Option | Directory for agent command files (required with `--ai generic`, e.g. `.myagent/commands/`) |
| `--script` | Option | Script variant to use: `sh` (bash/zsh) or `ps` (PowerShell) |
| `--ignore-agent-tools` | Flag | Skip checks for AI agent tools like Claude Code |
@@ -239,15 +219,9 @@ specify init my-project --ai amp
# Initialize with SHAI support
specify init my-project --ai shai
# Initialize with Mistral Vibe support
specify init my-project --ai vibe
# Initialize with IBM Bob support
specify init my-project --ai bob
# Initialize with Antigravity support
specify init my-project --ai agy
# Initialize with an unsupported agent (generic / bring your own agent)
specify init my-project --ai generic --ai-commands-dir .myagent/commands/
@@ -422,7 +396,7 @@ specify init . --force --ai claude
specify init --here --force --ai claude
```
The CLI will check if you have Claude Code, Gemini CLI, Cursor CLI, Qwen CLI, opencode, Codex CLI, Qoder CLI, Tabnine CLI, Kiro CLI, or Mistral Vibe installed. If you do not, or you prefer to get the templates without checking for the right tools, use `--ignore-agent-tools` with your command:
The CLI will check if you have Claude Code, Gemini CLI, Cursor CLI, Qwen CLI, opencode, Codex CLI, Qoder CLI, or Kiro CLI installed. If you do not, or you prefer to get the templates without checking for the right tools, use `--ignore-agent-tools` with your command:
```bash
specify init <project_name> --ai claude --ignore-agent-tools

View File

@@ -173,6 +173,6 @@ Finally, implement the solution:
## Next Steps
- Read the [complete methodology](https://github.com/github/spec-kit/blob/main/spec-driven.md) for in-depth guidance
- Check out [more examples](https://github.com/github/spec-kit/tree/main/templates) in the repository
- Read the [complete methodology](../spec-driven.md) for in-depth guidance
- Check out [more examples](../templates) in the repository
- Explore the [source code on GitHub](https://github.com/github/spec-kit)

View File

@@ -243,34 +243,6 @@ manager.check_compatibility(
) # Raises: CompatibilityError if incompatible
```
### CatalogEntry
**Module**: `specify_cli.extensions`
Represents a single catalog in the active catalog stack.
```python
from specify_cli.extensions import CatalogEntry
entry = CatalogEntry(
url="https://example.com/catalog.json",
name="default",
priority=1,
install_allowed=True,
description="Built-in catalog of installable extensions",
)
```
**Fields**:
| Field | Type | Description |
|-------|------|-------------|
| `url` | `str` | Catalog URL (must use HTTPS, or HTTP for localhost) |
| `name` | `str` | Human-readable catalog name |
| `priority` | `int` | Sort order (lower = higher priority, wins on conflicts) |
| `install_allowed` | `bool` | Whether extensions from this catalog can be installed |
| `description` | `str` | Optional human-readable description of the catalog (default: empty) |
### ExtensionCatalog
**Module**: `specify_cli.extensions`
@@ -281,67 +253,30 @@ from specify_cli.extensions import ExtensionCatalog
catalog = ExtensionCatalog(project_root)
```
**Class attributes**:
```python
ExtensionCatalog.DEFAULT_CATALOG_URL # default catalog URL
ExtensionCatalog.COMMUNITY_CATALOG_URL # community catalog URL
```
**Methods**:
```python
# Get the ordered list of active catalogs
entries = catalog.get_active_catalogs() # List[CatalogEntry]
# Fetch catalog (primary catalog, backward compat)
# Fetch catalog
catalog_data = catalog.fetch_catalog(force_refresh: bool = False) # Dict
# Search extensions across all active catalogs
# Each result includes _catalog_name and _install_allowed
# Search extensions
results = catalog.search(
query: Optional[str] = None,
tag: Optional[str] = None,
author: Optional[str] = None,
verified_only: bool = False
) # Returns: List[Dict] — each dict includes _catalog_name, _install_allowed
) # Returns: List[Dict]
# Get extension info (searches all active catalogs)
# Returns None if not found; includes _catalog_name and _install_allowed
# Get extension info
ext_info = catalog.get_extension_info(extension_id: str) # Optional[Dict]
# Check cache validity (primary catalog)
# Check cache validity
is_valid = catalog.is_cache_valid() # bool
# Clear all catalog caches
# Clear cache
catalog.clear_cache()
```
**Result annotation fields**:
Each extension dict returned by `search()` and `get_extension_info()` includes:
| Field | Type | Description |
|-------|------|-------------|
| `_catalog_name` | `str` | Name of the source catalog |
| `_install_allowed` | `bool` | Whether installation is allowed from this catalog |
**Catalog config file** (`.specify/extension-catalogs.yml`):
```yaml
catalogs:
- name: "default"
url: "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.json"
priority: 1
install_allowed: true
description: "Built-in catalog of installable extensions"
- name: "community"
url: "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json"
priority: 2
install_allowed: false
description: "Community-contributed extensions (discovery only)"
```
### HookExecutor
**Module**: `specify_cli.extensions`
@@ -608,39 +543,6 @@ EXECUTE_COMMAND: {command}
**Output**: List of installed extensions with metadata
### extension catalog list
**Usage**: `specify extension catalog list`
Lists all active catalogs in the current catalog stack, showing name, description, URL, priority, and `install_allowed` status.
### extension catalog add
**Usage**: `specify extension catalog add URL [OPTIONS]`
**Options**:
- `--name NAME` - Catalog name (required)
- `--priority INT` - Priority (lower = higher priority, default: 10)
- `--install-allowed / --no-install-allowed` - Allow installs from this catalog (default: false)
- `--description TEXT` - Optional description of the catalog
**Arguments**:
- `URL` - Catalog URL (must use HTTPS)
Adds a catalog entry to `.specify/extension-catalogs.yml`.
### extension catalog remove
**Usage**: `specify extension catalog remove NAME`
**Arguments**:
- `NAME` - Catalog name to remove
Removes a catalog entry from `.specify/extension-catalogs.yml`.
### extension add
**Usage**: `specify extension add EXTENSION [OPTIONS]`
@@ -649,13 +551,13 @@ Removes a catalog entry from `.specify/extension-catalogs.yml`.
- `--from URL` - Install from custom URL
- `--dev PATH` - Install from local directory
- `--version VERSION` - Install specific version
- `--no-register` - Skip command registration
**Arguments**:
- `EXTENSION` - Extension name or URL
**Note**: Extensions from catalogs with `install_allowed: false` cannot be installed via this command.
### extension remove
**Usage**: `specify extension remove EXTENSION [OPTIONS]`
@@ -673,8 +575,6 @@ Removes a catalog entry from `.specify/extension-catalogs.yml`.
**Usage**: `specify extension search [QUERY] [OPTIONS]`
Searches all active catalogs simultaneously. Results include source catalog name and install_allowed status.
**Options**:
- `--tag TAG` - Filter by tag
@@ -689,8 +589,6 @@ Searches all active catalogs simultaneously. Results include source catalog name
**Usage**: `specify extension info EXTENSION`
Shows source catalog and install_allowed status.
**Arguments**:
- `EXTENSION` - Extension ID

View File

@@ -332,67 +332,6 @@ echo "$config"
---
## Excluding Files with `.extensionignore`
Extension authors can create a `.extensionignore` file in the extension root to exclude files and folders from being copied when a user installs the extension with `specify extension add`. This is useful for keeping development-only files (tests, CI configs, docs source, etc.) out of the installed copy.
### Format
The file uses `.gitignore`-compatible patterns (one per line), powered by the [`pathspec`](https://pypi.org/project/pathspec/) library:
- Blank lines are ignored
- Lines starting with `#` are comments
- `*` matches anything **except** `/` (does not cross directory boundaries)
- `**` matches zero or more directories (e.g., `docs/**/*.draft.md`)
- `?` matches any single character except `/`
- A trailing `/` restricts a pattern to directories only
- Patterns containing `/` (other than a trailing slash) are anchored to the extension root
- Patterns without `/` match at any depth in the tree
- `!` negates a previously excluded pattern (re-includes a file)
- Backslashes in patterns are normalised to forward slashes for cross-platform compatibility
- The `.extensionignore` file itself is always excluded automatically
### Example
```gitignore
# .extensionignore
# Development files
tests/
.github/
.gitignore
# Build artifacts
__pycache__/
*.pyc
dist/
# Documentation source (keep only the built README)
docs/
CONTRIBUTING.md
```
### Pattern Matching
| Pattern | Matches | Does NOT match |
|---------|---------|----------------|
| `*.pyc` | Any `.pyc` file in any directory | — |
| `tests/` | The `tests` directory (and all its contents) | A file named `tests` |
| `docs/*.draft.md` | `docs/api.draft.md` (directly inside `docs/`) | `docs/sub/api.draft.md` (nested) |
| `.env` | The `.env` file at any level | — |
| `!README.md` | Re-includes `README.md` even if matched by an earlier pattern | — |
| `docs/**/*.draft.md` | `docs/api.draft.md`, `docs/sub/api.draft.md` | — |
### Unsupported Features
The following `.gitignore` features are **not applicable** in this context:
- **Multiple `.extensionignore` files**: Only a single file at the extension root is supported (`.gitignore` supports files in subdirectories)
- **`$GIT_DIR/info/exclude` and `core.excludesFile`**: These are Git-specific and have no equivalent here
- **Negation inside excluded directories**: Because file copying uses `shutil.copytree`, excluding a directory prevents recursion into it entirely. A negation pattern cannot re-include a file inside a directory that was itself excluded. For example, the combination `tests/` followed by `!tests/important.py` will **not** preserve `tests/important.py` — the `tests/` directory is skipped at the root level and its contents are never evaluated. To work around this, exclude the directory's contents individually instead of the directory itself (e.g., `tests/*.pyc` and `tests/.cache/` rather than `tests/`).
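One lightweight way to sanity-check patterns locally is to reuse git's matcher, which implements the same gitwildmatch syntax and shares the no-re-include-under-an-excluded-directory behavior described above (an approximation for testing, not the actual install path):

```bash
# In a scratch clone of the extension root: mirror .extensionignore into
# .gitignore, then ask git which paths would be excluded and by which pattern.
cp .extensionignore .gitignore
git check-ignore -v tests/important.py docs/api.draft.md
```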
---
## Validation Rules
### Extension ID

View File

@@ -76,7 +76,7 @@ vim .specify/extensions/jira/jira-config.yml
## Finding Extensions
`specify extension search` searches **all active catalogs** simultaneously, including the community catalog by default. Results are annotated with their source catalog and install status.
**Note**: By default, `specify extension search` uses your organization's catalog (`catalog.json`). If the catalog is empty, you won't see any results. See [Extension Catalogs](#extension-catalogs) to learn how to populate your catalog from the community reference catalog.
### Browse All Extensions
@@ -84,7 +84,7 @@ vim .specify/extensions/jira/jira-config.yml
specify extension search
```
Shows all extensions across all active catalogs (default and community by default).
Shows all extensions in your organization's catalog.
### Search by Keyword
@@ -402,13 +402,13 @@ In addition to extension-specific environment variables (`SPECKIT_{EXT_ID}_*`),
| Variable | Description | Default |
|----------|-------------|---------|
| `SPECKIT_CATALOG_URL` | Override the full catalog stack with a single URL (backward compat) | Built-in default stack |
| `SPECKIT_CATALOG_URL` | Override the extension catalog URL | GitHub-hosted catalog |
| `GH_TOKEN` / `GITHUB_TOKEN` | GitHub API token for downloads | None |
#### Example: Using a custom catalog for testing
```bash
# Point to a local or alternative catalog (replaces the full stack)
# Point to a local or alternative catalog
export SPECKIT_CATALOG_URL="http://localhost:8000/catalog.json"
# Or use a staging catalog
@@ -419,96 +419,13 @@ export SPECKIT_CATALOG_URL="https://example.com/staging/catalog.json"
## Extension Catalogs
Spec Kit uses a **catalog stack** — an ordered list of catalogs searched simultaneously. By default, two catalogs are active:
| Priority | Catalog | Install Allowed | Purpose |
|----------|---------|-----------------|---------|
| 1 | `catalog.json` (default) | ✅ Yes | Curated extensions available for installation |
| 2 | `catalog.community.json` (community) | ❌ No (discovery only) | Browse community extensions |
### Listing Active Catalogs
```bash
specify extension catalog list
```
### Managing Catalogs via CLI
You can view the main catalog management commands using `--help`:
```text
specify extension catalog --help
Usage: specify extension catalog [OPTIONS] COMMAND [ARGS]...
Manage extension catalogs
╭─ Options ────────────────────────────────────────────────────────────────────────╮
│ --help Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────────╯
╭─ Commands ───────────────────────────────────────────────────────────────────────╮
│ list List all active extension catalogs. │
│ add Add a catalog to .specify/extension-catalogs.yml. │
│ remove Remove a catalog from .specify/extension-catalogs.yml. │
╰──────────────────────────────────────────────────────────────────────────────────╯
```
### Adding a Catalog (Project-scoped)
```bash
# Add an internal catalog that allows installs
specify extension catalog add \
--name "internal" \
--priority 2 \
--install-allowed \
https://internal.company.com/spec-kit/catalog.json
# Add a discovery-only catalog
specify extension catalog add \
--name "partner" \
--priority 5 \
https://partner.example.com/spec-kit/catalog.json
```
This creates or updates `.specify/extension-catalogs.yml`.
### Removing a Catalog
```bash
specify extension catalog remove internal
```
### Manual Config File
You can also edit `.specify/extension-catalogs.yml` directly:
```yaml
catalogs:
- name: "default"
url: "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.json"
priority: 1
install_allowed: true
description: "Built-in catalog of installable extensions"
- name: "internal"
url: "https://internal.company.com/spec-kit/catalog.json"
priority: 2
install_allowed: true
description: "Internal company extensions"
- name: "community"
url: "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json"
priority: 3
install_allowed: false
description: "Community-contributed extensions (discovery only)"
```
A user-level equivalent lives at `~/.specify/extension-catalogs.yml`. Project-level config takes full precedence when it contains one or more catalog entries. An empty `catalogs: []` list falls back to built-in defaults.
For information about how Spec Kit's dual-catalog system works (`catalog.json` vs `catalog.community.json`), see the main [Extensions README](README.md#extension-catalogs).
## Organization Catalog Customization
### Why Customize Your Catalog
Organizations customize their catalogs to:
Organizations customize their `catalog.json` to:
- **Control available extensions** - Curate which extensions your team can install
- **Host private extensions** - Internal tools that shouldn't be public
@@ -586,40 +503,24 @@ Options for hosting your catalog:
#### 3. Configure Your Environment
##### Option A: Catalog stack config file (recommended)
Add to `.specify/extension-catalogs.yml` in your project:
```yaml
catalogs:
- name: "my-org"
url: "https://your-org.com/spec-kit/catalog.json"
priority: 1
install_allowed: true
```
Or use the CLI:
```bash
specify extension catalog add \
--name "my-org" \
--install-allowed \
https://your-org.com/spec-kit/catalog.json
```
##### Option B: Environment variable (recommended for CI/CD, single-catalog)
##### Option A: Environment variable (recommended for CI/CD)
```bash
# In ~/.bashrc, ~/.zshrc, or CI pipeline
export SPECKIT_CATALOG_URL="https://your-org.com/spec-kit/catalog.json"
```
##### Option B: Per-project configuration
Create `.env` or set in your shell before running spec-kit commands:
```bash
SPECKIT_CATALOG_URL="https://your-org.com/spec-kit/catalog.json" specify extension search
```
#### 4. Verify Configuration
```bash
# List active catalogs
specify extension catalog list
# Search should now show your catalog's extensions
specify extension search

View File

@@ -72,15 +72,9 @@ The following community-contributed extensions are available in [`catalog.commun
| Extension | Purpose | URL |
|-----------|---------|-----|
| Azure DevOps Integration | Sync user stories and tasks to Azure DevOps work items using OAuth authentication | [spec-kit-azure-devops](https://github.com/pragya247/spec-kit-azure-devops) |
| Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) |
| Fleet Orchestrator | Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases | [spec-kit-fleet](https://github.com/sharathsatish/spec-kit-fleet) |
| Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
| Ralph Loop | Autonomous implementation loop using AI agent CLI | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |
| Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
| Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
| Spec Sync | Detect and resolve drift between specs and implementation. AI-assisted resolution with human approval | [spec-kit-sync](https://github.com/bgervin/spec-kit-sync) |
| Understanding | Automated requirements quality analysis — 31 deterministic metrics against IEEE/ISO standards with experimental energy-based ambiguity detection | [understanding](https://github.com/Testimonial/understanding) |
| V-Model Extension Pack | Enforces V-Model paired generation of development specs and test specs with full traceability | [spec-kit-v-model](https://github.com/leocamello/spec-kit-v-model) |
| Verify Extension | Post-implementation quality gate that validates implemented code against specification artifacts | [spec-kit-verify](https://github.com/ismaelJimenez/spec-kit-verify) |

View File

@@ -868,7 +868,7 @@ Spec Kit uses two catalog files with different purposes:
- **Purpose**: Organization's curated catalog of approved extensions
- **Default State**: Empty by design - users populate with extensions they trust
- **Usage**: Primary catalog (priority 1, `install_allowed: true`) in the default stack
- **Usage**: Default catalog used by `specify extension` CLI commands
- **Control**: Organizations maintain their own fork/version for their teams
#### Community Reference Catalog (`catalog.community.json`)
@@ -879,16 +879,16 @@ Spec Kit uses two catalog files with different purposes:
- **Verification**: Community extensions may have `verified: false` initially
- **Status**: Active - open for community contributions
- **Submission**: Via Pull Request following the Extension Publishing Guide
- **Usage**: Secondary catalog (priority 2, `install_allowed: false`) in the default stack — discovery only
- **Usage**: Browse to discover extensions, then copy to your `catalog.json`
**How It Works (default stack):**
**How It Works:**
1. **Discover**: `specify extension search` searches both catalogs — community extensions appear automatically
2. **Review**: Evaluate community extensions for security, quality, and organizational fit
3. **Curate**: Copy approved entries from community catalog to your `catalog.json`, or add to `.specify/extension-catalogs.yml` with `install_allowed: true`
4. **Install**: Use `specify extension add <name>` — only allowed from `install_allowed: true` catalogs
1. **Discover**: Browse `catalog.community.json` to find available extensions
2. **Review**: Evaluate extensions for security, quality, and organizational fit
3. **Curate**: Copy approved extension entries from community catalog to your `catalog.json`
4. **Install**: Use `specify extension add <name>` (pulls from your curated catalog)
This approach gives organizations full control over which extensions can be installed while still providing community discoverability out of the box.
This approach gives organizations full control over which extensions are available to their teams while maintaining a shared community resource for discovery.
### Catalog Format
@@ -961,92 +961,30 @@ specify extension info jira
### Custom Catalogs
Spec Kit supports a **catalog stack** — an ordered list of catalogs that the CLI merges and searches across. This lets organizations combine an org-approved catalog, internal company catalogs, and community discovery, all at once.
**⚠️ FUTURE FEATURE - NOT YET IMPLEMENTED**
#### Catalog Stack Resolution
The active catalog stack is resolved in this order (first match wins):
1. **`SPECKIT_CATALOG_URL` environment variable** — single catalog replacing all defaults (backward compat)
2. **Project-level `.specify/extension-catalogs.yml`** — full control for the project
3. **User-level `~/.specify/extension-catalogs.yml`** — personal defaults
4. **Built-in default stack** — `catalog.json` (install_allowed: true) + `catalog.community.json` (install_allowed: false)
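For example, setting the environment variable short-circuits both config files and the built-in stack:

```bash
# Highest precedence: a single catalog replaces the whole stack
export SPECKIT_CATALOG_URL="https://internal.company.com/spec-kit/catalog.json"
specify extension search   # now searches only this catalog
```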
#### Default Built-in Stack
When no config file exists, the CLI uses:
| Priority | Catalog | install_allowed | Purpose |
|----------|---------|-----------------|---------|
| 1 | `catalog.json` (default) | `true` | Curated extensions available for installation |
| 2 | `catalog.community.json` (community) | `false` | Discovery only — browse but not install |
This means `specify extension search` surfaces community extensions out of the box, while `specify extension add` is still restricted to entries from catalogs with `install_allowed: true`.
#### `.specify/extension-catalogs.yml` Config File
```yaml
catalogs:
- name: "default"
url: "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.json"
priority: 1 # Highest — only approved entries can be installed
install_allowed: true
description: "Built-in catalog of installable extensions"
- name: "internal"
url: "https://internal.company.com/spec-kit/catalog.json"
priority: 2
install_allowed: true
description: "Internal company extensions"
- name: "community"
url: "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json"
priority: 3 # Lowest — discovery only, not installable
install_allowed: false
description: "Community-contributed extensions (discovery only)"
```
A user-level equivalent lives at `~/.specify/extension-catalogs.yml`. When a project-level config is present with one or more catalog entries, it takes full control and the built-in defaults are not applied. An empty `catalogs: []` list is treated the same as no config file, falling back to defaults.
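A minimal user-level config might look like this (the catalog URL is a placeholder):

```yaml
# ~/.specify/extension-catalogs.yml
catalogs:
  - name: "personal"
    url: "https://example.com/spec-kit/catalog.json"
    priority: 1
    install_allowed: true
```

An empty `catalogs: []` list here would fall back to the built-in defaults, exactly as described above.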
#### Catalog CLI Commands
The following catalog management commands are proposed design concepts but are not yet available in the current implementation:
```bash
# List active catalogs with name, URL, priority, and install_allowed
specify extension catalog list
# Add custom catalog (FUTURE - NOT AVAILABLE)
specify extension add-catalog https://internal.company.com/spec-kit/catalog.json
# Add a catalog (project-scoped)
specify extension catalog add --name "internal" --install-allowed \
https://internal.company.com/spec-kit/catalog.json
# Set as default (FUTURE - NOT AVAILABLE)
specify extension set-catalog --default https://internal.company.com/spec-kit/catalog.json
# Add a discovery-only catalog
specify extension catalog add --name "community" \
https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json
# Remove a catalog
specify extension catalog remove internal
# Show which catalog an extension came from
specify extension info jira
# → Source catalog: default
# List catalogs (FUTURE - NOT AVAILABLE)
specify extension catalogs
```
#### Merge Conflict Resolution
**Proposed catalog priority** (future design):
When the same extension `id` appears in multiple catalogs, the higher-priority (lower priority number) catalog wins. Extensions from lower-priority catalogs with the same `id` are ignored.
1. Project-specific catalog (`.specify/extension-catalogs.yml`) - *not implemented*
2. User-level catalog (`~/.specify/extension-catalogs.yml`) - *not implemented*
3. Default GitHub catalog
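A minimal sketch of the winner-takes-all merge (illustrative helper, not the CLI internals; the actual logic lives in `_get_merged_extensions`, shown further below):

```python
def merge_catalogs(catalogs: list[dict]) -> dict:
    """Merge extension entries from catalogs, lowest priority number first.

    The first catalog to define an extension id wins; duplicates from
    lower-priority catalogs are ignored.
    """
    merged: dict = {}
    for cat in sorted(catalogs, key=lambda c: c["priority"]):
        for ext_id, ext in cat["extensions"].items():
            if ext_id not in merged:  # a higher-priority catalog already won
                merged[ext_id] = {
                    **ext,
                    "_catalog_name": cat["name"],
                    "_install_allowed": cat["install_allowed"],
                }
    return merged
```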
#### `install_allowed: false` Behavior
#### Current Implementation: SPECKIT_CATALOG_URL
Extensions from discovery-only catalogs are shown in `specify extension search` results but cannot be installed directly:
```
⚠ 'linear' is available in the 'community' catalog but installation is not allowed from that catalog.
To enable installation, add 'linear' to an approved catalog (install_allowed: true) in .specify/extension-catalogs.yml.
```
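As the search hint notes, a discovery-only extension can still be installed directly from a ZIP URL while you curate your catalog (URL illustrative):

```bash
specify extension add linear --from https://example.com/spec-kit-linear/v1.0.0.zip
```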
#### `SPECKIT_CATALOG_URL` (Backward Compatibility)
The `SPECKIT_CATALOG_URL` environment variable still works — it is treated as a single `install_allowed: true` catalog, **replacing both defaults** for full backward compatibility:
**The currently available method** for using custom catalogs is the `SPECKIT_CATALOG_URL` environment variable:
```bash
# Point to your organization's catalog
export SPECKIT_CATALOG_URL="https://internal.company.com/spec-kit/catalog.json"
```

View File

@@ -1,47 +1,8 @@
{
"schema_version": "1.0",
"updated_at": "2026-03-09T00:00:00Z",
"updated_at": "2026-03-03T00:00:00Z",
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json",
"extensions": {
"azure-devops": {
"name": "Azure DevOps Integration",
"id": "azure-devops",
"description": "Sync user stories and tasks to Azure DevOps work items using OAuth authentication.",
"author": "pragya247",
"version": "1.0.0",
"download_url": "https://github.com/pragya247/spec-kit-azure-devops/archive/refs/tags/v1.0.0.zip",
"repository": "https://github.com/pragya247/spec-kit-azure-devops",
"homepage": "https://github.com/pragya247/spec-kit-azure-devops",
"documentation": "https://github.com/pragya247/spec-kit-azure-devops/blob/main/README.md",
"changelog": "https://github.com/pragya247/spec-kit-azure-devops/blob/main/CHANGELOG.md",
"license": "MIT",
"requires": {
"speckit_version": ">=0.1.0",
"tools": [
{
"name": "az",
"version": ">=2.0.0",
"required": true
}
]
},
"provides": {
"commands": 1,
"hooks": 1
},
"tags": [
"azure",
"devops",
"project-management",
"work-items",
"issue-tracking"
],
"verified": false,
"downloads": 0,
"stars": 0,
"created_at": "2026-03-03T00:00:00Z",
"updated_at": "2026-03-03T00:00:00Z"
},
"cleanup": {
"name": "Cleanup Extension",
"id": "cleanup",
@@ -61,112 +22,13 @@
"commands": 1,
"hooks": 1
},
"tags": [
"quality",
"tech-debt",
"review",
"cleanup",
"scout-rule"
],
"tags": ["quality", "tech-debt", "review", "cleanup", "scout-rule"],
"verified": false,
"downloads": 0,
"stars": 0,
"created_at": "2026-02-22T00:00:00Z",
"updated_at": "2026-02-22T00:00:00Z"
},
"fleet": {
"name": "Fleet Orchestrator",
"id": "fleet",
"description": "Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases.",
"author": "sharathsatish",
"version": "1.0.0",
"download_url": "https://github.com/sharathsatish/spec-kit-fleet/archive/refs/tags/v1.0.0.zip",
"repository": "https://github.com/sharathsatish/spec-kit-fleet",
"homepage": "https://github.com/sharathsatish/spec-kit-fleet",
"documentation": "https://github.com/sharathsatish/spec-kit-fleet/blob/main/README.md",
"changelog": "https://github.com/sharathsatish/spec-kit-fleet/blob/main/CHANGELOG.md",
"license": "MIT",
"requires": {
"speckit_version": ">=0.1.0"
},
"provides": {
"commands": 2,
"hooks": 1
},
"tags": ["orchestration", "workflow", "human-in-the-loop", "parallel"],
"verified": false,
"downloads": 0,
"stars": 0,
"created_at": "2026-03-06T00:00:00Z",
"updated_at": "2026-03-06T00:00:00Z"
},
"jira": {
"name": "Jira Integration",
"id": "jira",
"description": "Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support.",
"author": "mbachorik",
"version": "2.1.0",
"download_url": "https://github.com/mbachorik/spec-kit-jira/archive/refs/tags/v2.1.0.zip",
"repository": "https://github.com/mbachorik/spec-kit-jira",
"homepage": "https://github.com/mbachorik/spec-kit-jira",
"documentation": "https://github.com/mbachorik/spec-kit-jira/blob/main/README.md",
"changelog": "https://github.com/mbachorik/spec-kit-jira/blob/main/CHANGELOG.md",
"license": "MIT",
"requires": {
"speckit_version": ">=0.1.0"
},
"provides": {
"commands": 3,
"hooks": 1
},
"tags": [
"issue-tracking",
"jira",
"atlassian",
"project-management"
],
"verified": false,
"downloads": 0,
"stars": 0,
"created_at": "2026-03-05T00:00:00Z",
"updated_at": "2026-03-05T00:00:00Z"
},
"ralph": {
"name": "Ralph Loop",
"id": "ralph",
"description": "Autonomous implementation loop using AI agent CLI.",
"author": "Rubiss",
"version": "1.0.0",
"download_url": "https://github.com/Rubiss/spec-kit-ralph/archive/refs/tags/v1.0.0.zip",
"repository": "https://github.com/Rubiss/spec-kit-ralph",
"homepage": "https://github.com/Rubiss/spec-kit-ralph",
"documentation": "https://github.com/Rubiss/spec-kit-ralph/blob/main/README.md",
"changelog": "https://github.com/Rubiss/spec-kit-ralph/blob/main/CHANGELOG.md",
"license": "MIT",
"requires": {
"speckit_version": ">=0.1.0",
"tools": [
{
"name": "copilot",
"required": true
},
{
"name": "git",
"required": true
}
]
},
"provides": {
"commands": 2,
"hooks": 1
},
"tags": ["implementation", "automation", "loop", "copilot"],
"verified": false,
"downloads": 0,
"stars": 0,
"created_at": "2026-03-09T00:00:00Z",
"updated_at": "2026-03-09T00:00:00Z"
},
"retrospective": {
"name": "Retrospective Extension",
"id": "retrospective",
@@ -186,45 +48,13 @@
"commands": 1,
"hooks": 1
},
"tags": [
"retrospective",
"spec-drift",
"quality",
"analysis",
"governance"
],
"tags": ["retrospective", "spec-drift", "quality", "analysis", "governance"],
"verified": false,
"downloads": 0,
"stars": 0,
"created_at": "2026-02-24T00:00:00Z",
"updated_at": "2026-02-24T00:00:00Z"
},
"review": {
"name": "Review Extension",
"id": "review",
"description": "Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification.",
"author": "ismaelJimenez",
"version": "1.0.0",
"download_url": "https://github.com/ismaelJimenez/spec-kit-review/archive/refs/tags/v1.0.0.zip",
"repository": "https://github.com/ismaelJimenez/spec-kit-review",
"homepage": "https://github.com/ismaelJimenez/spec-kit-review",
"documentation": "https://github.com/ismaelJimenez/spec-kit-review/blob/main/README.md",
"changelog": "https://github.com/ismaelJimenez/spec-kit-review/blob/main/CHANGELOG.md",
"license": "MIT",
"requires": {
"speckit_version": ">=0.1.0"
},
"provides": {
"commands": 7,
"hooks": 1
},
"tags": ["code-review", "quality", "review", "testing", "error-handling", "type-design", "simplification"],
"verified": false,
"downloads": 0,
"stars": 0,
"created_at": "2026-03-06T00:00:00Z",
"updated_at": "2026-03-06T00:00:00Z"
},
"sync": {
"name": "Spec Sync",
"id": "sync",
@@ -244,60 +74,13 @@
"commands": 5,
"hooks": 1
},
"tags": [
"sync",
"drift",
"validation",
"bidirectional",
"backfill"
],
"tags": ["sync", "drift", "validation", "bidirectional", "backfill"],
"verified": false,
"downloads": 0,
"stars": 0,
"created_at": "2026-03-02T00:00:00Z",
"updated_at": "2026-03-02T00:00:00Z"
},
"understanding": {
"name": "Understanding",
"id": "understanding",
"description": "Automated requirements quality analysis — validates specs against IEEE/ISO standards using 31 deterministic metrics. Catches ambiguity, missing testability, and structural issues before they reach implementation. Includes experimental energy-based ambiguity detection using local LM token perplexity.",
"author": "Ladislav Bihari",
"version": "3.4.0",
"download_url": "https://github.com/Testimonial/understanding/archive/refs/tags/v3.4.0.zip",
"repository": "https://github.com/Testimonial/understanding",
"homepage": "https://github.com/Testimonial/understanding",
"documentation": "https://github.com/Testimonial/understanding/blob/main/extension/README.md",
"changelog": "https://github.com/Testimonial/understanding/blob/main/extension/CHANGELOG.md",
"license": "MIT",
"requires": {
"speckit_version": ">=0.1.0",
"tools": [
{
"name": "understanding",
"version": ">=3.4.0",
"required": true
}
]
},
"provides": {
"commands": 3,
"hooks": 1
},
"tags": [
"quality",
"metrics",
"requirements",
"validation",
"readability",
"IEEE-830",
"ISO-29148"
],
"verified": false,
"downloads": 0,
"stars": 0,
"created_at": "2026-03-07T00:00:00Z",
"updated_at": "2026-03-07T00:00:00Z"
},
"v-model": {
"name": "V-Model Extension Pack",
"id": "v-model",
@@ -317,13 +100,7 @@
"commands": 9,
"hooks": 1
},
"tags": [
"v-model",
"traceability",
"testing",
"compliance",
"safety-critical"
],
"tags": ["v-model", "traceability", "testing", "compliance", "safety-critical"],
"verified": false,
"downloads": 0,
"stars": 0,
@@ -349,13 +126,7 @@
"commands": 1,
"hooks": 1
},
"tags": [
"verification",
"quality-gate",
"implementation",
"spec-adherence",
"compliance"
],
"tags": ["verification", "quality-gate", "implementation", "spec-adherence", "compliance"],
"verified": false,
"downloads": 0,
"stars": 0,

View File

@@ -1,54 +0,0 @@
# Spec Kit - February 2026 Newsletter
This edition covers Spec Kit activity in February 2026. Versions v0.1.7 through v0.1.13 shipped during the month, addressing bugs and adding features including a dual-catalog extension system and additional agent integrations. Community activity included blog posts, tutorials, and meetup sessions. A category summary is in the table below, followed by details.
| **Spec Kit Core (Feb 2026)** | **Community & Content** | **Roadmap & Next** |
| --- | --- | --- |
| Versions **v0.1.7** through **v0.1.13** shipped with bug fixes and features, including a **dual-catalog extension system** and new agent integrations. Over 300 issues were closed (of ~800 filed). The repo reached 71k stars and 6.4k forks. [\[github.com\]](https://github.com/github/spec-kit/releases) [\[github.com\]](https://github.com/github/spec-kit/issues) [\[rywalker.com\]](https://rywalker.com/research/github-spec-kit) | Eduardo Luz published a LinkedIn article on SDD and Spec Kit [\[linkedin.com\]](https://www.linkedin.com/pulse/specification-driven-development-sdd-github-spec-kit-elevating-luz-tojmc?tl=en). Erick Matsen blogged a walkthrough of building a bioinformatics pipeline with Spec Kit [\[matsen.fredhutch.org\]](https://matsen.fredhutch.org/general/2026/02/10/spec-kit-walkthrough.html). Microsoft MVP [Eric Boyd](https://ericboyd.com/) (not the Microsoft AI Platform VP of the same name) presented at the Cleveland .NET User Group [\[ericboyd.com\]](https://ericboyd.com/events/cleveland-csharp-user-group-february-25-2026-spec-driven-development-sdd-github-spec-kit). | **v0.2.0** was released in early March, consolidating February's work. It added extensions for Jira and Azure DevOps, community plugin support, and agents for Tabnine CLI and Kiro CLI [\[github.com\]](https://github.com/github/spec-kit/releases). Future work includes spec lifecycle management and progress toward a stable 1.0 release [\[martinfowler.com\]](https://martinfowler.com/articles/exploring-gen-ai/sdd-3-tools.html). |
***
## Spec Kit Project Updates
Spec Kit released versions **v0.1.7** through **v0.1.13** during February. Version 0.1.7 (early February) updated documentation for the newly introduced **dual-catalog extension system**, which allows both core and community extension catalogs to coexist. Subsequent patches (0.1.8, 0.1.9, etc.) bumped dependencies such as GitHub Actions versions and resolved minor issues. **v0.1.10** fixed YAML front-matter handling in generated files. By late February, **v0.1.12** and **v0.1.13** shipped with additional fixes in preparation for the 0.2.0 release. [\[github.com\]](https://github.com/github/spec-kit/releases)
The main architectural addition was the **modular extension system** with separate "core" and "community" extension catalogs for third-party add-ons. Multiple community-contributed extensions were merged during the month, including a **Jira extension** for issue tracker integration, an **Azure DevOps extension**, and utility extensions for code review, retrospective documentation, and CI/CD sync. The pending 0.2.0 release changelog lists over a dozen changes from February, including the extension additions and support for **multiple agent catalogs concurrently**. [\[github.com\]](https://github.com/github/spec-kit/releases)
By the end of February, **over 330 issues/feature requests had been closed on GitHub** (out of ~870 filed to date). External contributors submitted pull requests, including **Tabnine CLI support**, which was merged in late February. The repository reached ~71k stars and crossed 6,000 forks. [\[github.com\]](https://github.com/github/spec-kit/issues) [\[github.com\]](https://github.com/github/spec-kit/releases) [\[rywalker.com\]](https://rywalker.com/research/github-spec-kit)
On the stability side, February's work focused on tightening core workflows and fixing edge-case bugs in the specification, planning, and task-generation commands. The team addressed file-handling issues (e.g., clarifying how output files are created/appended) and improved the reliability of the automated release pipeline. The project also added **Kiro CLI** to the supported agent list and updated integration scripts for Cursor and Code Interpreter, bringing the total number of supported AI coding assistants to over 20. [\[github.com\]](https://github.com/github/spec-kit/releases) [\[github.com\]](https://github.com/github/spec-kit)
## Community & Content
**Eduardo Luz** published a LinkedIn article on Feb 15 titled *"Specification Driven Development (SDD) and the GitHub Spec Kit: Elevating Software Engineering."* The article draws on his experience as a senior engineer to describe common causes of technical debt and inconsistent designs, and how SDD addresses them. It walks through Spec Kit's **four-layer approach** (Constitution, Design, Tasks, Implementation) and discusses treating specifications as a source of truth. The post generated discussion among software architects on LinkedIn about reducing misunderstandings and rework through spec-driven workflows. [\[linkedin.com\]](https://www.linkedin.com/pulse/specification-driven-development-sdd-github-spec-kit-elevating-luz-tojmc?tl=en)
**Erick Matsen** (Fred Hutchinson Cancer Center) posted a detailed walkthrough on Feb 10 titled *"Spec-Driven Development with spec-kit."* He describes building a **bioinformatics pipeline** in a single day using Spec Kit's workflow (from `speckit.constitution` to `speckit.implement`). The post includes command outputs and notes on decisions made along the way, such as refining the spec to add domain-specific requirements. He writes: "I really recommend this approach. This feels like the way software development should be." [\[matsen.fredhutch.org\]](https://matsen.fredhutch.org/general/2026/02/10/spec-kit-walkthrough.html) [\[github.com\]](https://github.com/mnriem/spec-kit-dotnet-cli-demo)
Several other tutorials and guides appeared during the month. An article on *IntuitionLabs* (updated Feb 21) provided a guide to Spec Kit covering the philosophy behind SDD and a walkthrough of the four-phase workflow with examples. A piece by Ry Walker (Feb 22) summarized key aspects of Spec Kit, noting its agent-agnostic design and 71k-star count. Microsoft's Developer Blog post from late 2025 (*"Diving Into Spec-Driven Development with GitHub Spec Kit"* by Den Delimarsky) continued to circulate among new users. [\[intuitionlabs.ai\]](https://intuitionlabs.ai/articles/spec-driven-development-spec-kit) [\[rywalker.com\]](https://rywalker.com/research/github-spec-kit)
On **Feb 25**, the Cleveland C# .NET User Group hosted a session titled *"Spec Driven Development with GitHub Spec Kit."* The talk was delivered by Microsoft MVP **[Eric Boyd](https://ericboyd.com/)** (Cleveland-based .NET developer; not to be confused with the Microsoft AI Platform VP of the same name). Boyd covered how specs change an AI coding assistant's output, patterns for iterating and refining specs over multiple cycles, and moving from ad-hoc prompting to a repeatable spec-driven workflow. Other groups, including GDG Madison, also listed sessions on spec-driven development in late February and early March. [\[ericboyd.com\]](https://ericboyd.com/events/cleveland-csharp-user-group-february-25-2026-spec-driven-development-sdd-github-spec-kit)
On GitHub, the **Spec Kit Discussions forum** saw activity around installation troubleshooting, handling multi-feature projects with Spec Kit's branching model, and feature suggestions. One thread discussed how Spec Kit treats each spec as a short-lived artifact tied to a feature branch, which led to discussion about future support for long-running "spec of record" use cases. [\[martinfowler.com\]](https://martinfowler.com/articles/exploring-gen-ai/sdd-3-tools.html)
## SDD Ecosystem
Other spec-driven development tools also saw activity in February.
AWS **Kiro** released version 0.10 on Feb 18 with two new spec workflows: a **Design-First** mode (starting from architecture/pseudocode to derive requirements) and a **Bugfix** mode (structured root-cause analysis producing a `bugfix.md` spec file). Kiro also added hunk-level code review for AI-generated changes and pre/post task hooks for custom automation. AWS expanded Kiro to GovCloud regions on Feb 17 for government compliance use cases. [\[kiro.dev\]](https://kiro.dev/changelog/)
**OpenSpec** (by Fission AI), a lightweight SDD framework, reached ~29.3k stars and nearly 2k forks. Its community published guides and comparisons during the month, including *"Spec-Driven Development Made Easy: A Practical Guide with OpenSpec."* OpenSpec emphasizes simplicity and flexibility, integrating with multiple AI coding assistants via YAML configs.
**Tessl** remained in private beta. As described by Thoughtworks writer Birgitta Boeckeler, Tessl pursues a **spec-as-source** model where specifications are maintained long-term and directly generate code files one-to-one, with generated code labeled as "do not edit." This contrasts with Spec Kit's current approach of creating specs per feature/branch. [\[martinfowler.com\]](https://martinfowler.com/articles/exploring-gen-ai/sdd-3-tools.html)
An **arXiv preprint** (January 2026) categorized SDD implementations into three levels: *spec-first*, *spec-anchored*, and *spec-as-source*. Spec Kit was identified as primarily spec-first with elements of spec-anchored. Tech media published reviews including a *Vibe Coding* "GitHub Spec Kit Review (2026)" and a blog post titled *"Putting Spec Kit Through Its Paces: Radical Idea or Reinvented Waterfall?"* which concluded that SDD with AI assistance is more iterative than traditional Waterfall. [\[intuitionlabs.ai\]](https://intuitionlabs.ai/articles/spec-driven-development-spec-kit) [\[martinfowler.com\]](https://martinfowler.com/articles/exploring-gen-ai/sdd-3-tools.html)
## Roadmap
**v0.2.0** was released on March 10, 2026, consolidating the month's work. It includes new extensions (Jira, Azure DevOps, review, sync), support for multiple extension catalogs and community plugins, and additional agent integrations (Tabnine CLI, Kiro CLI). [\[github.com\]](https://github.com/github/spec-kit/releases)
Areas under discussion or in progress for future development:
- **Spec lifecycle management** -- supporting longer-lived specifications that can evolve across multiple iterations, rather than being tied to a single feature branch. Users have raised this in GitHub Discussions, and the concept of "spec-anchored" development is under consideration. [\[martinfowler.com\]](https://martinfowler.com/articles/exploring-gen-ai/sdd-3-tools.html)
- **CI/CD integration** -- incorporating Spec Kit verification (e.g., `speckit.checklist` or `speckit.verify`) into pull request workflows and project management tools. February's Jira and Azure DevOps extensions are a step in this direction. [\[github.com\]](https://github.com/github/spec-kit/releases)
- **Continued agent support** -- adding integrations as new AI coding assistants emerge. The project currently supports over 20 agents and has been adding new ones (Kiro CLI, Tabnine CLI) as they become available. [\[github.com\]](https://github.com/github/spec-kit)
- **Community ecosystem** -- the open extension model allows external contributors to add functionality directly. February's Jira and Azure DevOps plugins were community-contributed. The Spec Kit README now links to community walkthrough demos for .NET, Spring Boot, and other stacks. [\[github.com\]](https://github.com/github/spec-kit)

View File

@@ -1,6 +1,6 @@
[project]
name = "specify-cli"
version = "0.2.1"
version = "0.1.13"
description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)."
requires-python = ">=3.11"
dependencies = [
@@ -13,7 +13,6 @@ dependencies = [
"truststore>=0.10.4",
"pyyaml>=6.0",
"packaging>=23.0",
"pathspec>=0.12.0",
]
[project.scripts]

View File

@@ -30,12 +30,12 @@
#
# 5. Multi-Agent Support
# - Handles agent-specific file paths and naming conventions
# - Supports: Claude, Gemini, Copilot, Cursor, Qwen, opencode, Codex, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy CLI, Qoder CLI, Amp, SHAI, Tabnine CLI, Kiro CLI, Mistral Vibe, Kimi Code, Antigravity or Generic
# - Supports: Claude, Gemini, Copilot, Cursor, Qwen, opencode, Codex, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy CLI, Qoder CLI, Amp, SHAI, Kiro CLI, or Antigravity
# - Can update single agents or all existing agent files
# - Creates default Claude file if no agent files exist
#
# Usage: ./update-agent-context.sh [agent_type]
# Agent types: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|generic
# Agent types: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|kiro-cli|agy|bob|qodercli|generic
# Leave empty to update all existing agent files
set -e
@@ -73,12 +73,9 @@ CODEBUDDY_FILE="$REPO_ROOT/CODEBUDDY.md"
QODER_FILE="$REPO_ROOT/QODER.md"
AMP_FILE="$REPO_ROOT/AGENTS.md"
SHAI_FILE="$REPO_ROOT/SHAI.md"
TABNINE_FILE="$REPO_ROOT/TABNINE.md"
KIRO_FILE="$REPO_ROOT/AGENTS.md"
AGY_FILE="$REPO_ROOT/.agent/rules/specify-rules.md"
BOB_FILE="$REPO_ROOT/AGENTS.md"
VIBE_FILE="$REPO_ROOT/.vibe/agents/specify-agents.md"
KIMI_FILE="$REPO_ROOT/KIMI.md"
# Template file
TEMPLATE_FILE="$REPO_ROOT/.specify/templates/agent-file-template.md"
@@ -651,9 +648,6 @@ update_specific_agent() {
shai)
update_agent_file "$SHAI_FILE" "SHAI"
;;
tabnine)
update_agent_file "$TABNINE_FILE" "Tabnine CLI"
;;
kiro-cli)
update_agent_file "$KIRO_FILE" "Kiro CLI"
;;
@@ -663,18 +657,12 @@ update_specific_agent() {
bob)
update_agent_file "$BOB_FILE" "IBM Bob"
;;
vibe)
update_agent_file "$VIBE_FILE" "Mistral Vibe"
;;
kimi)
update_agent_file "$KIMI_FILE" "Kimi Code"
;;
generic)
log_info "Generic agent: no predefined context file. Use the agent-specific update script for your agent."
;;
*)
log_error "Unknown agent type '$agent_type'"
log_error "Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|generic"
log_error "Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|kiro-cli|agy|bob|qodercli|generic"
exit 1
;;
esac
@@ -744,11 +732,6 @@ update_all_existing_agents() {
found_agent=true
fi
if [[ -f "$TABNINE_FILE" ]]; then
update_agent_file "$TABNINE_FILE" "Tabnine CLI"
found_agent=true
fi
if [[ -f "$QODER_FILE" ]]; then
update_agent_file "$QODER_FILE" "Qoder CLI"
found_agent=true
@@ -767,16 +750,6 @@ update_all_existing_agents() {
update_agent_file "$BOB_FILE" "IBM Bob"
found_agent=true
fi
if [[ -f "$VIBE_FILE" ]]; then
update_agent_file "$VIBE_FILE" "Mistral Vibe"
found_agent=true
fi
if [[ -f "$KIMI_FILE" ]]; then
update_agent_file "$KIMI_FILE" "Kimi Code"
found_agent=true
fi
# If no agent files exist, create a default Claude file
if [[ "$found_agent" == false ]]; then
@@ -801,7 +774,8 @@ print_summary() {
fi
echo
log_info "Usage: $0 [claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|generic]"
log_info "Usage: $0 [claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|kiro-cli|agy|bob|qodercli]"
}
#==============================================================================

View File

@@ -250,7 +250,7 @@ if ($branchName.Length -gt $maxBranchLength) {
if ($hasGit) {
$branchCreated = $false
try {
git checkout -q -b $branchName 2>$null | Out-Null
git checkout -b $branchName 2>$null | Out-Null
if ($LASTEXITCODE -eq 0) {
$branchCreated = $true
}

View File

@@ -9,7 +9,7 @@ Mirrors the behavior of scripts/bash/update-agent-context.sh:
2. Plan Data Extraction
3. Agent File Management (create from template or update existing)
4. Content Generation (technology stack, recent changes, timestamp)
5. Multi-Agent Support (claude, gemini, copilot, cursor-agent, qwen, opencode, codex, windsurf, kilocode, auggie, roo, codebuddy, amp, shai, tabnine, kiro-cli, agy, bob, vibe, qodercli, kimi, generic)
5. Multi-Agent Support (claude, gemini, copilot, cursor-agent, qwen, opencode, codex, windsurf, kilocode, auggie, roo, codebuddy, amp, shai, kiro-cli, agy, bob, qodercli, generic)
.PARAMETER AgentType
Optional agent key to update a single agent. If omitted, updates all existing agent files (creating a default Claude file if none exist).
@@ -25,7 +25,7 @@ Relies on common helper functions in common.ps1
#>
param(
[Parameter(Position=0)]
[ValidateSet('claude','gemini','copilot','cursor-agent','qwen','opencode','codex','windsurf','kilocode','auggie','roo','codebuddy','amp','shai','tabnine','kiro-cli','agy','bob','qodercli','vibe','kimi','generic')]
[ValidateSet('claude','gemini','copilot','cursor-agent','qwen','opencode','codex','windsurf','kilocode','auggie','roo','codebuddy','amp','shai','kiro-cli','agy','bob','qodercli','generic')]
[string]$AgentType
)
@@ -58,12 +58,9 @@ $CODEBUDDY_FILE = Join-Path $REPO_ROOT 'CODEBUDDY.md'
$QODER_FILE = Join-Path $REPO_ROOT 'QODER.md'
$AMP_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
$SHAI_FILE = Join-Path $REPO_ROOT 'SHAI.md'
$TABNINE_FILE = Join-Path $REPO_ROOT 'TABNINE.md'
$KIRO_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
$AGY_FILE = Join-Path $REPO_ROOT '.agent/rules/specify-rules.md'
$BOB_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
$VIBE_FILE = Join-Path $REPO_ROOT '.vibe/agents/specify-agents.md'
$KIMI_FILE = Join-Path $REPO_ROOT 'KIMI.md'
$TEMPLATE_FILE = Join-Path $REPO_ROOT '.specify/templates/agent-file-template.md'
@@ -402,14 +399,11 @@ function Update-SpecificAgent {
'qodercli' { Update-AgentFile -TargetFile $QODER_FILE -AgentName 'Qoder CLI' }
'amp' { Update-AgentFile -TargetFile $AMP_FILE -AgentName 'Amp' }
'shai' { Update-AgentFile -TargetFile $SHAI_FILE -AgentName 'SHAI' }
'tabnine' { Update-AgentFile -TargetFile $TABNINE_FILE -AgentName 'Tabnine CLI' }
'kiro-cli' { Update-AgentFile -TargetFile $KIRO_FILE -AgentName 'Kiro CLI' }
'agy' { Update-AgentFile -TargetFile $AGY_FILE -AgentName 'Antigravity' }
'bob' { Update-AgentFile -TargetFile $BOB_FILE -AgentName 'IBM Bob' }
'vibe' { Update-AgentFile -TargetFile $VIBE_FILE -AgentName 'Mistral Vibe' }
'kimi' { Update-AgentFile -TargetFile $KIMI_FILE -AgentName 'Kimi Code' }
'generic' { Write-Info 'Generic agent: no predefined context file. Use the agent-specific update script for your agent.' }
default { Write-Err "Unknown agent type '$Type'"; Write-Err 'Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|generic'; return $false }
default { Write-Err "Unknown agent type '$Type'"; Write-Err 'Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|kiro-cli|agy|bob|qodercli|generic'; return $false }
}
}
@@ -429,12 +423,9 @@ function Update-AllExistingAgents {
if (Test-Path $CODEBUDDY_FILE) { if (-not (Update-AgentFile -TargetFile $CODEBUDDY_FILE -AgentName 'CodeBuddy CLI')) { $ok = $false }; $found = $true }
if (Test-Path $QODER_FILE) { if (-not (Update-AgentFile -TargetFile $QODER_FILE -AgentName 'Qoder CLI')) { $ok = $false }; $found = $true }
if (Test-Path $SHAI_FILE) { if (-not (Update-AgentFile -TargetFile $SHAI_FILE -AgentName 'SHAI')) { $ok = $false }; $found = $true }
if (Test-Path $TABNINE_FILE) { if (-not (Update-AgentFile -TargetFile $TABNINE_FILE -AgentName 'Tabnine CLI')) { $ok = $false }; $found = $true }
if (Test-Path $KIRO_FILE) { if (-not (Update-AgentFile -TargetFile $KIRO_FILE -AgentName 'Kiro CLI')) { $ok = $false }; $found = $true }
if (Test-Path $AGY_FILE) { if (-not (Update-AgentFile -TargetFile $AGY_FILE -AgentName 'Antigravity')) { $ok = $false }; $found = $true }
if (Test-Path $BOB_FILE) { if (-not (Update-AgentFile -TargetFile $BOB_FILE -AgentName 'IBM Bob')) { $ok = $false }; $found = $true }
if (Test-Path $VIBE_FILE) { if (-not (Update-AgentFile -TargetFile $VIBE_FILE -AgentName 'Mistral Vibe')) { $ok = $false }; $found = $true }
if (Test-Path $KIMI_FILE) { if (-not (Update-AgentFile -TargetFile $KIMI_FILE -AgentName 'Kimi Code')) { $ok = $false }; $found = $true }
if (-not $found) {
Write-Info 'No existing agent files found, creating default Claude file...'
if (-not (Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code')) { $ok = $false }
@@ -449,7 +440,7 @@ function Print-Summary {
if ($NEW_FRAMEWORK) { Write-Host " - Added framework: $NEW_FRAMEWORK" }
if ($NEW_DB -and $NEW_DB -ne 'N/A') { Write-Host " - Added database: $NEW_DB" }
Write-Host ''
Write-Info 'Usage: ./update-agent-context.ps1 [-AgentType claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|generic]'
Write-Info 'Usage: ./update-agent-context.ps1 [-AgentType claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|kiro-cli|agy|bob|qodercli|generic]'
}
function Main {

View File

@@ -237,13 +237,6 @@ AGENT_CONFIG = {
"install_url": "https://github.com/ovh/shai",
"requires_cli": True,
},
"tabnine": {
"name": "Tabnine CLI",
"folder": ".tabnine/agent/",
"commands_subdir": "commands",
"install_url": "https://docs.tabnine.com/main/getting-started/tabnine-cli",
"requires_cli": True,
},
"agy": {
"name": "Antigravity",
"folder": ".agent/",
@@ -258,20 +251,6 @@ AGENT_CONFIG = {
"install_url": None, # IDE-based
"requires_cli": False,
},
"vibe": {
"name": "Mistral Vibe",
"folder": ".vibe/",
"commands_subdir": "prompts",
"install_url": "https://github.com/mistralai/mistral-vibe",
"requires_cli": True,
},
"kimi": {
"name": "Kimi Code",
"folder": ".kimi/",
"commands_subdir": "skills", # Kimi uses /skill:<name> with .kimi/skills/<name>/SKILL.md
"install_url": "https://code.kimi.com/",
"requires_cli": True,
},
"generic": {
"name": "Generic (bring your own agent)",
"folder": None, # Set dynamically via --ai-commands-dir
@@ -1138,7 +1117,7 @@ def install_ai_skills(project_path: Path, selected_ai: str, tracker: StepTracker
if not templates_dir.exists() or not any(templates_dir.glob("*.md")):
# Fallback: try the repo-relative path (for running from source checkout)
# This also covers agents whose extracted commands are in a different
# format (e.g. gemini/tabnine use .toml, not .md).
# format (e.g. gemini uses .toml, not .md).
script_dir = Path(__file__).parent.parent.parent # up from src/specify_cli/
fallback_dir = script_dir / "templates" / "commands"
if fallback_dir.exists() and any(fallback_dir.glob("*.md")):
@@ -1195,12 +1174,7 @@ def install_ai_skills(project_path: Path, selected_ai: str, tracker: StepTracker
# SKILL_DESCRIPTIONS lookups work.
if command_name.startswith("speckit."):
command_name = command_name[len("speckit."):]
# Kimi CLI discovers skills by directory name and invokes them as
# /skill:<name> — use dot separator to match packaging convention.
if selected_ai == "kimi":
skill_name = f"speckit.{command_name}"
else:
skill_name = f"speckit-{command_name}"
skill_name = f"speckit-{command_name}"
# Create skill directory (additive — never removes existing content)
skill_dir = skills_dir / skill_name
@@ -1306,7 +1280,6 @@ def init(
specify init --here --ai claude # Alternative syntax for current directory
specify init --here --ai codex
specify init --here --ai codebuddy
specify init --here --ai vibe # Initialize with Mistral Vibe support
specify init --here
specify init --here --force # Skip confirmation when current directory not empty
specify init my-project --ai claude --ai-skills # Install agent skills
@@ -1784,13 +1757,6 @@ extension_app = typer.Typer(
)
app.add_typer(extension_app, name="extension")
catalog_app = typer.Typer(
name="catalog",
help="Manage extension catalogs",
add_completion=False,
)
extension_app.add_typer(catalog_app, name="catalog")
def get_speckit_version() -> str:
"""Get current spec-kit version."""
@@ -1856,181 +1822,6 @@ def extension_list(
console.print(" [cyan]specify extension add <name>[/cyan]")
@catalog_app.command("list")
def catalog_list():
"""List all active extension catalogs."""
from .extensions import ExtensionCatalog, ValidationError
project_root = Path.cwd()
specify_dir = project_root / ".specify"
if not specify_dir.exists():
console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
console.print("Run this command from a spec-kit project root")
raise typer.Exit(1)
catalog = ExtensionCatalog(project_root)
try:
active_catalogs = catalog.get_active_catalogs()
except ValidationError as e:
console.print(f"[red]Error:[/red] {e}")
raise typer.Exit(1)
console.print("\n[bold cyan]Active Extension Catalogs:[/bold cyan]\n")
for entry in active_catalogs:
install_str = (
"[green]install allowed[/green]"
if entry.install_allowed
else "[yellow]discovery only[/yellow]"
)
console.print(f" [bold]{entry.name}[/bold] (priority {entry.priority})")
if entry.description:
console.print(f" {entry.description}")
console.print(f" URL: {entry.url}")
console.print(f" Install: {install_str}")
console.print()
config_path = project_root / ".specify" / "extension-catalogs.yml"
user_config_path = Path.home() / ".specify" / "extension-catalogs.yml"
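# Report the source of the active stack: the env var takes precedence,
# then the project-level config, then the user-level config; otherwise
# the built-in default stack is in effect.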
if os.environ.get("SPECKIT_CATALOG_URL"):
console.print("[dim]Catalog configured via SPECKIT_CATALOG_URL environment variable.[/dim]")
else:
try:
proj_loaded = config_path.exists() and catalog._load_catalog_config(config_path) is not None
except ValidationError:
proj_loaded = False
if proj_loaded:
console.print(f"[dim]Config: {config_path.relative_to(project_root)}[/dim]")
else:
try:
user_loaded = user_config_path.exists() and catalog._load_catalog_config(user_config_path) is not None
except ValidationError:
user_loaded = False
if user_loaded:
console.print("[dim]Config: ~/.specify/extension-catalogs.yml[/dim]")
else:
console.print("[dim]Using built-in default catalog stack.[/dim]")
console.print(
"[dim]Add .specify/extension-catalogs.yml to customize.[/dim]"
)
@catalog_app.command("add")
def catalog_add(
url: str = typer.Argument(help="Catalog URL (must use HTTPS)"),
name: str = typer.Option(..., "--name", help="Catalog name"),
priority: int = typer.Option(10, "--priority", help="Priority (lower = higher priority)"),
install_allowed: bool = typer.Option(
False, "--install-allowed/--no-install-allowed",
help="Allow extensions from this catalog to be installed",
),
description: str = typer.Option("", "--description", help="Description of the catalog"),
):
"""Add a catalog to .specify/extension-catalogs.yml."""
from .extensions import ExtensionCatalog, ValidationError
project_root = Path.cwd()
specify_dir = project_root / ".specify"
if not specify_dir.exists():
console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
console.print("Run this command from a spec-kit project root")
raise typer.Exit(1)
# Validate URL
tmp_catalog = ExtensionCatalog(project_root)
try:
tmp_catalog._validate_catalog_url(url)
except ValidationError as e:
console.print(f"[red]Error:[/red] {e}")
raise typer.Exit(1)
config_path = specify_dir / "extension-catalogs.yml"
# Load existing config
if config_path.exists():
try:
config = yaml.safe_load(config_path.read_text()) or {}
except Exception as e:
console.print(f"[red]Error:[/red] Failed to read {config_path}: {e}")
raise typer.Exit(1)
else:
config = {}
catalogs = config.get("catalogs", [])
if not isinstance(catalogs, list):
console.print("[red]Error:[/red] Invalid catalog config: 'catalogs' must be a list.")
raise typer.Exit(1)
# Check for duplicate name
for existing in catalogs:
if isinstance(existing, dict) and existing.get("name") == name:
console.print(f"[yellow]Warning:[/yellow] A catalog named '{name}' already exists.")
console.print("Use 'specify extension catalog remove' first, or choose a different name.")
raise typer.Exit(1)
catalogs.append({
"name": name,
"url": url,
"priority": priority,
"install_allowed": install_allowed,
"description": description,
})
config["catalogs"] = catalogs
config_path.write_text(yaml.dump(config, default_flow_style=False, sort_keys=False))
install_label = "install allowed" if install_allowed else "discovery only"
console.print(f"\n[green]✓[/green] Added catalog '[bold]{name}[/bold]' ({install_label})")
console.print(f" URL: {url}")
console.print(f" Priority: {priority}")
console.print(f"\nConfig saved to {config_path.relative_to(project_root)}")
@catalog_app.command("remove")
def catalog_remove(
name: str = typer.Argument(help="Catalog name to remove"),
):
"""Remove a catalog from .specify/extension-catalogs.yml."""
project_root = Path.cwd()
specify_dir = project_root / ".specify"
if not specify_dir.exists():
console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
console.print("Run this command from a spec-kit project root")
raise typer.Exit(1)
config_path = specify_dir / "extension-catalogs.yml"
if not config_path.exists():
console.print("[red]Error:[/red] No catalog config found. Nothing to remove.")
raise typer.Exit(1)
try:
config = yaml.safe_load(config_path.read_text()) or {}
except Exception:
console.print("[red]Error:[/red] Failed to read catalog config.")
raise typer.Exit(1)
catalogs = config.get("catalogs", [])
if not isinstance(catalogs, list):
console.print("[red]Error:[/red] Invalid catalog config: 'catalogs' must be a list.")
raise typer.Exit(1)
original_count = len(catalogs)
catalogs = [c for c in catalogs if isinstance(c, dict) and c.get("name") != name]
if len(catalogs) == original_count:
console.print(f"[red]Error:[/red] Catalog '{name}' not found.")
raise typer.Exit(1)
config["catalogs"] = catalogs
config_path.write_text(yaml.dump(config, default_flow_style=False, sort_keys=False))
console.print(f"[green]✓[/green] Removed catalog '{name}'")
if not catalogs:
console.print("\n[dim]No catalogs remain in config. Built-in defaults will be used.[/dim]")
@extension_app.command("add")
def extension_add(
extension: str = typer.Argument(help="Extension name or path"),
@@ -2119,19 +1910,6 @@ def extension_add(
console.print(" specify extension search")
raise typer.Exit(1)
# Enforce install_allowed policy
if not ext_info.get("_install_allowed", True):
catalog_name = ext_info.get("_catalog_name", "community")
console.print(
f"[red]Error:[/red] '{extension}' is available in the "
f"'{catalog_name}' catalog but installation is not allowed from that catalog."
)
console.print(
f"\nTo enable installation, add '{extension}' to an approved catalog "
f"(install_allowed: true) in .specify/extension-catalogs.yml."
)
raise typer.Exit(1)
# Download extension ZIP
console.print(f"Downloading {ext_info['name']} v{ext_info.get('version', 'unknown')}...")
zip_path = catalog.download_extension(extension)
@@ -2276,15 +2054,6 @@ def extension_search(
tags_str = ", ".join(ext['tags'])
console.print(f" [dim]Tags:[/dim] {tags_str}")
# Source catalog
catalog_name = ext.get("_catalog_name", "")
install_allowed = ext.get("_install_allowed", True)
if catalog_name:
if install_allowed:
console.print(f" [dim]Catalog:[/dim] {catalog_name}")
else:
console.print(f" [dim]Catalog:[/dim] {catalog_name} [yellow](discovery only — not installable)[/yellow]")
# Stats
stats = []
if ext.get('downloads') is not None:
@@ -2298,15 +2067,8 @@ def extension_search(
if ext.get('repository'):
console.print(f" [dim]Repository:[/dim] {ext['repository']}")
# Install command (show warning if not installable)
if install_allowed:
console.print(f"\n [cyan]Install:[/cyan] specify extension add {ext['id']}")
else:
console.print(f"\n [yellow]⚠[/yellow] Not directly installable from '{catalog_name}'.")
console.print(
f" Add to an approved catalog with install_allowed: true, "
f"or install from a ZIP URL: specify extension add {ext['id']} --from <zip-url>"
)
# Install command
console.print(f"\n [cyan]Install:[/cyan] specify extension add {ext['id']}")
console.print()
except ExtensionError as e:
@@ -2355,12 +2117,6 @@ def extension_info(
# Author and License
console.print(f"[dim]Author:[/dim] {ext_info.get('author', 'Unknown')}")
console.print(f"[dim]License:[/dim] {ext_info.get('license', 'Unknown')}")
# Source catalog
if ext_info.get("_catalog_name"):
install_allowed = ext_info.get("_install_allowed", True)
install_note = "" if install_allowed else " [yellow](discovery only)[/yellow]"
console.print(f"[dim]Source catalog:[/dim] {ext_info['_catalog_name']}{install_note}")
console.print()
# Requirements
@@ -2417,21 +2173,12 @@ def extension_info(
# Installation status and command
is_installed = manager.registry.is_installed(ext_info['id'])
install_allowed = ext_info.get("_install_allowed", True)
if is_installed:
console.print("[green]✓ Installed[/green]")
console.print(f"\nTo remove: specify extension remove {ext_info['id']}")
elif install_allowed:
else:
console.print("[yellow]Not installed[/yellow]")
console.print(f"\n[cyan]Install:[/cyan] specify extension add {ext_info['id']}")
else:
catalog_name = ext_info.get("_catalog_name", "community")
console.print("[yellow]Not installed[/yellow]")
console.print(
f"\n[yellow]⚠[/yellow] '{ext_info['id']}' is available in the '{catalog_name}' catalog "
f"but not in your approved catalog. Add it to .specify/extension-catalogs.yml "
f"with install_allowed: true to enable installation."
)
except ExtensionError as e:
console.print(f"\n[red]Error:[/red] {e}")

View File

@@ -8,18 +8,14 @@ without bloating the core framework.
import json
import hashlib
import os
import tempfile
import zipfile
import shutil
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Dict, List, Any, Callable, Set
from typing import Optional, Dict, List, Any
from datetime import datetime, timezone
import re
import pathspec
import yaml
from packaging import version as pkg_version
from packaging.specifiers import SpecifierSet, InvalidSpecifier
@@ -40,16 +36,6 @@ class CompatibilityError(ExtensionError):
pass
@dataclass
class CatalogEntry:
"""Represents a single catalog entry in the catalog stack."""
url: str
name: str
priority: int
install_allowed: bool
description: str = ""
class ExtensionManifest:
"""Represents and validates an extension manifest (extension.yml)."""
@@ -282,70 +268,6 @@ class ExtensionManager:
self.extensions_dir = project_root / ".specify" / "extensions"
self.registry = ExtensionRegistry(self.extensions_dir)
@staticmethod
def _load_extensionignore(source_dir: Path) -> Optional[Callable[[str, List[str]], Set[str]]]:
"""Load .extensionignore and return an ignore function for shutil.copytree.
The .extensionignore file uses .gitignore-compatible patterns (one per line).
Lines starting with '#' are comments. Blank lines are ignored.
The .extensionignore file itself is always excluded.
Pattern semantics mirror .gitignore:
- '*' matches anything except '/'
- '**' matches zero or more directories
- '?' matches any single character except '/'
- Trailing '/' restricts a pattern to directories only
- Patterns with '/' (other than trailing) are anchored to the root
- '!' negates a previously excluded pattern
Args:
source_dir: Path to the extension source directory
Returns:
An ignore function compatible with shutil.copytree, or None
if no .extensionignore file exists.
"""
ignore_file = source_dir / ".extensionignore"
if not ignore_file.exists():
return None
lines: List[str] = ignore_file.read_text().splitlines()
# Normalise backslashes in patterns so Windows-authored files work
normalised: List[str] = []
for line in lines:
stripped = line.strip()
if stripped and not stripped.startswith("#"):
normalised.append(stripped.replace("\\", "/"))
else:
# Preserve blanks/comments so pathspec line numbers stay stable
normalised.append(line)
# Always ignore the .extensionignore file itself
normalised.append(".extensionignore")
spec = pathspec.GitIgnoreSpec.from_lines(normalised)
def _ignore(directory: str, entries: List[str]) -> Set[str]:
ignored: Set[str] = set()
rel_dir = Path(directory).relative_to(source_dir)
for entry in entries:
rel_path = str(rel_dir / entry) if str(rel_dir) != "." else entry
# Normalise to forward slashes for consistent matching
rel_path_fwd = rel_path.replace("\\", "/")
entry_full = Path(directory) / entry
if entry_full.is_dir():
# Append '/' so directory-only patterns (e.g. tests/) match
if spec.match_file(rel_path_fwd + "/"):
ignored.add(entry)
else:
if spec.match_file(rel_path_fwd):
ignored.add(entry)
return ignored
return _ignore
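# Illustrative .extensionignore contents (gitignore semantics, per the
# docstring above; patterns are examples only):
#   tests/     -> exclude any directory named "tests"
#   *.log      -> exclude log files anywhere
#   !keep.log  -> negation: re-include keep.log
#   /build     -> anchored: only "build" at the extension root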
def check_compatibility(
self,
manifest: ExtensionManifest,
@@ -419,8 +341,7 @@ class ExtensionManager:
if dest_dir.exists():
shutil.rmtree(dest_dir)
ignore_fn = self._load_extensionignore(source_dir)
shutil.copytree(source_dir, dest_dir, ignore=ignore_fn)
shutil.copytree(source_dir, dest_dir)
# Register commands with AI agents
registered_commands = {}
@@ -702,12 +623,6 @@ class CommandRegistrar:
"args": "$ARGUMENTS",
"extension": ".md"
},
"codex": {
"dir": ".codex/prompts",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"windsurf": {
"dir": ".windsurf/workflows",
"format": "markdown",
@@ -727,7 +642,7 @@ class CommandRegistrar:
"extension": ".md"
},
"roo": {
"dir": ".roo/commands",
"dir": ".roo/rules",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
@@ -762,23 +677,11 @@ class CommandRegistrar:
"args": "$ARGUMENTS",
"extension": ".md"
},
"tabnine": {
"dir": ".tabnine/agent/commands",
"format": "toml",
"args": "{{args}}",
"extension": ".toml"
},
"bob": {
"dir": ".bob/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"kimi": {
"dir": ".kimi/skills",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": "/SKILL.md"
}
}
@@ -972,7 +875,6 @@ class CommandRegistrar:
# Write command file
dest_file = commands_dir / f"{cmd_name}{agent_config['extension']}"
dest_file.parent.mkdir(parents=True, exist_ok=True)
dest_file.write_text(output)
# Generate companion .prompt.md for Copilot agents
@@ -984,7 +886,6 @@ class CommandRegistrar:
# Register aliases
for alias in cmd_info.get("aliases", []):
alias_file = commands_dir / f"{alias}{agent_config['extension']}"
alias_file.parent.mkdir(parents=True, exist_ok=True)
alias_file.write_text(output)
# Generate companion .prompt.md for alias too
if agent_name == "copilot":
@@ -1069,7 +970,6 @@ class ExtensionCatalog:
"""Manages extension catalog fetching, caching, and searching."""
DEFAULT_CATALOG_URL = "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.json"
COMMUNITY_CATALOG_URL = "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json"
CACHE_DURATION = 3600 # 1 hour in seconds
def __init__(self, project_root: Path):
@@ -1084,109 +984,43 @@ class ExtensionCatalog:
self.cache_file = self.cache_dir / "catalog.json"
self.cache_metadata_file = self.cache_dir / "catalog-metadata.json"
def _validate_catalog_url(self, url: str) -> None:
"""Validate that a catalog URL uses HTTPS (localhost HTTP allowed).
def get_catalog_url(self) -> str:
"""Get catalog URL from config or use default.
Args:
url: URL to validate
Checks in order:
1. SPECKIT_CATALOG_URL environment variable
2. Default catalog URL
Returns:
URL to fetch catalog from
Raises:
ValidationError: If URL is invalid or uses non-HTTPS scheme
ValidationError: If custom URL is invalid (non-HTTPS)
"""
import os
import sys
from urllib.parse import urlparse
parsed = urlparse(url)
is_localhost = parsed.hostname in ("localhost", "127.0.0.1", "::1")
if parsed.scheme != "https" and not (parsed.scheme == "http" and is_localhost):
raise ValidationError(
f"Catalog URL must use HTTPS (got {parsed.scheme}://). "
"HTTP is only allowed for localhost."
)
if not parsed.netloc:
raise ValidationError("Catalog URL must be a valid URL with a host.")
def _load_catalog_config(self, config_path: Path) -> Optional[List[CatalogEntry]]:
"""Load catalog stack configuration from a YAML file.
Args:
config_path: Path to extension-catalogs.yml
Returns:
Ordered list of CatalogEntry objects, or None if file doesn't exist
or contains no valid catalog entries.
Raises:
ValidationError: If any catalog entry has an invalid URL,
the file cannot be parsed, or a priority value is invalid.
"""
if not config_path.exists():
return None
try:
data = yaml.safe_load(config_path.read_text()) or {}
except (yaml.YAMLError, OSError) as e:
raise ValidationError(
f"Failed to read catalog config {config_path}: {e}"
)
catalogs_data = data.get("catalogs", [])
if not catalogs_data:
return None
if not isinstance(catalogs_data, list):
raise ValidationError(
f"Invalid catalog config: 'catalogs' must be a list, got {type(catalogs_data).__name__}"
)
entries: List[CatalogEntry] = []
for idx, item in enumerate(catalogs_data):
if not isinstance(item, dict):
raise ValidationError(
f"Invalid catalog entry at index {idx}: expected a mapping, got {type(item).__name__}"
)
url = str(item.get("url", "")).strip()
if not url:
continue
self._validate_catalog_url(url)
try:
priority = int(item.get("priority", idx + 1))
except (TypeError, ValueError):
raise ValidationError(
f"Invalid priority for catalog '{item.get('name', idx + 1)}': "
f"expected integer, got {item.get('priority')!r}"
)
raw_install = item.get("install_allowed", False)
if isinstance(raw_install, str):
install_allowed = raw_install.strip().lower() in ("true", "yes", "1")
else:
install_allowed = bool(raw_install)
entries.append(CatalogEntry(
url=url,
name=str(item.get("name", f"catalog-{idx + 1}")),
priority=priority,
install_allowed=install_allowed,
description=str(item.get("description", "")),
))
entries.sort(key=lambda e: e.priority)
return entries if entries else None
def get_active_catalogs(self) -> List[CatalogEntry]:
"""Get the ordered list of active catalogs.
Resolution order:
1. SPECKIT_CATALOG_URL env var — single catalog replacing all defaults
2. Project-level .specify/extension-catalogs.yml
3. User-level ~/.specify/extension-catalogs.yml
4. Built-in default stack (default + community)
Returns:
List of CatalogEntry objects sorted by priority (ascending)
Raises:
ValidationError: If a catalog URL is invalid
"""
import sys
# 1. SPECKIT_CATALOG_URL env var replaces all defaults for backward compat
# Environment variable override (useful for testing)
if env_value := os.environ.get("SPECKIT_CATALOG_URL"):
catalog_url = env_value.strip()
self._validate_catalog_url(catalog_url)
parsed = urlparse(catalog_url)
# Require HTTPS for security (prevent man-in-the-middle attacks)
# Allow http://localhost for local development/testing
is_localhost = parsed.hostname in ("localhost", "127.0.0.1", "::1")
if parsed.scheme != "https" and not (parsed.scheme == "http" and is_localhost):
raise ValidationError(
f"Invalid SPECKIT_CATALOG_URL: must use HTTPS (got {parsed.scheme}://). "
"HTTP is only allowed for localhost."
)
if not parsed.netloc:
raise ValidationError(
"Invalid SPECKIT_CATALOG_URL: must be a valid URL with a host."
)
# Warn users when using a non-default catalog (once per instance)
if catalog_url != self.DEFAULT_CATALOG_URL:
if not getattr(self, "_non_default_catalog_warning_shown", False):
print(
@@ -1195,163 +1029,11 @@ class ExtensionCatalog:
file=sys.stderr,
)
self._non_default_catalog_warning_shown = True
return [CatalogEntry(url=catalog_url, name="custom", priority=1, install_allowed=True, description="Custom catalog via SPECKIT_CATALOG_URL")]
# 2. Project-level config overrides all defaults
project_config_path = self.project_root / ".specify" / "extension-catalogs.yml"
catalogs = self._load_catalog_config(project_config_path)
if catalogs is not None:
return catalogs
return catalog_url
# 3. User-level config
user_config_path = Path.home() / ".specify" / "extension-catalogs.yml"
catalogs = self._load_catalog_config(user_config_path)
if catalogs is not None:
return catalogs
# 4. Built-in default stack
return [
CatalogEntry(url=self.DEFAULT_CATALOG_URL, name="default", priority=1, install_allowed=True, description="Built-in catalog of installable extensions"),
CatalogEntry(url=self.COMMUNITY_CATALOG_URL, name="community", priority=2, install_allowed=False, description="Community-contributed extensions (discovery only)"),
]
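A quick usage sketch of this resolution order, mirroring the env-var test later in this diff; the URL is a placeholder, and the snippet assumes `specify_cli.extensions` is importable and is run from an initialized project:
```python
import os
from pathlib import Path
from specify_cli.extensions import ExtensionCatalog  # assumed import path

os.environ["SPECKIT_CATALOG_URL"] = "https://example.com/catalog.json"  # placeholder
entries = ExtensionCatalog(Path(".")).get_active_catalogs()
# The env var replaces the whole stack with a single "custom" entry:
assert [e.name for e in entries] == ["custom"]
assert entries[0].install_allowed is True
```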
def get_catalog_url(self) -> str:
"""Get the primary catalog URL.
Returns the URL of the highest-priority catalog. Kept for backward
compatibility. Use get_active_catalogs() for full multi-catalog support.
Returns:
URL of the primary catalog
Raises:
ValidationError: If a catalog URL is invalid
"""
active = self.get_active_catalogs()
return active[0].url if active else self.DEFAULT_CATALOG_URL
def _fetch_single_catalog(self, entry: CatalogEntry, force_refresh: bool = False) -> Dict[str, Any]:
"""Fetch a single catalog with per-URL caching.
For the DEFAULT_CATALOG_URL, uses legacy cache files (self.cache_file /
self.cache_metadata_file) for backward compatibility. For all other URLs,
uses URL-hash-based cache files in self.cache_dir.
Args:
entry: CatalogEntry describing the catalog to fetch
force_refresh: If True, bypass cache
Returns:
Catalog data dictionary
Raises:
ExtensionError: If catalog cannot be fetched or has invalid format
"""
import urllib.request
import urllib.error
# Determine cache file paths (backward compat for default catalog)
if entry.url == self.DEFAULT_CATALOG_URL:
cache_file = self.cache_file
cache_meta_file = self.cache_metadata_file
is_valid = not force_refresh and self.is_cache_valid()
else:
url_hash = hashlib.sha256(entry.url.encode()).hexdigest()[:16]
cache_file = self.cache_dir / f"catalog-{url_hash}.json"
cache_meta_file = self.cache_dir / f"catalog-{url_hash}-metadata.json"
is_valid = False
if not force_refresh and cache_file.exists() and cache_meta_file.exists():
try:
metadata = json.loads(cache_meta_file.read_text())
cached_at = datetime.fromisoformat(metadata.get("cached_at", ""))
if cached_at.tzinfo is None:
cached_at = cached_at.replace(tzinfo=timezone.utc)
age = (datetime.now(timezone.utc) - cached_at).total_seconds()
is_valid = age < self.CACHE_DURATION
except (json.JSONDecodeError, ValueError, KeyError, TypeError):
# If metadata is invalid or missing expected fields, treat cache as invalid
pass
# Use cache if valid
if is_valid:
try:
return json.loads(cache_file.read_text())
except json.JSONDecodeError:
pass
# Fetch from network
try:
with urllib.request.urlopen(entry.url, timeout=10) as response:
catalog_data = json.loads(response.read())
if "schema_version" not in catalog_data or "extensions" not in catalog_data:
raise ExtensionError(f"Invalid catalog format from {entry.url}")
# Save to cache
self.cache_dir.mkdir(parents=True, exist_ok=True)
cache_file.write_text(json.dumps(catalog_data, indent=2))
cache_meta_file.write_text(json.dumps({
"cached_at": datetime.now(timezone.utc).isoformat(),
"catalog_url": entry.url,
}, indent=2))
return catalog_data
except urllib.error.URLError as e:
raise ExtensionError(f"Failed to fetch catalog from {entry.url}: {e}")
except json.JSONDecodeError as e:
raise ExtensionError(f"Invalid JSON in catalog from {entry.url}: {e}")
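For non-default URLs, the cache file names are derived from a hash of the URL; a small sketch of that naming scheme (placeholder URL):
```python
import hashlib

url = "https://example.com/catalog.json"  # placeholder
url_hash = hashlib.sha256(url.encode()).hexdigest()[:16]
# Matches the cache paths used in _fetch_single_catalog above:
print(f"catalog-{url_hash}.json")
print(f"catalog-{url_hash}-metadata.json")
```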
def _get_merged_extensions(self, force_refresh: bool = False) -> List[Dict[str, Any]]:
"""Fetch and merge extensions from all active catalogs.
Higher-priority (lower priority number) catalogs win on conflicts
(same extension id in two catalogs). Each extension dict is annotated with:
- _catalog_name: name of the source catalog
- _install_allowed: whether installation is allowed from this catalog
Catalogs that fail to fetch are skipped. Raises ExtensionError only if
ALL catalogs fail.
Args:
force_refresh: If True, bypass all caches
Returns:
List of merged extension dicts
Raises:
ExtensionError: If all catalogs fail to fetch
"""
import sys
active_catalogs = self.get_active_catalogs()
merged: Dict[str, Dict[str, Any]] = {}
any_success = False
for catalog_entry in active_catalogs:
try:
catalog_data = self._fetch_single_catalog(catalog_entry, force_refresh)
any_success = True
except ExtensionError as e:
print(
f"Warning: Could not fetch catalog '{catalog_entry.name}': {e}",
file=sys.stderr,
)
continue
for ext_id, ext_data in catalog_data.get("extensions", {}).items():
if ext_id not in merged: # Higher-priority catalog wins
merged[ext_id] = {
**ext_data,
"id": ext_id,
"_catalog_name": catalog_entry.name,
"_install_allowed": catalog_entry.install_allowed,
}
if not any_success and active_catalogs:
raise ExtensionError("Failed to fetch any extension catalog")
return list(merged.values())
# TODO: Support custom catalogs from .specify/extension-catalogs.yml
return self.DEFAULT_CATALOG_URL
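A standalone sketch of the conflict rule implemented by `_get_merged_extensions` above: the first (highest-priority) catalog to claim an extension id wins, and each entry is annotated with its source catalog:
```python
primary = {"jira": {"version": "2.0.0"}}                  # priority 1, installable
secondary = {"jira": {"version": "1.0.0"}, "linear": {}}  # priority 2, discovery only

merged = {}
for name, allowed, extensions in [("primary", True, primary), ("secondary", False, secondary)]:
    for ext_id, ext in extensions.items():
        if ext_id not in merged:  # a higher-priority catalog already claimed this id
            merged[ext_id] = {**ext, "id": ext_id, "_catalog_name": name, "_install_allowed": allowed}

assert merged["jira"]["version"] == "2.0.0"
assert merged["jira"]["_catalog_name"] == "primary"
assert merged["linear"]["_install_allowed"] is False
```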
def is_cache_valid(self) -> bool:
"""Check if cached catalog is still valid.
@@ -1365,11 +1047,9 @@ class ExtensionCatalog:
try:
metadata = json.loads(self.cache_metadata_file.read_text())
cached_at = datetime.fromisoformat(metadata.get("cached_at", ""))
if cached_at.tzinfo is None:
cached_at = cached_at.replace(tzinfo=timezone.utc)
age_seconds = (datetime.now(timezone.utc) - cached_at).total_seconds()
return age_seconds < self.CACHE_DURATION
except (json.JSONDecodeError, ValueError, KeyError, TypeError):
except (json.JSONDecodeError, ValueError, KeyError):
return False
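The age check normalizes naive timestamps to UTC before comparing; a small sketch with an example timestamp:
```python
from datetime import datetime, timezone

cached_at = datetime.fromisoformat("2026-03-03T22:06:42")  # naive example timestamp
if cached_at.tzinfo is None:
    cached_at = cached_at.replace(tzinfo=timezone.utc)     # assume UTC, as above
age_seconds = (datetime.now(timezone.utc) - cached_at).total_seconds()
print(f"cache age: {age_seconds:.0f}s")
```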
def fetch_catalog(self, force_refresh: bool = False) -> Dict[str, Any]:
@@ -1430,7 +1110,7 @@ class ExtensionCatalog:
author: Optional[str] = None,
verified_only: bool = False,
) -> List[Dict[str, Any]]:
"""Search catalog for extensions across all active catalogs.
"""Search catalog for extensions.
Args:
query: Search query (searches name, description, tags)
@@ -1439,16 +1119,14 @@ class ExtensionCatalog:
verified_only: If True, show only verified extensions
Returns:
List of matching extension metadata, each annotated with
``_catalog_name`` and ``_install_allowed`` from its source catalog.
List of matching extension metadata
"""
all_extensions = self._get_merged_extensions()
catalog = self.fetch_catalog()
extensions = catalog.get("extensions", {})
results = []
for ext_data in all_extensions:
ext_id = ext_data["id"]
for ext_id, ext_data in extensions.items():
# Apply filters
if verified_only and not ext_data.get("verified", False):
continue
@@ -1474,26 +1152,25 @@ class ExtensionCatalog:
if query_lower not in searchable_text:
continue
results.append(ext_data)
results.append({"id": ext_id, **ext_data})
return results
def get_extension_info(self, extension_id: str) -> Optional[Dict[str, Any]]:
"""Get detailed information about a specific extension.
Searches all active catalogs in priority order.
Args:
extension_id: ID of the extension
Returns:
Extension metadata (annotated with ``_catalog_name`` and
``_install_allowed``) or None if not found.
Extension metadata or None if not found
"""
all_extensions = self._get_merged_extensions()
for ext_data in all_extensions:
if ext_data["id"] == extension_id:
return ext_data
catalog = self.fetch_catalog()
extensions = catalog.get("extensions", {})
if extension_id in extensions:
return {"id": extension_id, **extensions[extension_id]}
return None
def download_extension(self, extension_id: str, target_dir: Optional[Path] = None) -> Path:
@@ -1553,18 +1230,11 @@ class ExtensionCatalog:
raise ExtensionError(f"Failed to save extension ZIP: {e}")
def clear_cache(self):
"""Clear the catalog cache (both legacy and URL-hash-based files)."""
"""Clear the catalog cache."""
if self.cache_file.exists():
self.cache_file.unlink()
if self.cache_metadata_file.exists():
self.cache_metadata_file.unlink()
# Also clear any per-URL hash-based cache files
if self.cache_dir.exists():
for extra_cache in self.cache_dir.glob("catalog-*.json"):
if extra_cache != self.cache_file:
extra_cache.unlink(missing_ok=True)
for extra_meta in self.cache_dir.glob("catalog-*-metadata.json"):
extra_meta.unlink(missing_ok=True)
class ConfigManager:


@@ -13,40 +13,6 @@ $ARGUMENTS
You **MUST** consider the user input before proceeding (if not empty).
## Pre-Execution Checks
**Check for extension hooks (before implementation)**:
- Check if `.specify/extensions.yml` exists in the project root.
- If it exists, read it and look for entries under the `hooks.before_implement` key
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
- Filter to only hooks where `enabled: true`
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
- For each executable hook, output the following based on its `optional` flag:
- **Optional hook** (`optional: true`):
```
## Extension Hooks
**Optional Pre-Hook**: {extension}
Command: `/{command}`
Description: {description}
Prompt: {prompt}
To execute: `/{command}`
```
- **Mandatory hook** (`optional: false`):
```
## Extension Hooks
**Automatic Pre-Hook**: {extension}
Executing: `/{command}`
EXECUTE_COMMAND: {command}
Wait for the result of the hook command before proceeding to the Outline.
```
- If no hooks are registered or `.specify/extensions.yml` does not exist, skip silently (a minimal sketch of this selection logic follows)
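A minimal sketch of the selection rules above, not the shipped HookExecutor, assuming PyYAML and the `.specify/extensions.yml` layout shown in the test fixture later in this diff:
```python
import yaml
from pathlib import Path

def executable_hooks(project_root: Path, key: str = "before_implement") -> list:
    """Return hooks that are enabled and define no condition expression."""
    config = project_root / ".specify" / "extensions.yml"
    if not config.exists():
        return []  # no hooks registered: skip silently
    try:
        data = yaml.safe_load(config.read_text()) or {}
    except yaml.YAMLError:
        return []  # invalid YAML: skip silently
    hooks = (data.get("hooks") or {}).get(key) or []
    enabled = [h for h in hooks if h.get("enabled")]
    # Hooks with a non-empty condition are left to the HookExecutor:
    return [h for h in enabled if not h.get("condition")]
```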
## Outline
1. Run `{SCRIPT}` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax, e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
@@ -122,7 +88,7 @@ You **MUST** consider the user input before proceeding (if not empty).
- **Rust**: `target/`, `debug/`, `release/`, `*.rs.bk`, `*.rlib`, `*.prof*`, `.idea/`, `*.log`, `.env*`
- **Kotlin**: `build/`, `out/`, `.gradle/`, `.idea/`, `*.class`, `*.jar`, `*.iml`, `*.log`, `.env*`
- **C++**: `build/`, `bin/`, `obj/`, `out/`, `*.o`, `*.so`, `*.a`, `*.exe`, `*.dll`, `.idea/`, `*.log`, `.env*`
- **C**: `build/`, `bin/`, `obj/`, `out/`, `*.o`, `*.a`, `*.so`, `*.exe`, `*.dll`, `autom4te.cache/`, `config.status`, `config.log`, `.idea/`, `*.log`, `.env*`
- **C**: `build/`, `bin/`, `obj/`, `out/`, `*.o`, `*.a`, `*.so`, `*.exe`, `autom4te.cache/`, `config.status`, `config.log`, `.idea/`, `*.log`, `.env*`
- **Swift**: `.build/`, `DerivedData/`, `*.swiftpm/`, `Packages/`
- **R**: `.Rproj.user/`, `.Rhistory`, `.RData`, `.Ruserdata`, `*.Rproj`, `packrat/`, `renv/`
- **Universal**: `.DS_Store`, `Thumbs.db`, `*.tmp`, `*.swp`, `.vscode/`, `.idea/`
@@ -170,32 +136,3 @@ You **MUST** consider the user input before proceeding (if not empty).
- Report final status with summary of completed work
Note: This command assumes a complete task breakdown exists in tasks.md. If tasks are incomplete or missing, suggest running `/speckit.tasks` first to regenerate the task list.
10. **Check for extension hooks**: After completion validation, check if `.specify/extensions.yml` exists in the project root.
- If it exists, read it and look for entries under the `hooks.after_implement` key
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
- Filter to only hooks where `enabled: true`
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
- For each executable hook, output the following based on its `optional` flag:
- **Optional hook** (`optional: true`):
```
## Extension Hooks
**Optional Hook**: {extension}
Command: `/{command}`
Description: {description}
Prompt: {prompt}
To execute: `/{command}`
```
- **Mandatory hook** (`optional: false`):
```
## Extension Hooks
**Automatic Hook**: {extension}
Executing: `/{command}`
EXECUTE_COMMAND: {command}
```
- If no hooks are registered or `.specify/extensions.yml` does not exist, skip silently


@@ -9,8 +9,8 @@ handoffs:
prompt: Clarify specification requirements
send: true
scripts:
sh: scripts/bash/create-new-feature.sh "{ARGS}"
ps: scripts/powershell/create-new-feature.ps1 "{ARGS}"
sh: scripts/bash/create-new-feature.sh --json "{ARGS}"
ps: scripts/powershell/create-new-feature.ps1 -Json "{ARGS}"
---
## User Input
@@ -39,14 +39,33 @@ Given that feature description, do this:
- "Create a dashboard for analytics" → "analytics-dashboard"
- "Fix payment processing timeout bug" → "fix-payment-timeout"
2. **Create the feature branch** by running the script with `--short-name` (and `--json`), and do NOT pass `--number` (the script auto-detects the next globally available number across all branches and spec directories):
2. **Check for existing branches before creating new one**:
- Bash example: `{SCRIPT} --json --short-name "user-auth" "Add user authentication"`
- PowerShell example: `{SCRIPT} -Json -ShortName "user-auth" "Add user authentication"`
a. First, fetch all remote branches to ensure we have the latest information:
```bash
git fetch --all --prune
```
b. Find the highest feature number across all sources for the short-name:
- Remote branches: `git ls-remote --heads origin | grep -E 'refs/heads/[0-9]+-<short-name>$'`
- Local branches: `git branch | grep -E '^[* ]*[0-9]+-<short-name>$'`
- Specs directories: Check for directories matching `specs/[0-9]+-<short-name>`
c. Determine the next available number:
- Extract all numbers from all three sources
- Find the highest number N
- Use N+1 for the new branch number (see the sketch after this list)
d. Run the script `{SCRIPT}` with the calculated number and short-name:
- Pass `--number N+1` and `--short-name "your-short-name"` along with the feature description
- Bash example: `{SCRIPT} --json --number 5 --short-name "user-auth" "Add user authentication"`
- PowerShell example: `{SCRIPT} -Json -Number 5 -ShortName "user-auth" "Add user authentication"`
**IMPORTANT**:
- Do NOT pass `--number` — the script determines the correct next number automatically
- Always include the JSON flag (`--json` for Bash, `-Json` for PowerShell) so the output can be parsed reliably
- Check all three sources (remote branches, local branches, specs directories) to find the highest number
- Only match branches/directories with the exact short-name pattern
- If no existing branches/directories found with this short-name, start with number 1
- You must only ever run this script once per feature
- The JSON is provided in the terminal as output - always refer to it to get the actual content you're looking for
- The JSON output will contain BRANCH_NAME and SPEC_FILE paths
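An illustrative sketch of the numbering rule in steps b and c for a given short name; `next_feature_number` is a hypothetical helper, and the snippet assumes git is on PATH and is run from the repo root:
```python
import re
import subprocess
from pathlib import Path

def next_feature_number(short_name: str) -> int:
    """Highest number across remote branches, local branches, and specs/ dirs, plus one."""
    pattern = re.compile(rf"(\d+)-{re.escape(short_name)}$")
    numbers = []
    for cmd in (["git", "ls-remote", "--heads", "origin"], ["git", "branch"]):
        out = subprocess.run(cmd, capture_output=True, text=True).stdout
        numbers += [int(m.group(1)) for line in out.splitlines()
                    if (m := pattern.search(line.strip().lstrip("* ")))]
    numbers += [int(m.group(1)) for d in Path("specs").glob("[0-9]*")
                if (m := pattern.fullmatch(d.name))]
    return max(numbers, default=0) + 1  # no matches: start with number 1
```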


@@ -22,40 +22,6 @@ $ARGUMENTS
You **MUST** consider the user input before proceeding (if not empty).
## Pre-Execution Checks
**Check for extension hooks (before tasks generation)**:
- Check if `.specify/extensions.yml` exists in the project root.
- If it exists, read it and look for entries under the `hooks.before_tasks` key
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
- Filter to only hooks where `enabled: true`
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
- For each executable hook, output the following based on its `optional` flag:
- **Optional hook** (`optional: true`):
```
## Extension Hooks
**Optional Pre-Hook**: {extension}
Command: `/{command}`
Description: {description}
Prompt: {prompt}
To execute: `/{command}`
```
- **Mandatory hook** (`optional: false`):
```
## Extension Hooks
**Automatic Pre-Hook**: {extension}
Executing: `/{command}`
EXECUTE_COMMAND: {command}
Wait for the result of the hook command before proceeding to the Outline.
```
- If no hooks are registered or `.specify/extensions.yml` does not exist, skip silently
## Outline
1. **Setup**: Run `{SCRIPT}` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax, e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
@@ -97,35 +63,6 @@ You **MUST** consider the user input before proceeding (if not empty).
- Suggested MVP scope (typically just User Story 1)
- Format validation: Confirm ALL tasks follow the checklist format (checkbox, ID, labels, file paths)
6. **Check for extension hooks**: After tasks.md is generated, check if `.specify/extensions.yml` exists in the project root.
- If it exists, read it and look for entries under the `hooks.after_tasks` key
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
- Filter to only hooks where `enabled: true`
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
- For each executable hook, output the following based on its `optional` flag:
- **Optional hook** (`optional: true`):
```
## Extension Hooks
**Optional Hook**: {extension}
Command: `/{command}`
Description: {description}
Prompt: {prompt}
To execute: `/{command}`
```
- **Mandatory hook** (`optional: false`):
```
## Extension Hooks
**Automatic Hook**: {extension}
Executing: `/{command}`
EXECUTE_COMMAND: {command}
```
- If no hooks are registered or `.specify/extensions.yml` does not exist, skip silently
Context for task generation: {ARGS}
The tasks.md should be immediately executable - each task must be specific enough that an LLM can complete it without additional context.


@@ -1,34 +0,0 @@
hooks:
before_implement:
- id: pre_test
enabled: true
optional: false
extension: "test-extension"
command: "pre_implement_test"
description: "Test before implement hook execution"
after_implement:
- id: post_test
enabled: true
optional: true
extension: "test-extension"
command: "post_implement_test"
description: "Test after implement hook execution"
prompt: "Would you like to run the post-implement test?"
before_tasks:
- id: pre_tasks_test
enabled: true
optional: false
extension: "test-extension"
command: "pre_tasks_test"
description: "Test before tasks hook execution"
after_tasks:
- id: post_tasks_test
enabled: true
optional: true
extension: "test-extension"
command: "post_tasks_test"
description: "Test after tasks hook execution"
prompt: "Would you like to run the post-tasks test?"


@@ -1,30 +0,0 @@
# Testing Extension Hooks
This directory contains a mock project to verify that LLM agents correctly identify and execute hook commands defined in `.specify/extensions.yml`.
## Test 1: Testing `before_tasks` and `after_tasks`
1. Open a chat with an LLM (like GitHub Copilot) in this project.
2. Ask it to generate tasks for the current directory:
> "Please follow `/speckit.tasks` for the `./tests/hooks` directory."
3. **Expected Behavior**:
- Before doing any generation, the LLM should notice the `AUTOMATIC Pre-Hook` in `.specify/extensions.yml` under `before_tasks`.
- It should state it is executing `EXECUTE_COMMAND: pre_tasks_test`.
- It should then proceed to read the `.md` docs and produce a `tasks.md`.
- After generation, it should output the optional `after_tasks` hook (`post_tasks_test`) block, asking if you want to run it.
## Test 2: Testing `before_implement` and `after_implement`
*(Requires `tasks.md` from Test 1 to exist)*
1. In the same (or new) chat, ask the LLM to implement the tasks:
> "Please follow `/speckit.implement` for the `./tests/hooks` directory."
2. **Expected Behavior**:
- The LLM should first check for `before_implement` hooks.
- It should state it is executing `EXECUTE_COMMAND: pre_implement_test` BEFORE doing any actual task execution.
- It should evaluate the checklists and execute the code writing tasks.
- Upon completion, it should output the optional `after_implement` hook (`post_implement_test`) block.
## How it works
The templates for these commands in `templates/commands/tasks.md` and `templates/commands/implement.md` contain strict ordered lists. The new `before_*` hooks are defined in a dedicated **Pre-Execution Checks** section before the outline, so they are evaluated first without renumbering the template steps.


@@ -1,3 +0,0 @@
# Test Setup for Hooks
This feature is designed to test whether LLMs correctly invoke Spec Kit extension hooks when generating tasks and implementing code.


@@ -1 +0,0 @@
- **User Story 1:** I want a test script that prints "Hello hooks!".


@@ -1 +0,0 @@
- [ ] T001 [US1] Create script that prints 'Hello hooks!' in hello.py


@@ -28,13 +28,6 @@ class TestAgentConfigConsistency:
assert cfg["kiro-cli"]["dir"] == ".kiro/prompts"
assert "q" not in cfg
def test_extension_registrar_includes_codex(self):
"""Extension command registrar should include codex targeting .codex/prompts."""
cfg = CommandRegistrar.AGENT_CONFIGS
assert "codex" in cfg
assert cfg["codex"]["dir"] == ".codex/prompts"
def test_release_agent_lists_include_kiro_cli_and_exclude_q(self):
"""Bash and PowerShell release scripts should agree on agent key set for Kiro."""
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
@@ -104,125 +97,3 @@ class TestAgentConfigConsistency:
assert "kiro-cli" in pwsh_text
assert "Amazon Q Developer CLI" not in bash_text
assert "Amazon Q Developer CLI" not in pwsh_text
# --- Tabnine CLI consistency checks ---
def test_runtime_config_includes_tabnine(self):
"""AGENT_CONFIG should include tabnine with correct folder and subdir."""
assert "tabnine" in AGENT_CONFIG
assert AGENT_CONFIG["tabnine"]["folder"] == ".tabnine/agent/"
assert AGENT_CONFIG["tabnine"]["commands_subdir"] == "commands"
assert AGENT_CONFIG["tabnine"]["requires_cli"] is True
assert AGENT_CONFIG["tabnine"]["install_url"] is not None
def test_extension_registrar_includes_tabnine(self):
"""CommandRegistrar.AGENT_CONFIGS should include tabnine with correct TOML config."""
from specify_cli.extensions import CommandRegistrar
assert "tabnine" in CommandRegistrar.AGENT_CONFIGS
cfg = CommandRegistrar.AGENT_CONFIGS["tabnine"]
assert cfg["dir"] == ".tabnine/agent/commands"
assert cfg["format"] == "toml"
assert cfg["args"] == "{{args}}"
assert cfg["extension"] == ".toml"
def test_release_agent_lists_include_tabnine(self):
"""Bash and PowerShell release scripts should include tabnine in agent lists."""
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
sh_match = re.search(r"ALL_AGENTS=\(([^)]*)\)", sh_text)
assert sh_match is not None
sh_agents = sh_match.group(1).split()
ps_match = re.search(r"\$AllAgents = @\(([^)]*)\)", ps_text)
assert ps_match is not None
ps_agents = re.findall(r"'([^']+)'", ps_match.group(1))
assert "tabnine" in sh_agents
assert "tabnine" in ps_agents
def test_release_scripts_generate_tabnine_toml_commands(self):
"""Release scripts should generate TOML commands for tabnine in .tabnine/agent/commands."""
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
assert ".tabnine/agent/commands" in sh_text
assert ".tabnine/agent/commands" in ps_text
assert re.search(r"'tabnine'\s*\{.*?\.tabnine/agent/commands", ps_text, re.S) is not None
def test_github_release_includes_tabnine_packages(self):
"""GitHub release script should include tabnine template packages."""
gh_release_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-github-release.sh").read_text(encoding="utf-8")
assert "spec-kit-template-tabnine-sh-" in gh_release_text
assert "spec-kit-template-tabnine-ps-" in gh_release_text
def test_agent_context_scripts_include_tabnine(self):
"""Agent context scripts should support tabnine agent type."""
bash_text = (REPO_ROOT / "scripts" / "bash" / "update-agent-context.sh").read_text(encoding="utf-8")
pwsh_text = (REPO_ROOT / "scripts" / "powershell" / "update-agent-context.ps1").read_text(encoding="utf-8")
assert "tabnine" in bash_text
assert "TABNINE_FILE" in bash_text
assert "tabnine" in pwsh_text
assert "TABNINE_FILE" in pwsh_text
def test_ai_help_includes_tabnine(self):
"""CLI help text for --ai should include tabnine."""
assert "tabnine" in AI_ASSISTANT_HELP
# --- Kimi Code CLI consistency checks ---
def test_kimi_in_agent_config(self):
"""AGENT_CONFIG should include kimi with correct folder and commands_subdir."""
assert "kimi" in AGENT_CONFIG
assert AGENT_CONFIG["kimi"]["folder"] == ".kimi/"
assert AGENT_CONFIG["kimi"]["commands_subdir"] == "skills"
assert AGENT_CONFIG["kimi"]["requires_cli"] is True
def test_kimi_in_extension_registrar(self):
"""Extension command registrar should include kimi using .kimi/skills and SKILL.md."""
cfg = CommandRegistrar.AGENT_CONFIGS
assert "kimi" in cfg
kimi_cfg = cfg["kimi"]
assert kimi_cfg["dir"] == ".kimi/skills"
assert kimi_cfg["extension"] == "/SKILL.md"
def test_kimi_in_release_agent_lists(self):
"""Bash and PowerShell release scripts should include kimi in agent lists."""
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
sh_match = re.search(r"ALL_AGENTS=\(([^)]*)\)", sh_text)
assert sh_match is not None
sh_agents = sh_match.group(1).split()
ps_match = re.search(r"\$AllAgents = @\(([^)]*)\)", ps_text)
assert ps_match is not None
ps_agents = re.findall(r"'([^']+)'", ps_match.group(1))
assert "kimi" in sh_agents
assert "kimi" in ps_agents
def test_kimi_in_powershell_validate_set(self):
"""PowerShell update-agent-context script should include 'kimi' in ValidateSet."""
ps_text = (REPO_ROOT / "scripts" / "powershell" / "update-agent-context.ps1").read_text(encoding="utf-8")
validate_set_match = re.search(r"\[ValidateSet\(([^)]*)\)\]", ps_text)
assert validate_set_match is not None
validate_set_values = re.findall(r"'([^']+)'", validate_set_match.group(1))
assert "kimi" in validate_set_values
def test_kimi_in_github_release_output(self):
"""GitHub release script should include kimi template packages."""
gh_release_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-github-release.sh").read_text(encoding="utf-8")
assert "spec-kit-template-kimi-sh-" in gh_release_text
assert "spec-kit-template-kimi-ps-" in gh_release_text
def test_ai_help_includes_kimi(self):
"""CLI help text for --ai should include kimi."""
assert "kimi" in AI_ASSISTANT_HELP


@@ -147,11 +147,6 @@ class TestGetSkillsDir:
result = _get_skills_dir(project_dir, "gemini")
assert result == project_dir / ".gemini" / "skills"
def test_tabnine_skills_dir(self, project_dir):
"""Tabnine should use .tabnine/agent/skills/."""
result = _get_skills_dir(project_dir, "tabnine")
assert result == project_dir / ".tabnine" / "agent" / "skills"
def test_copilot_skills_dir(self, project_dir):
"""Copilot should use .github/skills/."""
result = _get_skills_dir(project_dir, "copilot")
@@ -410,11 +405,8 @@ class TestInstallAiSkills:
skills_dir = _get_skills_dir(proj, agent_key)
assert skills_dir.exists()
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
# Kimi uses dot-separator (speckit.specify) to match /skill:speckit.* invocation;
# all other agents use hyphen-separator (speckit-specify).
expected_skill_name = "speckit.specify" if agent_key == "kimi" else "speckit-specify"
assert expected_skill_name in skill_dirs
assert (skills_dir / expected_skill_name / "SKILL.md").exists()
assert "speckit-specify" in skill_dirs
assert (skills_dir / "speckit-specify" / "SKILL.md").exists()


@@ -6,7 +6,6 @@ Tests cover:
- Extension registry operations
- Extension manager installation/removal
- Command registration
- Catalog stack (multi-catalog support)
"""
import pytest
@@ -17,7 +16,6 @@ from pathlib import Path
from datetime import datetime, timezone
from specify_cli.extensions import (
CatalogEntry,
ExtensionManifest,
ExtensionRegistry,
ExtensionManager,
@@ -407,11 +405,6 @@ class TestCommandRegistrar:
assert CommandRegistrar.AGENT_CONFIGS["kiro-cli"]["dir"] == ".kiro/prompts"
assert "q" not in CommandRegistrar.AGENT_CONFIGS
def test_codex_agent_config_present(self):
"""Codex should be mapped to .codex/prompts."""
assert "codex" in CommandRegistrar.AGENT_CONFIGS
assert CommandRegistrar.AGENT_CONFIGS["codex"]["dir"] == ".codex/prompts"
def test_parse_frontmatter_valid(self):
"""Test parsing valid YAML frontmatter."""
content = """---
@@ -887,29 +880,10 @@ class TestExtensionCatalog:
def test_search_all_extensions(self, temp_dir):
"""Test searching all extensions without filters."""
import yaml as yaml_module
project_dir = temp_dir / "project"
project_dir.mkdir()
(project_dir / ".specify").mkdir()
# Use a single-catalog config so community extensions don't interfere
config_path = project_dir / ".specify" / "extension-catalogs.yml"
with open(config_path, "w") as f:
yaml_module.dump(
{
"catalogs": [
{
"name": "test-catalog",
"url": ExtensionCatalog.DEFAULT_CATALOG_URL,
"priority": 1,
"install_allowed": True,
}
]
},
f,
)
catalog = ExtensionCatalog(project_dir)
# Create mock catalog
@@ -955,29 +929,10 @@ class TestExtensionCatalog:
def test_search_by_query(self, temp_dir):
"""Test searching by query text."""
import yaml as yaml_module
project_dir = temp_dir / "project"
project_dir.mkdir()
(project_dir / ".specify").mkdir()
# Use a single-catalog config so community extensions don't interfere
config_path = project_dir / ".specify" / "extension-catalogs.yml"
with open(config_path, "w") as f:
yaml_module.dump(
{
"catalogs": [
{
"name": "test-catalog",
"url": ExtensionCatalog.DEFAULT_CATALOG_URL,
"priority": 1,
"install_allowed": True,
}
]
},
f,
)
catalog = ExtensionCatalog(project_dir)
# Create mock catalog
@@ -1019,29 +974,10 @@ class TestExtensionCatalog:
def test_search_by_tag(self, temp_dir):
"""Test searching by tag."""
import yaml as yaml_module
project_dir = temp_dir / "project"
project_dir.mkdir()
(project_dir / ".specify").mkdir()
# Use a single-catalog config so community extensions don't interfere
config_path = project_dir / ".specify" / "extension-catalogs.yml"
with open(config_path, "w") as f:
yaml_module.dump(
{
"catalogs": [
{
"name": "test-catalog",
"url": ExtensionCatalog.DEFAULT_CATALOG_URL,
"priority": 1,
"install_allowed": True,
}
]
},
f,
)
catalog = ExtensionCatalog(project_dir)
# Create mock catalog
@@ -1090,29 +1026,10 @@ class TestExtensionCatalog:
def test_search_verified_only(self, temp_dir):
"""Test searching verified extensions only."""
import yaml as yaml_module
project_dir = temp_dir / "project"
project_dir.mkdir()
(project_dir / ".specify").mkdir()
# Use a single-catalog config so community extensions don't interfere
config_path = project_dir / ".specify" / "extension-catalogs.yml"
with open(config_path, "w") as f:
yaml_module.dump(
{
"catalogs": [
{
"name": "test-catalog",
"url": ExtensionCatalog.DEFAULT_CATALOG_URL,
"priority": 1,
"install_allowed": True,
}
]
},
f,
)
catalog = ExtensionCatalog(project_dir)
# Create mock catalog
@@ -1154,29 +1071,10 @@ class TestExtensionCatalog:
def test_get_extension_info(self, temp_dir):
"""Test getting specific extension info."""
import yaml as yaml_module
project_dir = temp_dir / "project"
project_dir.mkdir()
(project_dir / ".specify").mkdir()
# Use a single-catalog config so community extensions don't interfere
config_path = project_dir / ".specify" / "extension-catalogs.yml"
with open(config_path, "w") as f:
yaml_module.dump(
{
"catalogs": [
{
"name": "test-catalog",
"url": ExtensionCatalog.DEFAULT_CATALOG_URL,
"priority": 1,
"install_allowed": True,
}
]
},
f,
)
catalog = ExtensionCatalog(project_dir)
# Create mock catalog
@@ -1235,711 +1133,3 @@ class TestExtensionCatalog:
assert not catalog.cache_file.exists()
assert not catalog.cache_metadata_file.exists()
# ===== CatalogEntry Tests =====
class TestCatalogEntry:
"""Test CatalogEntry dataclass."""
def test_catalog_entry_creation(self):
"""Test creating a CatalogEntry."""
entry = CatalogEntry(
url="https://example.com/catalog.json",
name="test",
priority=1,
install_allowed=True,
)
assert entry.url == "https://example.com/catalog.json"
assert entry.name == "test"
assert entry.priority == 1
assert entry.install_allowed is True
# ===== Catalog Stack Tests =====
class TestCatalogStack:
"""Test multi-catalog stack support."""
def _make_project(self, temp_dir: Path) -> Path:
"""Create a minimal spec-kit project directory."""
project_dir = temp_dir / "project"
project_dir.mkdir()
(project_dir / ".specify").mkdir()
return project_dir
def _write_catalog_config(self, project_dir: Path, catalogs: list) -> None:
"""Write extension-catalogs.yml to project .specify dir."""
import yaml as yaml_module
config_path = project_dir / ".specify" / "extension-catalogs.yml"
with open(config_path, "w") as f:
yaml_module.dump({"catalogs": catalogs}, f)
def _write_valid_cache(
self, catalog: ExtensionCatalog, extensions: dict, url: str = "http://test.com"
) -> None:
"""Populate the primary cache file with mock extension data."""
catalog_data = {"schema_version": "1.0", "extensions": extensions}
catalog.cache_dir.mkdir(parents=True, exist_ok=True)
catalog.cache_file.write_text(json.dumps(catalog_data))
catalog.cache_metadata_file.write_text(
json.dumps(
{
"cached_at": datetime.now(timezone.utc).isoformat(),
"catalog_url": url,
}
)
)
# --- get_active_catalogs ---
def test_default_stack(self, temp_dir):
"""Default stack includes default and community catalogs."""
project_dir = self._make_project(temp_dir)
catalog = ExtensionCatalog(project_dir)
entries = catalog.get_active_catalogs()
assert len(entries) == 2
assert entries[0].url == ExtensionCatalog.DEFAULT_CATALOG_URL
assert entries[0].name == "default"
assert entries[0].priority == 1
assert entries[0].install_allowed is True
assert entries[1].url == ExtensionCatalog.COMMUNITY_CATALOG_URL
assert entries[1].name == "community"
assert entries[1].priority == 2
assert entries[1].install_allowed is False
def test_env_var_overrides_default_stack(self, temp_dir, monkeypatch):
"""SPECKIT_CATALOG_URL replaces the entire default stack."""
project_dir = self._make_project(temp_dir)
custom_url = "https://example.com/catalog.json"
monkeypatch.setenv("SPECKIT_CATALOG_URL", custom_url)
catalog = ExtensionCatalog(project_dir)
entries = catalog.get_active_catalogs()
assert len(entries) == 1
assert entries[0].url == custom_url
assert entries[0].install_allowed is True
def test_env_var_invalid_url_raises(self, temp_dir, monkeypatch):
"""SPECKIT_CATALOG_URL with http:// (non-localhost) raises ValidationError."""
project_dir = self._make_project(temp_dir)
monkeypatch.setenv("SPECKIT_CATALOG_URL", "http://example.com/catalog.json")
catalog = ExtensionCatalog(project_dir)
with pytest.raises(ValidationError, match="HTTPS"):
catalog.get_active_catalogs()
def test_project_config_overrides_defaults(self, temp_dir):
"""Project-level extension-catalogs.yml overrides default stack."""
project_dir = self._make_project(temp_dir)
self._write_catalog_config(
project_dir,
[
{
"name": "custom",
"url": "https://example.com/catalog.json",
"priority": 1,
"install_allowed": True,
}
],
)
catalog = ExtensionCatalog(project_dir)
entries = catalog.get_active_catalogs()
assert len(entries) == 1
assert entries[0].url == "https://example.com/catalog.json"
assert entries[0].name == "custom"
def test_project_config_sorted_by_priority(self, temp_dir):
"""Catalog entries are sorted by priority (ascending)."""
project_dir = self._make_project(temp_dir)
self._write_catalog_config(
project_dir,
[
{
"name": "secondary",
"url": "https://example.com/secondary.json",
"priority": 5,
"install_allowed": False,
},
{
"name": "primary",
"url": "https://example.com/primary.json",
"priority": 1,
"install_allowed": True,
},
],
)
catalog = ExtensionCatalog(project_dir)
entries = catalog.get_active_catalogs()
assert len(entries) == 2
assert entries[0].name == "primary"
assert entries[1].name == "secondary"
def test_project_config_invalid_url_raises(self, temp_dir):
"""Project config with HTTP (non-localhost) URL raises ValidationError."""
project_dir = self._make_project(temp_dir)
self._write_catalog_config(
project_dir,
[
{
"name": "bad",
"url": "http://example.com/catalog.json",
"priority": 1,
"install_allowed": True,
}
],
)
catalog = ExtensionCatalog(project_dir)
with pytest.raises(ValidationError, match="HTTPS"):
catalog.get_active_catalogs()
def test_empty_project_config_falls_back_to_defaults(self, temp_dir):
"""Empty catalogs list in config falls back to default stack."""
import yaml as yaml_module
project_dir = self._make_project(temp_dir)
config_path = project_dir / ".specify" / "extension-catalogs.yml"
with open(config_path, "w") as f:
yaml_module.dump({"catalogs": []}, f)
catalog = ExtensionCatalog(project_dir)
entries = catalog.get_active_catalogs()
# Falls back to default stack
assert len(entries) == 2
assert entries[0].url == ExtensionCatalog.DEFAULT_CATALOG_URL
# --- _load_catalog_config ---
def test_load_catalog_config_missing_file(self, temp_dir):
"""Returns None when config file doesn't exist."""
project_dir = self._make_project(temp_dir)
catalog = ExtensionCatalog(project_dir)
result = catalog._load_catalog_config(project_dir / ".specify" / "nonexistent.yml")
assert result is None
def test_load_catalog_config_localhost_allowed(self, temp_dir):
"""Localhost HTTP URLs are allowed in config."""
project_dir = self._make_project(temp_dir)
self._write_catalog_config(
project_dir,
[
{
"name": "local",
"url": "http://localhost:8000/catalog.json",
"priority": 1,
"install_allowed": True,
}
],
)
catalog = ExtensionCatalog(project_dir)
entries = catalog.get_active_catalogs()
assert len(entries) == 1
assert entries[0].url == "http://localhost:8000/catalog.json"
# --- Merge conflict resolution ---
def test_merge_conflict_higher_priority_wins(self, temp_dir):
"""When same extension id is in two catalogs, higher priority wins."""
project_dir = self._make_project(temp_dir)
# Write project config with two catalogs
self._write_catalog_config(
project_dir,
[
{
"name": "primary",
"url": ExtensionCatalog.DEFAULT_CATALOG_URL,
"priority": 1,
"install_allowed": True,
},
{
"name": "secondary",
"url": ExtensionCatalog.COMMUNITY_CATALOG_URL,
"priority": 2,
"install_allowed": False,
},
],
)
catalog = ExtensionCatalog(project_dir)
# Write primary cache with jira v2.0.0
primary_data = {
"schema_version": "1.0",
"extensions": {
"jira": {
"name": "Jira Integration",
"id": "jira",
"version": "2.0.0",
"description": "Primary Jira",
}
},
}
catalog.cache_dir.mkdir(parents=True, exist_ok=True)
catalog.cache_file.write_text(json.dumps(primary_data))
catalog.cache_metadata_file.write_text(
json.dumps({"cached_at": datetime.now(timezone.utc).isoformat(), "catalog_url": "http://test.com"})
)
# Write secondary cache (URL-hash-based) with jira v1.0.0 (should lose)
import hashlib
url_hash = hashlib.sha256(ExtensionCatalog.COMMUNITY_CATALOG_URL.encode()).hexdigest()[:16]
secondary_cache = catalog.cache_dir / f"catalog-{url_hash}.json"
secondary_meta = catalog.cache_dir / f"catalog-{url_hash}-metadata.json"
secondary_data = {
"schema_version": "1.0",
"extensions": {
"jira": {
"name": "Jira Integration Community",
"id": "jira",
"version": "1.0.0",
"description": "Community Jira",
},
"linear": {
"name": "Linear",
"id": "linear",
"version": "0.9.0",
"description": "Linear from secondary",
},
},
}
secondary_cache.write_text(json.dumps(secondary_data))
secondary_meta.write_text(
json.dumps({"cached_at": datetime.now(timezone.utc).isoformat(), "catalog_url": ExtensionCatalog.COMMUNITY_CATALOG_URL})
)
results = catalog.search()
jira_results = [r for r in results if r["id"] == "jira"]
assert len(jira_results) == 1
# Primary catalog wins
assert jira_results[0]["version"] == "2.0.0"
assert jira_results[0]["_catalog_name"] == "primary"
assert jira_results[0]["_install_allowed"] is True
# linear comes from secondary
linear_results = [r for r in results if r["id"] == "linear"]
assert len(linear_results) == 1
assert linear_results[0]["_catalog_name"] == "secondary"
assert linear_results[0]["_install_allowed"] is False
def test_install_allowed_false_from_get_extension_info(self, temp_dir):
"""get_extension_info includes _install_allowed from source catalog."""
project_dir = self._make_project(temp_dir)
# Single catalog that is install_allowed=False
self._write_catalog_config(
project_dir,
[
{
"name": "discovery",
"url": ExtensionCatalog.DEFAULT_CATALOG_URL,
"priority": 1,
"install_allowed": False,
}
],
)
catalog = ExtensionCatalog(project_dir)
self._write_valid_cache(
catalog,
{
"jira": {
"name": "Jira Integration",
"id": "jira",
"version": "1.0.0",
"description": "Jira integration",
}
},
)
info = catalog.get_extension_info("jira")
assert info is not None
assert info["_install_allowed"] is False
assert info["_catalog_name"] == "discovery"
def test_search_results_include_catalog_metadata(self, temp_dir):
"""Search results include _catalog_name and _install_allowed."""
project_dir = self._make_project(temp_dir)
self._write_catalog_config(
project_dir,
[
{
"name": "org",
"url": ExtensionCatalog.DEFAULT_CATALOG_URL,
"priority": 1,
"install_allowed": True,
}
],
)
catalog = ExtensionCatalog(project_dir)
self._write_valid_cache(
catalog,
{
"jira": {
"name": "Jira Integration",
"id": "jira",
"version": "1.0.0",
"description": "Jira integration",
}
},
)
results = catalog.search()
assert len(results) == 1
assert results[0]["_catalog_name"] == "org"
assert results[0]["_install_allowed"] is True
class TestExtensionIgnore:
"""Test .extensionignore support during extension installation."""
def _make_extension(self, temp_dir, valid_manifest_data, extra_files=None, ignore_content=None):
"""Helper to create an extension directory with optional extra files and .extensionignore."""
import yaml
ext_dir = temp_dir / "ignored-ext"
ext_dir.mkdir()
# Write manifest
with open(ext_dir / "extension.yml", "w") as f:
yaml.dump(valid_manifest_data, f)
# Create commands directory with a command file
commands_dir = ext_dir / "commands"
commands_dir.mkdir()
(commands_dir / "hello.md").write_text(
"---\ndescription: \"Test hello command\"\n---\n\n# Hello\n\n$ARGUMENTS\n"
)
# Create any extra files/dirs
if extra_files:
for rel_path, content in extra_files.items():
p = ext_dir / rel_path
p.parent.mkdir(parents=True, exist_ok=True)
if content is None:
# Create directory
p.mkdir(parents=True, exist_ok=True)
else:
p.write_text(content)
# Write .extensionignore
if ignore_content is not None:
(ext_dir / ".extensionignore").write_text(ignore_content)
return ext_dir
def test_no_extensionignore(self, temp_dir, valid_manifest_data):
"""Without .extensionignore, all files are copied."""
ext_dir = self._make_extension(
temp_dir,
valid_manifest_data,
extra_files={"README.md": "# Hello", "tests/test_foo.py": "pass"},
)
proj_dir = temp_dir / "project"
proj_dir.mkdir()
(proj_dir / ".specify").mkdir()
manager = ExtensionManager(proj_dir)
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
dest = proj_dir / ".specify" / "extensions" / "test-ext"
assert (dest / "README.md").exists()
assert (dest / "tests" / "test_foo.py").exists()
def test_extensionignore_excludes_files(self, temp_dir, valid_manifest_data):
"""Files matching .extensionignore patterns are excluded."""
ext_dir = self._make_extension(
temp_dir,
valid_manifest_data,
extra_files={
"README.md": "# Hello",
"tests/test_foo.py": "pass",
"tests/test_bar.py": "pass",
".github/workflows/ci.yml": "on: push",
},
ignore_content="tests/\n.github/\n",
)
proj_dir = temp_dir / "project"
proj_dir.mkdir()
(proj_dir / ".specify").mkdir()
manager = ExtensionManager(proj_dir)
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
dest = proj_dir / ".specify" / "extensions" / "test-ext"
# Included
assert (dest / "README.md").exists()
assert (dest / "extension.yml").exists()
assert (dest / "commands" / "hello.md").exists()
# Excluded
assert not (dest / "tests").exists()
assert not (dest / ".github").exists()
def test_extensionignore_glob_patterns(self, temp_dir, valid_manifest_data):
"""Glob patterns like *.pyc are respected."""
ext_dir = self._make_extension(
temp_dir,
valid_manifest_data,
extra_files={
"README.md": "# Hello",
"helpers.pyc": b"\x00".decode("latin-1"),
"commands/cache.pyc": b"\x00".decode("latin-1"),
},
ignore_content="*.pyc\n",
)
proj_dir = temp_dir / "project"
proj_dir.mkdir()
(proj_dir / ".specify").mkdir()
manager = ExtensionManager(proj_dir)
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
dest = proj_dir / ".specify" / "extensions" / "test-ext"
assert (dest / "README.md").exists()
assert not (dest / "helpers.pyc").exists()
assert not (dest / "commands" / "cache.pyc").exists()
def test_extensionignore_comments_and_blanks(self, temp_dir, valid_manifest_data):
"""Comments and blank lines in .extensionignore are ignored."""
ext_dir = self._make_extension(
temp_dir,
valid_manifest_data,
extra_files={"README.md": "# Hello", "notes.txt": "some notes"},
ignore_content="# This is a comment\n\nnotes.txt\n\n# Another comment\n",
)
proj_dir = temp_dir / "project"
proj_dir.mkdir()
(proj_dir / ".specify").mkdir()
manager = ExtensionManager(proj_dir)
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
dest = proj_dir / ".specify" / "extensions" / "test-ext"
assert (dest / "README.md").exists()
assert not (dest / "notes.txt").exists()
def test_extensionignore_itself_excluded(self, temp_dir, valid_manifest_data):
""".extensionignore is never copied to the destination."""
ext_dir = self._make_extension(
temp_dir,
valid_manifest_data,
ignore_content="# nothing special here\n",
)
proj_dir = temp_dir / "project"
proj_dir.mkdir()
(proj_dir / ".specify").mkdir()
manager = ExtensionManager(proj_dir)
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
dest = proj_dir / ".specify" / "extensions" / "test-ext"
assert (dest / "extension.yml").exists()
assert not (dest / ".extensionignore").exists()
def test_extensionignore_relative_path_match(self, temp_dir, valid_manifest_data):
"""Patterns matching relative paths work correctly."""
ext_dir = self._make_extension(
temp_dir,
valid_manifest_data,
extra_files={
"docs/guide.md": "# Guide",
"docs/internal/draft.md": "draft",
"README.md": "# Hello",
},
ignore_content="docs/internal/draft.md\n",
)
proj_dir = temp_dir / "project"
proj_dir.mkdir()
(proj_dir / ".specify").mkdir()
manager = ExtensionManager(proj_dir)
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
dest = proj_dir / ".specify" / "extensions" / "test-ext"
assert (dest / "docs" / "guide.md").exists()
assert not (dest / "docs" / "internal" / "draft.md").exists()
def test_extensionignore_dotdot_pattern_is_noop(self, temp_dir, valid_manifest_data):
"""Patterns with '..' should not escape the extension root."""
ext_dir = self._make_extension(
temp_dir,
valid_manifest_data,
extra_files={"README.md": "# Hello"},
ignore_content="../sibling/\n",
)
proj_dir = temp_dir / "project"
proj_dir.mkdir()
(proj_dir / ".specify").mkdir()
manager = ExtensionManager(proj_dir)
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
dest = proj_dir / ".specify" / "extensions" / "test-ext"
# Everything should still be copied — the '..' pattern matches nothing inside
assert (dest / "README.md").exists()
assert (dest / "extension.yml").exists()
assert (dest / "commands" / "hello.md").exists()
def test_extensionignore_absolute_path_pattern_is_noop(self, temp_dir, valid_manifest_data):
"""Absolute path patterns should not match anything."""
ext_dir = self._make_extension(
temp_dir,
valid_manifest_data,
extra_files={"README.md": "# Hello", "passwd": "sensitive"},
ignore_content="/etc/passwd\n",
)
proj_dir = temp_dir / "project"
proj_dir.mkdir()
(proj_dir / ".specify").mkdir()
manager = ExtensionManager(proj_dir)
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
dest = proj_dir / ".specify" / "extensions" / "test-ext"
# Nothing matches — /etc/passwd is anchored to root and there's no 'etc' dir
assert (dest / "README.md").exists()
assert (dest / "passwd").exists()
def test_extensionignore_empty_file(self, temp_dir, valid_manifest_data):
"""An empty .extensionignore should exclude only itself."""
ext_dir = self._make_extension(
temp_dir,
valid_manifest_data,
extra_files={"README.md": "# Hello", "notes.txt": "notes"},
ignore_content="",
)
proj_dir = temp_dir / "project"
proj_dir.mkdir()
(proj_dir / ".specify").mkdir()
manager = ExtensionManager(proj_dir)
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
dest = proj_dir / ".specify" / "extensions" / "test-ext"
assert (dest / "README.md").exists()
assert (dest / "notes.txt").exists()
assert (dest / "extension.yml").exists()
# .extensionignore itself is still excluded
assert not (dest / ".extensionignore").exists()
def test_extensionignore_windows_backslash_patterns(self, temp_dir, valid_manifest_data):
"""Backslash patterns (Windows-style) are normalised to forward slashes."""
ext_dir = self._make_extension(
temp_dir,
valid_manifest_data,
extra_files={
"docs/internal/draft.md": "draft",
"docs/guide.md": "# Guide",
},
ignore_content="docs\\internal\\draft.md\n",
)
proj_dir = temp_dir / "project"
proj_dir.mkdir()
(proj_dir / ".specify").mkdir()
manager = ExtensionManager(proj_dir)
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
dest = proj_dir / ".specify" / "extensions" / "test-ext"
assert (dest / "docs" / "guide.md").exists()
assert not (dest / "docs" / "internal" / "draft.md").exists()
def test_extensionignore_star_does_not_cross_directories(self, temp_dir, valid_manifest_data):
"""'*' should NOT match across directory boundaries (gitignore semantics)."""
ext_dir = self._make_extension(
temp_dir,
valid_manifest_data,
extra_files={
"docs/api.draft.md": "draft",
"docs/sub/api.draft.md": "nested draft",
},
ignore_content="docs/*.draft.md\n",
)
proj_dir = temp_dir / "project"
proj_dir.mkdir()
(proj_dir / ".specify").mkdir()
manager = ExtensionManager(proj_dir)
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
dest = proj_dir / ".specify" / "extensions" / "test-ext"
# docs/*.draft.md should only match directly inside docs/, NOT subdirs
assert not (dest / "docs" / "api.draft.md").exists()
assert (dest / "docs" / "sub" / "api.draft.md").exists()
def test_extensionignore_doublestar_crosses_directories(self, temp_dir, valid_manifest_data):
"""'**' should match across directory boundaries."""
ext_dir = self._make_extension(
temp_dir,
valid_manifest_data,
extra_files={
"docs/api.draft.md": "draft",
"docs/sub/api.draft.md": "nested draft",
"docs/guide.md": "guide",
},
ignore_content="docs/**/*.draft.md\n",
)
proj_dir = temp_dir / "project"
proj_dir.mkdir()
(proj_dir / ".specify").mkdir()
manager = ExtensionManager(proj_dir)
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
dest = proj_dir / ".specify" / "extensions" / "test-ext"
assert not (dest / "docs" / "api.draft.md").exists()
assert not (dest / "docs" / "sub" / "api.draft.md").exists()
assert (dest / "docs" / "guide.md").exists()
def test_extensionignore_negation_pattern(self, temp_dir, valid_manifest_data):
"""'!' negation re-includes a previously excluded file."""
ext_dir = self._make_extension(
temp_dir,
valid_manifest_data,
extra_files={
"docs/guide.md": "# Guide",
"docs/internal.md": "internal",
"docs/api.md": "api",
},
ignore_content="docs/*.md\n!docs/api.md\n",
)
proj_dir = temp_dir / "project"
proj_dir.mkdir()
(proj_dir / ".specify").mkdir()
manager = ExtensionManager(proj_dir)
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
dest = proj_dir / ".specify" / "extensions" / "test-ext"
# docs/*.md excludes all .md in docs, but !docs/api.md re-includes it
assert not (dest / "docs" / "guide.md").exists()
assert not (dest / "docs" / "internal.md").exists()
assert (dest / "docs" / "api.md").exists()