Mirror of https://github.com/github/spec-kit.git
Synced 2026-03-20 20:33:08 +00:00

Compare commits: main ... chore/rele (2 commits: 9b10d440b1, 2656ee1df7)
@@ -51,14 +51,6 @@ echo -e "\n🤖 Installing OpenCode CLI..."
 run_command "npm install -g opencode-ai@latest"
 echo "✅ Done"
 
-echo -e "\n🤖 Installing Junie CLI..."
-run_command "npm install -g @jetbrains/junie-cli@latest"
-echo "✅ Done"
-
-echo -e "\n🤖 Installing Pi Coding Agent..."
-run_command "npm install -g @mariozechner/pi-coding-agent@latest"
-echo "✅ Done"
-
 echo -e "\n🤖 Installing Kiro CLI..."
 # https://kiro.dev/docs/cli/
 KIRO_INSTALLER_URL="https://kiro.dev/install.sh"
.github/ISSUE_TEMPLATE/agent_request.yml (2 changed lines, vendored)

@@ -8,7 +8,7 @@ body:
       value: |
         Thanks for requesting a new agent! Before submitting, please check if the agent is already supported.
 
-        **Currently supported agents**: Claude Code, Gemini CLI, GitHub Copilot, Cursor, Qwen Code, opencode, Codex CLI, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy, Qoder CLI, Kiro CLI, Amp, SHAI, Tabnine CLI, Antigravity, IBM Bob, Mistral Vibe, Kimi Code, Trae, Pi Coding Agent, iFlow CLI
+        **Currently supported agents**: Claude Code, Gemini CLI, GitHub Copilot, Cursor, Qwen Code, opencode, Codex CLI, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy, Qoder CLI, Kiro CLI, Amp, SHAI, IBM Bob, Antigravity
 
   - type: input
     id: agent-name
.github/ISSUE_TEMPLATE/config.yml (2 changed lines, vendored)

@@ -7,7 +7,7 @@ contact_links:
     url: https://github.com/github/spec-kit/blob/main/README.md
     about: Read the Spec Kit documentation and guides
   - name: 🛠️ Extension Development Guide
-    url: https://github.com/github/spec-kit/blob/main/extensions/EXTENSION-DEVELOPMENT-GUIDE.md
+    url: https://github.com/manfredseee/spec-kit/blob/main/extensions/EXTENSION-DEVELOPMENT-GUIDE.md
     about: Learn how to develop and publish Spec Kit extensions
   - name: 🤝 Contributing Guide
     url: https://github.com/github/spec-kit/blob/main/CONTRIBUTING.md
.github/ISSUE_TEMPLATE/preset_submission.yml (169 changed lines, vendored; file removed)

@@ -1,169 +0,0 @@
-name: Preset Submission
-description: Submit your preset to the Spec Kit preset catalog
-title: "[Preset]: Add "
-labels: ["preset-submission", "enhancement", "needs-triage"]
-body:
-  - type: markdown
-    attributes:
-      value: |
-        Thanks for contributing a preset! This template helps you submit your preset to the community catalog.
-
-        **Before submitting:**
-        - Review the [Preset Publishing Guide](https://github.com/github/spec-kit/blob/main/presets/PUBLISHING.md)
-        - Ensure your preset has a valid `preset.yml` manifest
-        - Create a GitHub release with a version tag (e.g., v1.0.0)
-        - Test installation from the release archive: `specify preset add --from <download-url>`
-
-  - type: input
-    id: preset-id
-    attributes:
-      label: Preset ID
-      description: Unique preset identifier (lowercase with hyphens only)
-      placeholder: "e.g., healthcare-compliance"
-    validations:
-      required: true
-
-  - type: input
-    id: preset-name
-    attributes:
-      label: Preset Name
-      description: Human-readable preset name
-      placeholder: "e.g., Healthcare Compliance"
-    validations:
-      required: true
-
-  - type: input
-    id: version
-    attributes:
-      label: Version
-      description: Semantic version number
-      placeholder: "e.g., 1.0.0"
-    validations:
-      required: true
-
-  - type: textarea
-    id: description
-    attributes:
-      label: Description
-      description: Brief description of what your preset does (under 200 characters)
-      placeholder: Enforces HIPAA-compliant spec workflows with audit templates and compliance checklists
-    validations:
-      required: true
-
-  - type: input
-    id: author
-    attributes:
-      label: Author
-      description: Your name or organization
-      placeholder: "e.g., John Doe or Acme Corp"
-    validations:
-      required: true
-
-  - type: input
-    id: repository
-    attributes:
-      label: Repository URL
-      description: GitHub repository URL for your preset
-      placeholder: "https://github.com/your-org/spec-kit-your-preset"
-    validations:
-      required: true
-
-  - type: input
-    id: download-url
-    attributes:
-      label: Download URL
-      description: URL to the GitHub release archive for your preset (e.g., https://github.com/your-org/spec-kit-preset-your-preset/archive/refs/tags/v1.0.0.zip)
-      placeholder: "https://github.com/your-org/spec-kit-preset-your-preset/archive/refs/tags/v1.0.0.zip"
-    validations:
-      required: true
-
-  - type: input
-    id: license
-    attributes:
-      label: License
-      description: Open source license type
-      placeholder: "e.g., MIT, Apache-2.0"
-    validations:
-      required: true
-
-  - type: input
-    id: speckit-version
-    attributes:
-      label: Required Spec Kit Version
-      description: Minimum Spec Kit version required
-      placeholder: "e.g., >=0.3.0"
-    validations:
-      required: true
-
-  - type: textarea
-    id: templates-provided
-    attributes:
-      label: Templates Provided
-      description: List the template overrides your preset provides
-      placeholder: |
-        - spec-template.md — adds compliance section
-        - plan-template.md — includes audit checkpoints
-        - checklist-template.md — HIPAA compliance checklist
-    validations:
-      required: true
-
-  - type: textarea
-    id: commands-provided
-    attributes:
-      label: Commands Provided (optional)
-      description: List any command overrides your preset provides
-      placeholder: |
-        - speckit.specify.md — customized for compliance workflows
-
-  - type: textarea
-    id: tags
-    attributes:
-      label: Tags
-      description: 2-5 relevant tags (lowercase, separated by commas)
-      placeholder: "compliance, healthcare, hipaa, audit"
-    validations:
-      required: true
-
-  - type: textarea
-    id: features
-    attributes:
-      label: Key Features
-      description: List the main features and capabilities of your preset
-      placeholder: |
-        - HIPAA-compliant spec templates
-        - Audit trail checklists
-        - Compliance review workflow
-    validations:
-      required: true
-
-  - type: checkboxes
-    id: testing
-    attributes:
-      label: Testing Checklist
-      description: Confirm that your preset has been tested
-      options:
-        - label: Preset installs successfully via `specify preset add`
-          required: true
-        - label: Template resolution works correctly after installation
-          required: true
-        - label: Documentation is complete and accurate
-          required: true
-        - label: Tested on at least one real project
-          required: true
-
-  - type: checkboxes
-    id: requirements
-    attributes:
-      label: Submission Requirements
-      description: Verify your preset meets all requirements
-      options:
-        - label: Valid `preset.yml` manifest included
-          required: true
-        - label: README.md with description and usage instructions
-          required: true
-        - label: LICENSE file included
-          required: true
-        - label: GitHub release created with version tag
-          required: true
-        - label: Preset ID follows naming conventions (lowercase-with-hyphens)
-          required: true
.github/workflows/release-trigger.yml (8 changed lines, vendored)

@@ -86,10 +86,8 @@ jobs:
           if [ -f "CHANGELOG.md" ]; then
             DATE=$(date +%Y-%m-%d)
 
-            # Get the previous tag by sorting all version tags numerically
-            # (git describe --tags only finds tags reachable from HEAD,
-            # which misses tags on unmerged release branches)
-            PREVIOUS_TAG=$(git tag -l 'v*' --sort=-version:refname | head -n 1)
+            # Get the previous tag to compare commits
+            PREVIOUS_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "")
 
             echo "Generating changelog from commits..."
             if [[ -n "$PREVIOUS_TAG" ]]; then
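
The difference between the two lookups matters when release tags live on unmerged release branches. As a standalone sketch (assuming v-prefixed release tags):

```bash
# Finds the highest v* tag anywhere in the repository, including tags that
# only exist on unmerged release branches:
git tag -l 'v*' --sort=-version:refname | head -n 1

# Walks backwards from HEAD, so it only sees tags reachable from the current
# commit and can miss tags on unmerged release branches:
git describe --tags --abbrev=0 2>/dev/null || echo ""
```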
@@ -106,7 +104,7 @@
             echo ""
             echo "## [${{ steps.version.outputs.version }}] - $DATE"
             echo ""
-            echo "### Changes"
+            echo "### Changed"
             echo ""
             echo "$COMMITS"
             echo ""
@@ -30,8 +30,6 @@ gh release create "$VERSION" \
   .genreleases/spec-kit-template-qwen-ps-"$VERSION".zip \
   .genreleases/spec-kit-template-windsurf-sh-"$VERSION".zip \
   .genreleases/spec-kit-template-windsurf-ps-"$VERSION".zip \
-  .genreleases/spec-kit-template-junie-sh-"$VERSION".zip \
-  .genreleases/spec-kit-template-junie-ps-"$VERSION".zip \
   .genreleases/spec-kit-template-codex-sh-"$VERSION".zip \
   .genreleases/spec-kit-template-codex-ps-"$VERSION".zip \
   .genreleases/spec-kit-template-kilocode-sh-"$VERSION".zip \
@@ -60,12 +58,6 @@
   .genreleases/spec-kit-template-vibe-ps-"$VERSION".zip \
   .genreleases/spec-kit-template-kimi-sh-"$VERSION".zip \
   .genreleases/spec-kit-template-kimi-ps-"$VERSION".zip \
-  .genreleases/spec-kit-template-trae-sh-"$VERSION".zip \
-  .genreleases/spec-kit-template-trae-ps-"$VERSION".zip \
-  .genreleases/spec-kit-template-pi-sh-"$VERSION".zip \
-  .genreleases/spec-kit-template-pi-ps-"$VERSION".zip \
-  .genreleases/spec-kit-template-iflow-sh-"$VERSION".zip \
-  .genreleases/spec-kit-template-iflow-ps-"$VERSION".zip \
   .genreleases/spec-kit-template-generic-sh-"$VERSION".zip \
   .genreleases/spec-kit-template-generic-ps-"$VERSION".zip \
   --title "Spec Kit Templates - $VERSION_NO_V" \
@@ -14,7 +14,7 @@
 
 .PARAMETER Agents
 Comma or space separated subset of agents to build (default: all)
-Valid agents: claude, gemini, copilot, cursor-agent, qwen, opencode, windsurf, junie, codex, kilocode, auggie, roo, codebuddy, amp, kiro-cli, bob, qodercli, shai, tabnine, agy, vibe, kimi, trae, pi, iflow, generic
+Valid agents: claude, gemini, copilot, cursor-agent, qwen, opencode, windsurf, codex, kilocode, auggie, roo, codebuddy, amp, kiro-cli, bob, qodercli, shai, tabnine, agy, vibe, kimi, generic
 
 .PARAMETER Scripts
 Comma or space separated subset of script types to build (default: both)
@@ -201,26 +201,20 @@ agent: $basename
     }
 }
 
-# Create skills in <skills_dir>\<name>\SKILL.md format.
-# Most agents use hyphenated names (e.g. speckit-plan); Kimi is the
-# current dotted-name exception (e.g. speckit.plan).
-#
-# Technical debt note:
-# Keep SKILL.md frontmatter aligned with `install_ai_skills()` and extension
-# overrides (at minimum: name/description/compatibility/metadata.{author,source}).
-function New-Skills {
+# Create Kimi Code skills in .kimi/skills/<name>/SKILL.md format.
+# Kimi CLI discovers skills as directories containing a SKILL.md file,
+# invoked with /skill:<name> (e.g. /skill:speckit.specify).
+function New-KimiSkills {
     param(
         [string]$SkillsDir,
-        [string]$ScriptVariant,
-        [string]$AgentName,
-        [string]$Separator = '-'
+        [string]$ScriptVariant
     )
 
     $templates = Get-ChildItem -Path "templates/commands/*.md" -File -ErrorAction SilentlyContinue
 
     foreach ($template in $templates) {
         $name = [System.IO.Path]::GetFileNameWithoutExtension($template.Name)
-        $skillName = "speckit${Separator}$name"
+        $skillName = "speckit.$name"
         $skillDir = Join-Path $SkillsDir $skillName
         New-Item -ItemType Directory -Force -Path $skillDir | Out-Null
@@ -273,7 +267,7 @@ function New-Skills {
 
         $body = $outputLines -join "`n"
         $body = $body -replace '\{ARGS\}', '$ARGUMENTS'
-        $body = $body -replace '__AGENT__', $AgentName
+        $body = $body -replace '__AGENT__', 'kimi'
         $body = Rewrite-Paths -Content $body
 
         # Strip existing frontmatter, keep only body
@@ -289,7 +283,7 @@ function New-Skills {
             if ($inBody) { $templateBody += "$line`n" }
         }
 
-        $skillContent = "---`nname: `"$skillName`"`ndescription: `"$description`"`ncompatibility: `"Requires spec-kit project structure with .specify/ directory`"`nmetadata:`n author: `"github-spec-kit`"`n source: `"templates/commands/$name.md`"`n---`n`n$templateBody"
+        $skillContent = "---`nname: `"$skillName`"`ndescription: `"$description`"`n---`n`n$templateBody"
         Set-Content -Path (Join-Path $skillDir "SKILL.md") -Value $skillContent -NoNewline
     }
 }
@@ -401,14 +395,9 @@ function Build-Variant {
             $cmdDir = Join-Path $baseDir ".windsurf/workflows"
             Generate-Commands -Agent 'windsurf' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
         }
-        'junie' {
-            $cmdDir = Join-Path $baseDir ".junie/commands"
-            Generate-Commands -Agent 'junie' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
-        }
         'codex' {
-            $skillsDir = Join-Path $baseDir ".agents/skills"
-            New-Item -ItemType Directory -Force -Path $skillsDir | Out-Null
-            New-Skills -SkillsDir $skillsDir -ScriptVariant $Script -AgentName 'codex' -Separator '-'
+            $cmdDir = Join-Path $baseDir ".codex/prompts"
+            Generate-Commands -Agent 'codex' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
         }
         'kilocode' {
             $cmdDir = Join-Path $baseDir ".kilocode/workflows"
@@ -463,20 +452,7 @@ function Build-Variant {
         'kimi' {
             $skillsDir = Join-Path $baseDir ".kimi/skills"
             New-Item -ItemType Directory -Force -Path $skillsDir | Out-Null
-            New-Skills -SkillsDir $skillsDir -ScriptVariant $Script -AgentName 'kimi' -Separator '.'
-        }
-        'trae' {
-            $rulesDir = Join-Path $baseDir ".trae/rules"
-            New-Item -ItemType Directory -Force -Path $rulesDir | Out-Null
-            Generate-Commands -Agent 'trae' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $rulesDir -ScriptVariant $Script
-        }
-        'pi' {
-            $cmdDir = Join-Path $baseDir ".pi/prompts"
-            Generate-Commands -Agent 'pi' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
-        }
-        'iflow' {
-            $cmdDir = Join-Path $baseDir ".iflow/commands"
-            Generate-Commands -Agent 'iflow' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
+            New-KimiSkills -SkillsDir $skillsDir -ScriptVariant $Script
         }
         'generic' {
             $cmdDir = Join-Path $baseDir ".speckit/commands"
@@ -494,7 +470,7 @@ function Build-Variant {
 }
 
 # Define all agents and scripts
-$AllAgents = @('claude', 'gemini', 'copilot', 'cursor-agent', 'qwen', 'opencode', 'windsurf', 'junie', 'codex', 'kilocode', 'auggie', 'roo', 'codebuddy', 'amp', 'kiro-cli', 'bob', 'qodercli', 'shai', 'tabnine', 'agy', 'vibe', 'kimi', 'trae', 'pi', 'iflow', 'generic')
+$AllAgents = @('claude', 'gemini', 'copilot', 'cursor-agent', 'qwen', 'opencode', 'windsurf', 'codex', 'kilocode', 'auggie', 'roo', 'codebuddy', 'amp', 'kiro-cli', 'bob', 'qodercli', 'shai', 'tabnine', 'agy', 'vibe', 'kimi', 'generic')
 $AllScripts = @('sh', 'ps')
 
 function Normalize-List {
@@ -6,7 +6,7 @@ set -euo pipefail
 # Usage: .github/workflows/scripts/create-release-packages.sh <version>
 # Version argument should include leading 'v'.
 # Optionally set AGENTS and/or SCRIPTS env vars to limit what gets built.
-# AGENTS  : space or comma separated subset of: claude gemini copilot cursor-agent qwen opencode windsurf junie codex kilocode auggie roo codebuddy amp shai tabnine kiro-cli agy bob vibe qodercli kimi trae pi iflow generic (default: all)
+# AGENTS  : space or comma separated subset of: claude gemini copilot cursor-agent qwen opencode windsurf codex kilocode auggie roo codebuddy amp shai tabnine kiro-cli agy bob vibe qodercli kimi generic (default: all)
 # SCRIPTS : space or comma separated subset of: sh ps (default: both)
 # Examples:
 #   AGENTS=claude SCRIPTS=sh $0 v0.2.0
@@ -26,27 +26,9 @@ fi
 echo "Building release packages for $NEW_VERSION"
 
-# Create and use .genreleases directory for all build artifacts
-# Override via GENRELEASES_DIR env var (e.g. for tests writing to a temp dir)
-GENRELEASES_DIR="${GENRELEASES_DIR:-.genreleases}"
-
-# Guard against unsafe GENRELEASES_DIR values before cleaning
-if [[ -z "$GENRELEASES_DIR" ]]; then
-  echo "GENRELEASES_DIR must not be empty" >&2
-  exit 1
-fi
-case "$GENRELEASES_DIR" in
-  '/'|'.'|'..')
-    echo "Refusing to use unsafe GENRELEASES_DIR value: $GENRELEASES_DIR" >&2
-    exit 1
-    ;;
-esac
-if [[ "$GENRELEASES_DIR" == *".."* ]]; then
-  echo "Refusing to use GENRELEASES_DIR containing '..' path segments: $GENRELEASES_DIR" >&2
-  exit 1
-fi
-
+GENRELEASES_DIR=".genreleases"
 mkdir -p "$GENRELEASES_DIR"
-rm -rf "${GENRELEASES_DIR%/}/"* || true
+rm -rf "$GENRELEASES_DIR"/* || true
 
 rewrite_paths() {
   sed -E \
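
The guard being dropped here is what stands between a bad `GENRELEASES_DIR` value and the `rm -rf` that follows it: with a value like `/` or one containing `..`, the glob expansion would delete files outside the build tree. A minimal standalone sketch of the same idea (not the exact script):

```bash
#!/usr/bin/env bash
# Sketch: validate a cleanup directory before running rm -rf on its contents.
set -euo pipefail

dir="${GENRELEASES_DIR:-.genreleases}"

# Reject empty, root, current/parent dir, and any '..' segments,
# because the cleanup step expands to: rm -rf "$dir"/*
case "$dir" in
  ''|'/'|'.'|'..') echo "Refusing unsafe dir: '$dir'" >&2; exit 1 ;;
esac
if [[ "$dir" == *".."* ]]; then
  echo "Refusing dir with '..' segments: '$dir'" >&2
  exit 1
fi

mkdir -p "$dir"
rm -rf "${dir:?}"/* || true   # ${dir:?} additionally aborts if dir is empty
echo "cleaned $dir"
```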
@@ -139,24 +121,18 @@ EOF
   done
 }
 
-# Create skills in <skills_dir>/<name>/SKILL.md format.
-# Most agents use hyphenated names (e.g. speckit-plan); Kimi is the
-# current dotted-name exception (e.g. speckit.plan).
-#
-# Technical debt note:
-# Keep SKILL.md frontmatter aligned with `install_ai_skills()` and extension
-# overrides (at minimum: name/description/compatibility/metadata.{author,source}).
-create_skills() {
+# Create Kimi Code skills in .kimi/skills/<name>/SKILL.md format.
+# Kimi CLI discovers skills as directories containing a SKILL.md file,
+# invoked with /skill:<name> (e.g. /skill:speckit.specify).
+create_kimi_skills() {
   local skills_dir="$1"
   local script_variant="$2"
-  local agent_name="$3"
-  local separator="${4:-"-"}"
 
   for template in templates/commands/*.md; do
     [[ -f "$template" ]] || continue
     local name
     name=$(basename "$template" .md)
-    local skill_name="speckit${separator}${name}"
+    local skill_name="speckit.${name}"
     local skill_dir="${skills_dir}/${skill_name}"
     mkdir -p "$skill_dir"
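
Either way, each command template becomes its own skill directory. Under the Kimi layout, and assuming the stock core command templates, the result would look roughly like:

```
.kimi/skills/
  speckit.constitution/SKILL.md
  speckit.specify/SKILL.md
  speckit.plan/SKILL.md
  speckit.tasks/SKILL.md
  speckit.implement/SKILL.md
```

Each is invoked in Kimi CLI as /skill:speckit.<command>, as the comment above notes.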
@@ -199,9 +175,9 @@ create_skills() {
       in_frontmatter && skip_scripts && /^[[:space:]]/ { next }
       { print }
     ')
-    body=$(printf '%s\n' "$body" | sed 's/{ARGS}/\$ARGUMENTS/g' | sed "s/__AGENT__/$agent_name/g" | rewrite_paths)
+    body=$(printf '%s\n' "$body" | sed 's/{ARGS}/\$ARGUMENTS/g' | sed 's/__AGENT__/kimi/g' | rewrite_paths)
 
-    # Strip existing frontmatter and prepend skills frontmatter.
+    # Strip existing frontmatter and prepend Kimi frontmatter
     local template_body
     template_body=$(printf '%s\n' "$body" | awk '/^---/{p++; if(p==2){found=1; next}} found')
@@ -209,10 +185,6 @@
       printf -- '---\n'
       printf 'name: "%s"\n' "$skill_name"
       printf 'description: "%s"\n' "$description"
-      printf 'compatibility: "%s"\n' "Requires spec-kit project structure with .specify/ directory"
-      printf -- 'metadata:\n'
-      printf ' author: "%s"\n' "github-spec-kit"
-      printf ' source: "%s"\n' "templates/commands/${name}.md"
       printf -- '---\n\n'
       printf '%s\n' "$template_body"
     } > "$skill_dir/SKILL.md"
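
With a hypothetical `plan` command template, the two shapes of SKILL.md frontmatter these printf blocks emit would look like this (field values assumed from the calls above; the description comes from the template):

```
# Removed variant (full metadata; hyphenated name, e.g. for Codex):
---
name: "speckit-plan"
description: "<description from the template>"
compatibility: "Requires spec-kit project structure with .specify/ directory"
metadata:
 author: "github-spec-kit"
 source: "templates/commands/plan.md"
---

# Kept variant (name and description only; dotted Kimi name):
---
name: "speckit.plan"
description: "<description from the template>"
---
```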
@@ -246,7 +218,7 @@ build_variant() {
   esac
   fi
 
-  [[ -d templates ]] && { mkdir -p "$SPEC_DIR/templates"; find templates -type f -not -path "templates/commands/*" -not -name "vscode-settings.json" | while IFS= read -r f; do d="$SPEC_DIR/$(dirname "$f")"; mkdir -p "$d"; cp "$f" "$d/"; done; echo "Copied templates -> .specify/templates"; }
+  [[ -d templates ]] && { mkdir -p "$SPEC_DIR/templates"; find templates -type f -not -path "templates/commands/*" -not -name "vscode-settings.json" -exec cp --parents {} "$SPEC_DIR"/ \; ; echo "Copied templates -> .specify/templates"; }
 
   case $agent in
     claude)
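
Both variants copy the same files; the difference is portability. `cp --parents` is a GNU coreutils option that recreates each source path's directory components under the destination, and it is not available in BSD/macOS `cp`, which is presumably why the loop variant exists. Stripped to essentials:

```bash
# GNU-only: recreates templates/<subdir>/ under "$SPEC_DIR" automatically
find templates -type f -exec cp --parents {} "$SPEC_DIR"/ \;

# Portable equivalent: rebuild each directory by hand, then copy
find templates -type f | while IFS= read -r f; do
  d="$SPEC_DIR/$(dirname "$f")"
  mkdir -p "$d"
  cp "$f" "$d/"
done
```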
@@ -276,12 +248,9 @@ build_variant() {
     windsurf)
       mkdir -p "$base_dir/.windsurf/workflows"
       generate_commands windsurf md "\$ARGUMENTS" "$base_dir/.windsurf/workflows" "$script" ;;
-    junie)
-      mkdir -p "$base_dir/.junie/commands"
-      generate_commands junie md "\$ARGUMENTS" "$base_dir/.junie/commands" "$script" ;;
     codex)
-      mkdir -p "$base_dir/.agents/skills"
-      create_skills "$base_dir/.agents/skills" "$script" "codex" "-" ;;
+      mkdir -p "$base_dir/.codex/prompts"
+      generate_commands codex md "\$ARGUMENTS" "$base_dir/.codex/prompts" "$script" ;;
     kilocode)
       mkdir -p "$base_dir/.kilocode/workflows"
       generate_commands kilocode md "\$ARGUMENTS" "$base_dir/.kilocode/workflows" "$script" ;;
@@ -321,16 +290,7 @@ build_variant() {
       generate_commands vibe md "\$ARGUMENTS" "$base_dir/.vibe/prompts" "$script" ;;
     kimi)
       mkdir -p "$base_dir/.kimi/skills"
-      create_skills "$base_dir/.kimi/skills" "$script" "kimi" "." ;;
-    trae)
-      mkdir -p "$base_dir/.trae/rules"
-      generate_commands trae md "\$ARGUMENTS" "$base_dir/.trae/rules" "$script" ;;
-    pi)
-      mkdir -p "$base_dir/.pi/prompts"
-      generate_commands pi md "\$ARGUMENTS" "$base_dir/.pi/prompts" "$script" ;;
-    iflow)
-      mkdir -p "$base_dir/.iflow/commands"
-      generate_commands iflow md "\$ARGUMENTS" "$base_dir/.iflow/commands" "$script" ;;
+      create_kimi_skills "$base_dir/.kimi/skills" "$script" ;;
     generic)
       mkdir -p "$base_dir/.speckit/commands"
       generate_commands generic md "\$ARGUMENTS" "$base_dir/.speckit/commands" "$script" ;;
@@ -340,38 +300,37 @@ build_variant() {
 }
 
 # Determine agent list
-ALL_AGENTS=(claude gemini copilot cursor-agent qwen opencode windsurf junie codex kilocode auggie roo codebuddy amp shai tabnine kiro-cli agy bob vibe qodercli kimi trae pi iflow generic)
+ALL_AGENTS=(claude gemini copilot cursor-agent qwen opencode windsurf codex kilocode auggie roo codebuddy amp shai tabnine kiro-cli agy bob vibe qodercli kimi generic)
 ALL_SCRIPTS=(sh ps)
 
+norm_list() {
+  tr ',\n' ' ' | awk '{for(i=1;i<=NF;i++){if(!seen[$i]++){printf((out?"\n":"") $i);out=1}}}END{printf("\n")}'
+}
+
 validate_subset() {
-  local type=$1; shift
-  local allowed_str="$1"; shift
+  local type=$1; shift; local -n allowed=$1; shift; local items=("$@")
   local invalid=0
-  for it in "$@"; do
+  for it in "${items[@]}"; do
     local found=0
-    for a in $allowed_str; do
-      if [[ "$it" == "$a" ]]; then found=1; break; fi
-    done
+    for a in "${allowed[@]}"; do [[ $it == "$a" ]] && { found=1; break; }; done
     if [[ $found -eq 0 ]]; then
-      echo "Error: unknown $type '$it' (allowed: $allowed_str)" >&2
+      echo "Error: unknown $type '$it' (allowed: ${allowed[*]})" >&2
       invalid=1
     fi
   done
   return $invalid
 }
 
-read_list() { tr ',\n' ' ' | awk '{for(i=1;i<=NF;i++){if(!seen[$i]++){printf((out?" ":"") $i);out=1}}}END{printf("\n")}'; }
-
 if [[ -n ${AGENTS:-} ]]; then
-  read -ra AGENT_LIST <<< "$(printf '%s' "$AGENTS" | read_list)"
-  validate_subset agent "${ALL_AGENTS[*]}" "${AGENT_LIST[@]}" || exit 1
+  mapfile -t AGENT_LIST < <(printf '%s' "$AGENTS" | norm_list)
+  validate_subset agent ALL_AGENTS "${AGENT_LIST[@]}" || exit 1
 else
   AGENT_LIST=("${ALL_AGENTS[@]}")
 fi
 
 if [[ -n ${SCRIPTS:-} ]]; then
-  read -ra SCRIPT_LIST <<< "$(printf '%s' "$SCRIPTS" | read_list)"
-  validate_subset script "${ALL_SCRIPTS[*]}" "${SCRIPT_LIST[@]}" || exit 1
+  mapfile -t SCRIPT_LIST < <(printf '%s' "$SCRIPTS" | norm_list)
+  validate_subset script ALL_SCRIPTS "${SCRIPT_LIST[@]}" || exit 1
 else
   SCRIPT_LIST=("${ALL_SCRIPTS[@]}")
 fi
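
The two list-parsing styles differ mainly in bash version requirements: `mapfile` needs bash 4 and `local -n` namerefs need bash 4.3, while `read -ra` plus a space-joined string also works on the bash 3.2 that macOS still ships. A sketch of just the parse-and-dedupe step in both styles (inputs assumed):

```bash
AGENTS="claude, gemini claude"          # mixed separators, one duplicate

# bash 3.2-compatible: dedupe to a space-joined string, split with read -ra
read -ra LIST <<< "$(printf '%s' "$AGENTS" | tr ',\n' ' ' |
  awk '{for(i=1;i<=NF;i++){if(!seen[$i]++){printf((out?" ":"") $i);out=1}}}END{printf("\n")}')"

# bash 4+: dedupe to newline-joined output, collect with mapfile
mapfile -t LIST2 < <(printf '%s' "$AGENTS" | tr ',\n' ' ' |
  awk '{for(i=1;i<=NF;i++){if(!seen[$i]++){printf((out?"\n":"") $i);out=1}}}END{printf("\n")}')

printf '%s\n' "${LIST[@]}"              # -> claude gemini
```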
.github/workflows/stale.yml (2 changed lines, vendored)

@@ -39,4 +39,4 @@ jobs:
           any-of-labels: ''
 
           # Operations per run (helps avoid rate limits)
-          operations-per-run: 250
+          operations-per-run: 100
AGENTS.md (23 changed lines)

@@ -33,12 +33,11 @@ Specify supports multiple AI agents by generating agent-specific command files a
 | **Cursor** | `.cursor/commands/` | Markdown | `cursor-agent` | Cursor CLI |
 | **Qwen Code** | `.qwen/commands/` | Markdown | `qwen` | Alibaba's Qwen Code CLI |
 | **opencode** | `.opencode/command/` | Markdown | `opencode` | opencode CLI |
-| **Codex CLI** | `.agents/skills/` | Markdown | `codex` | Codex CLI (skills) |
+| **Codex CLI** | `.codex/commands/` | Markdown | `codex` | Codex CLI |
 | **Windsurf** | `.windsurf/workflows/` | Markdown | N/A (IDE-based) | Windsurf IDE workflows |
-| **Junie** | `.junie/commands/` | Markdown | `junie` | Junie by JetBrains |
-| **Kilo Code** | `.kilocode/workflows/` | Markdown | N/A (IDE-based) | Kilo Code IDE |
-| **Auggie CLI** | `.augment/commands/` | Markdown | `auggie` | Auggie CLI |
-| **Roo Code** | `.roo/commands/` | Markdown | N/A (IDE-based) | Roo Code IDE |
+| **Kilo Code** | `.kilocode/rules/` | Markdown | N/A (IDE-based) | Kilo Code IDE |
+| **Auggie CLI** | `.augment/rules/` | Markdown | `auggie` | Auggie CLI |
+| **Roo Code** | `.roo/rules/` | Markdown | N/A (IDE-based) | Roo Code IDE |
 | **CodeBuddy CLI** | `.codebuddy/commands/` | Markdown | `codebuddy` | CodeBuddy CLI |
 | **Qoder CLI** | `.qoder/commands/` | Markdown | `qodercli` | Qoder CLI |
 | **Kiro CLI** | `.kiro/prompts/` | Markdown | `kiro-cli` | Kiro CLI |
@@ -46,10 +45,7 @@ Specify supports multiple AI agents by generating agent-specific command files a
 | **SHAI** | `.shai/commands/` | Markdown | `shai` | SHAI CLI |
 | **Tabnine CLI** | `.tabnine/agent/commands/` | TOML | `tabnine` | Tabnine CLI |
 | **Kimi Code** | `.kimi/skills/` | Markdown | `kimi` | Kimi Code CLI (Moonshot AI) |
-| **Pi Coding Agent** | `.pi/prompts/` | Markdown | `pi` | Pi terminal coding agent |
-| **iFlow CLI** | `.iflow/commands/` | Markdown | `iflow` | iFlow CLI (iflow-ai) |
 | **IBM Bob** | `.bob/commands/` | Markdown | N/A (IDE-based) | IBM Bob IDE |
-| **Trae** | `.trae/rules/` | Markdown | N/A (IDE-based) | Trae IDE |
 | **Generic** | User-specified via `--ai-commands-dir` | Markdown | N/A | Bring your own agent |
 
 ### Step-by-Step Integration Guide
@@ -88,7 +84,7 @@ This eliminates the need for special-case mappings throughout the codebase.
 - `folder`: Directory where agent-specific files are stored (relative to project root)
 - `commands_subdir`: Subdirectory name within the agent folder where command/prompt files are stored (default: `"commands"`)
   - Most agents use `"commands"` (e.g., `.claude/commands/`)
-  - Some agents use alternative names: `"agents"` (copilot), `"workflows"` (windsurf, kilocode), `"prompts"` (codex, kiro-cli, pi), `"command"` (opencode - singular)
+  - Some agents use alternative names: `"agents"` (copilot), `"workflows"` (windsurf, kilocode), `"prompts"` (codex, kiro-cli), `"command"` (opencode - singular)
   - This field enables `--ai-skills` to locate command templates correctly for skill generation
 - `install_url`: Installation documentation URL (set to `None` for IDE-based agents)
 - `requires_cli`: Whether the agent requires a CLI tool check during initialization
@@ -319,7 +315,6 @@ Require a command-line tool to be installed:
 - **Cursor**: `cursor-agent` CLI
 - **Qwen Code**: `qwen` CLI
 - **opencode**: `opencode` CLI
-- **Junie**: `junie` CLI
 - **Kiro CLI**: `kiro-cli` CLI
 - **CodeBuddy CLI**: `codebuddy` CLI
 - **Qoder CLI**: `qodercli` CLI
@@ -327,7 +322,6 @@ Require a command-line tool to be installed:
 - **SHAI**: `shai` CLI
 - **Tabnine CLI**: `tabnine` CLI
 - **Kimi Code**: `kimi` CLI
-- **Pi Coding Agent**: `pi` CLI
 
 ### IDE-Based Agents
 
@@ -341,7 +335,7 @@ Work within integrated development environments:
 
 ### Markdown Format
 
-Used by: Claude, Cursor, opencode, Windsurf, Junie, Kiro CLI, Amp, SHAI, IBM Bob, Kimi Code, Qwen, Pi
+Used by: Claude, Cursor, opencode, Windsurf, Kiro CLI, Amp, SHAI, IBM Bob, Kimi Code, Qwen
 
 **Standard format:**
 
@@ -379,11 +373,6 @@ Command content with {SCRIPT} and {{args}} placeholders.
 ## Directory Conventions
 
 - **CLI agents**: Usually `.<agent-name>/commands/`
-- **Skills-based exceptions**:
-  - Codex: `.agents/skills/` (skills, invoked as `$speckit-<command>`)
-- **Prompt-based exceptions**:
-  - Kiro CLI: `.kiro/prompts/`
-  - Pi: `.pi/prompts/`
 - **IDE agents**: Follow IDE-specific patterns:
   - Copilot: `.github/agents/`
   - Cursor: `.cursor/commands/`
CHANGELOG.md (118 changed lines)

@@ -1,61 +1,56 @@
 # Changelog
 
-## [0.3.2] - 2026-03-19
+<!-- markdownlint-disable MD024 -->
 
-### Changes
+Recent changes to the Specify CLI and templates are documented here.
 
-- chore: bump version to 0.3.2
-- Add conduct extension to community catalog (#1908)
-- feat(extensions): add verify-tasks extension to community catalog (#1871)
-- feat(presets): add enable/disable toggle and update semantics (#1891)
-- feat: add iFlow CLI support (#1875)
-- feat(commands): wire before/after hook events into specify and plan templates (#1886)
-- docs(catalog): add speckit-utils to community catalog (#1896)
-- docs: Add Extensions & Presets section to README (#1898)
-- chore: update DocGuard extension to v0.9.11 (#1899)
-- Update cognitive-squad catalog entry — Triadic Model, full lifecycle (#1884)
-- feat: register spec-kit-iterate extension (#1887)
-- fix(scripts): add explicit positional binding to PowerShell create-new-feature params (#1885)
-- fix(scripts): encode residual JSON control chars as \uXXXX instead of stripping (#1872)
-- chore: update DocGuard extension to v0.9.10 (#1890)
-- Feature/spec kit add pi coding agent pullrequest (#1853)
-- feat: register spec-kit-learn extension (#1883)
-
-## [0.3.1] - 2026-03-17
-
-### Changed
-
-- chore: bump version to 0.3.1
-- docs: add greenfield Spring Boot pirate-speak preset demo to README (#1878)
-- fix(ai-skills): exclude non-speckit copilot agent markdown from skills (#1867)
-- feat: add Trae IDE support as a new agent (#1817)
-- feat(cli): polite deep merge for settings.json and support JSONC (#1874)
-- feat(extensions,presets): add priority-based resolution ordering (#1855)
-- fix(scripts): suppress stdout from git fetch in create-new-feature.sh (#1876)
-- fix(scripts): harden bash scripts — escape, compat, and error handling (#1869)
-- Add cognitive-squad to community extension catalog (#1870)
-- docs: add Go / React brownfield walkthrough to community walkthroughs (#1868)
-- chore: update DocGuard extension to v0.9.8 (#1859)
-- Feature: add specify status command (#1837)
-- fix(extensions): show extension ID in list output (#1843)
-- feat(extensions): add Archive and Reconcile extensions to community catalog (#1844)
-- feat: Add DocGuard CDD enforcement extension to community catalog (#1838)
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
 ## [0.3.0] - 2026-03-13
 
 ### Changed
 
-- chore: bump version to 0.3.0
-- feat(presets): Pluggable preset system with catalog, resolver, and skills propagation (#1787)
-- fix: match 'Last updated' timestamp with or without bold markers (#1836)
-- Add specify doctor command for project health diagnostics (#1828)
-- fix: harden bash scripts against shell injection and improve robustness (#1809)
-- fix: clean up command templates (specify, analyze) (#1810)
-- fix: migrate Qwen Code CLI from TOML to Markdown format (#1589) (#1730)
-- fix(cli): deprecate explicit command support for agy (#1798) (#1808)
-- Add /selftest.extension core extension to test other extensions (#1758)
-- feat(extensions): Quality of life improvements for RFC-aligned catalog integration (#1776)
-- Add Java brownfield walkthrough to community walkthroughs (#1820)
+- No changes have been documented for this release yet.
 
+<!-- Entries for 0.2.x and earlier releases are documented in their respective sections below. -->
+- make c ignores consistent with c++ (#1747)
+- chore: bump version to 0.1.13 (#1746)
+- feat: add kiro-cli and AGENT_CONFIG consistency coverage (#1690)
+- feat: add verify extension to community catalog (#1726)
+- Add Retrospective Extension to community catalog README table (#1741)
+- fix(scripts): add empty description validation and branch checkout error handling (#1559)
+- fix: correct Copilot extension command registration (#1724)
+- fix(implement): remove Makefile from C ignore patterns (#1558)
+- Add sync extension to community catalog (#1728)
+- fix(checklist): clarify file handling behavior for append vs create (#1556)
+- fix(clarify): correct conflicting question limit from 10 to 5 (#1557)
+- chore: bump version to 0.1.12 (#1737)
+- fix: use RELEASE_PAT so tag push triggers release workflow (#1736)
+- fix: release-trigger uses release branch + PR instead of direct push to main (#1733)
+- fix: Split release process to sync pyproject.toml version with git tags (#1732)
+
+## [Unreleased]
+
+### Added
+
+- feat(presets): Pluggable preset system with preset catalog and template resolver
+- Preset manifest (`preset.yml`) with validation for artifact, command, and script types
+- `PresetManifest`, `PresetRegistry`, `PresetManager`, `PresetCatalog`, `PresetResolver` classes in `src/specify_cli/presets.py`
+- CLI commands: `specify preset search`, `specify preset add`, `specify preset list`, `specify preset remove`, `specify preset resolve`, `specify preset info`
+- CLI commands: `specify preset catalog list`, `specify preset catalog add`, `specify preset catalog remove` for multi-catalog management
+- `PresetCatalogEntry` dataclass and multi-catalog support mirroring the extension catalog system
+- `--preset` option for `specify init` to install presets during initialization
+- Priority-based preset resolution: presets with lower priority number win (`--priority` flag)
+- `resolve_template()` / `Resolve-Template` helpers in bash and PowerShell common scripts
+- Template resolution priority stack: overrides → presets → extensions → core
+- Preset catalog files (`presets/catalog.json`, `presets/catalog.community.json`)
+- Preset scaffold directory (`presets/scaffold/`)
+- Scripts updated to use template resolution instead of hardcoded paths
+- feat(presets): Preset command overrides now propagate to agent skills when `--ai-skills` was used during init
+- feat: `specify init` persists CLI options to `.specify/init-options.json` for downstream operations
+- feat(extensions): support `.extensionignore` to exclude files/folders during `specify extension add` (#1781)
 
 ## [0.2.1] - 2026-03-11
@@ -282,3 +277,28 @@
 
 - Add pytest and Python linting (ruff) to CI (#1637)
 - feat: add pull request template for better contribution guidelines (#1634)
+
+## [0.0.99] - 2026-02-19
+
+- Feat/ai skills (#1632)
+
+## [0.0.98] - 2026-02-19
+
+- chore(deps): bump actions/stale from 9 to 10 (#1623)
+- feat: add dependabot configuration for pip and GitHub Actions updates (#1622)
+
+## [0.0.97] - 2026-02-18
+
+- Remove Maintainers section from README.md (#1618)
+
+## [0.0.96] - 2026-02-17
+
+- fix: typo in plan-template.md (#1446)
+
+## [0.0.95] - 2026-02-12
+
+- Feat: add a new agent: Google Anti Gravity (#1220)
+
+## [0.0.94] - 2026-02-11
+
+- Add stale workflow for 180-day inactive issues and PRs (#1594)
README.md (151 changed lines)

@@ -25,7 +25,6 @@
 - [🚶 Community Walkthroughs](#-community-walkthroughs)
 - [🤖 Supported AI Agents](#-supported-ai-agents)
 - [🔧 Specify CLI Reference](#-specify-cli-reference)
-- [🧩 Making Spec Kit Your Own: Extensions & Presets](#-making-spec-kit-your-own-extensions--presets)
 - [📚 Core Philosophy](#-core-philosophy)
 - [🌟 Development Phases](#-development-phases)
 - [🎯 Experimental Goals](#-experimental-goals)
@@ -49,13 +48,9 @@ Choose your preferred installation method:
 
 #### Option 1: Persistent Installation (Recommended)
 
-Install once and use everywhere. Pin a specific release tag for stability (check [Releases](https://github.com/github/spec-kit/releases) for the latest):
+Install once and use everywhere:
 
 ```bash
-# Install a specific stable release (recommended — replace vX.Y.Z with the latest tag)
-uv tool install specify-cli --from git+https://github.com/github/spec-kit.git@vX.Y.Z
-
-# Or install latest from main (may include unreleased changes)
 uv tool install specify-cli --from git+https://github.com/github/spec-kit.git
 ```
 
@@ -77,7 +72,7 @@ specify check
 To upgrade Specify, see the [Upgrade Guide](./docs/upgrade.md) for detailed instructions. Quick upgrade:
 
 ```bash
-uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git@vX.Y.Z
+uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git
 ```
 
 #### Option 2: One-time Usage
@@ -85,13 +80,13 @@ uv tool install specify-cli --force --from git+https://github.com/github/spec-ki
 Run directly without installing:
 
 ```bash
-# Create new project (pinned to a stable release — replace vX.Y.Z with the latest tag)
-uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <PROJECT_NAME>
+# Create new project
+uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME>
 
 # Or initialize in existing project
-uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init . --ai claude
+uvx --from git+https://github.com/github/spec-kit.git specify init . --ai claude
 # or
-uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init --here --ai claude
+uvx --from git+https://github.com/github/spec-kit.git specify init --here --ai claude
 ```
 
 **Benefits of persistent installation:**
@@ -101,13 +96,9 @@ uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init --here
 - Better tool management with `uv tool list`, `uv tool upgrade`, `uv tool uninstall`
 - Cleaner shell configuration
 
-#### Option 3: Enterprise / Air-Gapped Installation
-
-If your environment blocks access to PyPI or GitHub, see the [Enterprise / Air-Gapped Installation](./docs/installation.md#enterprise--air-gapped-installation) guide for step-by-step instructions on using `pip download` to create portable, OS-specific wheel bundles on a connected machine.
-
 ### 2. Establish project principles
 
-Launch your AI assistant in the project directory. Most agents expose spec-kit as `/speckit.*` slash commands; Codex CLI in skills mode uses `$speckit-*` instead.
+Launch your AI assistant in the project directory. The `/speckit.*` commands are available in the assistant.
 
 Use the **`/speckit.constitution`** command to create your project's governing principles and development guidelines that will guide all subsequent development.
@@ -167,10 +158,6 @@ See Spec-Driven Development in action across different scenarios with these comm
 
 - **[Brownfield Java runtime extension](https://github.com/mnriem/spec-kit-java-brownfield-demo)** — Extends an existing open-source Jakarta EE runtime (Piranha, ~420,000 lines of Java, XML, JSP, HTML, and config files across 180 Maven modules) with a password-protected Server Admin Console, demonstrating spec-kit on a large multi-module Java project with no prior specs or constitution.
 
-- **[Brownfield Go / React dashboard demo](https://github.com/mnriem/spec-kit-go-brownfield-demo)** — Demonstrates spec-kit driven entirely from the **terminal using GitHub Copilot CLI**. Extends NASA's open-source Hermes ground support system (Go) with a lightweight React-based web telemetry dashboard, showing that the full constitution → specify → plan → tasks → implement workflow works from the terminal.
-
-- **[Greenfield Spring Boot MVC with a custom preset](https://github.com/mnriem/spec-kit-pirate-speak-preset-demo)** — Builds a Spring Boot MVC application from scratch using a custom pirate-speak preset, demonstrating how presets can reshape the entire spec-kit experience: specifications become "Voyage Manifests," plans become "Battle Plans," and tasks become "Crew Assignments" — all generated in full pirate vernacular without changing any tooling.
-
 ## 🤖 Supported AI Agents
 
 | Agent | Support | Notes |
@@ -181,7 +168,7 @@ See Spec-Driven Development in action across different scenarios with these comm
 | [Auggie CLI](https://docs.augmentcode.com/cli/overview) | ✅ | |
 | [Claude Code](https://www.anthropic.com/claude-code) | ✅ | |
 | [CodeBuddy CLI](https://www.codebuddy.ai/cli) | ✅ | |
-| [Codex CLI](https://github.com/openai/codex) | ✅ | Requires `--ai-skills`. Codex recommends [skills](https://developers.openai.com/codex/skills) and treats [custom prompts](https://developers.openai.com/codex/custom-prompts) as deprecated. Spec-kit installs Codex skills into `.agents/skills` and invokes them as `$speckit-<command>`. |
+| [Codex CLI](https://github.com/openai/codex) | ✅ | |
 | [Cursor](https://cursor.sh/) | ✅ | |
 | [Gemini CLI](https://github.com/google-gemini/gemini-cli) | ✅ | |
 | [GitHub Copilot](https://code.visualstudio.com/) | ✅ | |
@@ -189,18 +176,14 @@ See Spec-Driven Development in action across different scenarios with these comm
 | [Jules](https://jules.google.com/) | ✅ | |
 | [Kilo Code](https://github.com/Kilo-Org/kilocode) | ✅ | |
 | [opencode](https://opencode.ai/) | ✅ | |
-| [Pi Coding Agent](https://pi.dev) | ✅ | Pi doesn't have MCP support out of the box, so `taskstoissues` won't work as intended. MCP support can be added via [extensions](https://github.com/badlogic/pi-mono/tree/main/packages/coding-agent#extensions) |
 | [Qwen Code](https://github.com/QwenLM/qwen-code) | ✅ | |
 | [Roo Code](https://roocode.com/) | ✅ | |
 | [SHAI (OVHcloud)](https://github.com/ovh/shai) | ✅ | |
 | [Tabnine CLI](https://docs.tabnine.com/main/getting-started/tabnine-cli) | ✅ | |
 | [Mistral Vibe](https://github.com/mistralai/mistral-vibe) | ✅ | |
 | [Kimi Code](https://code.kimi.com/) | ✅ | |
-| [iFlow CLI](https://docs.iflow.cn/en/cli/quickstart) | ✅ | |
 | [Windsurf](https://windsurf.com/) | ✅ | |
-| [Junie](https://junie.jetbrains.com/) | ✅ | |
 | [Antigravity (agy)](https://antigravity.google/) | ✅ | Requires `--ai-skills` |
-| [Trae](https://www.trae.ai/) | ✅ | |
 | Generic | ✅ | Bring your own agent — use `--ai generic --ai-commands-dir <path>` for unsupported agents |
 
 ## 🔧 Specify CLI Reference
@@ -209,28 +192,27 @@ The `specify` command supports the following options:
 
 ### Commands
 
 | Command | Description |
 | ------- | ----------- |
 | `init`  | Initialize a new Specify project from the latest template |
-| `check` | Check for installed tools: `git` plus all CLI-based agents configured in `AGENT_CONFIG` (for example: `claude`, `gemini`, `code`/`code-insiders`, `cursor-agent`, `windsurf`, `junie`, `qwen`, `opencode`, `codex`, `kiro-cli`, `shai`, `qodercli`, `vibe`, `kimi`, `iflow`, `pi`, etc.) |
+| `check` | Check for installed tools (`git`, `claude`, `gemini`, `code`/`code-insiders`, `cursor-agent`, `windsurf`, `qwen`, `opencode`, `codex`, `kiro-cli`, `shai`, `qodercli`, `vibe`, `kimi`) |
 
 ### `specify init` Arguments & Options
 
 | Argument/Option | Type | Description |
 | --------------- | ---- | ----------- |
 | `<project-name>` | Argument | Name for your new project directory (optional if using `--here`, or use `.` for current directory) |
-| `--ai` | Option | AI assistant to use (see `AGENT_CONFIG` for the full, up-to-date list). Common options include: `claude`, `gemini`, `copilot`, `cursor-agent`, `qwen`, `opencode`, `codex`, `windsurf`, `junie`, `kilocode`, `auggie`, `roo`, `codebuddy`, `amp`, `shai`, `kiro-cli` (`kiro` alias), `agy`, `bob`, `qodercli`, `vibe`, `kimi`, `iflow`, `pi`, or `generic` (requires `--ai-commands-dir`) |
+| `--ai` | Option | AI assistant to use: `claude`, `gemini`, `copilot`, `cursor-agent`, `qwen`, `opencode`, `codex`, `windsurf`, `kilocode`, `auggie`, `roo`, `codebuddy`, `amp`, `shai`, `kiro-cli` (`kiro` alias), `agy`, `bob`, `qodercli`, `vibe`, `kimi`, or `generic` (requires `--ai-commands-dir`) |
 | `--ai-commands-dir` | Option | Directory for agent command files (required with `--ai generic`, e.g. `.myagent/commands/`) |
 | `--script` | Option | Script variant to use: `sh` (bash/zsh) or `ps` (PowerShell) |
 | `--ignore-agent-tools` | Flag | Skip checks for AI agent tools like Claude Code |
 | `--no-git` | Flag | Skip git repository initialization |
 | `--here` | Flag | Initialize project in the current directory instead of creating a new one |
 | `--force` | Flag | Force merge/overwrite when initializing in current directory (skip confirmation) |
 | `--skip-tls` | Flag | Skip SSL/TLS verification (not recommended) |
 | `--debug` | Flag | Enable detailed debug output for troubleshooting |
 | `--github-token` | Option | GitHub token for API requests (or set GH_TOKEN/GITHUB_TOKEN env variable) |
 | `--ai-skills` | Flag | Install Prompt.MD templates as agent skills in agent-specific `skills/` directory (requires `--ai`) |
-| `--branch-numbering` | Option | Branch numbering strategy: `sequential` (default — `001`, `002`, `003`) or `timestamp` (`YYYYMMDD-HHMMSS`). Timestamp mode is useful for distributed teams to avoid numbering conflicts |
 
 ### Examples
@@ -265,12 +247,6 @@ specify init my-project --ai vibe
 # Initialize with IBM Bob support
 specify init my-project --ai bob
 
-# Initialize with Pi Coding Agent support
-specify init my-project --ai pi
-
-# Initialize with Codex CLI support
-specify init my-project --ai codex --ai-skills
-
 # Initialize with Antigravity support
 specify init my-project --ai agy --ai-skills
 
@@ -305,18 +281,13 @@ specify init my-project --ai claude --ai-skills
 # Initialize in current directory with agent skills
 specify init --here --ai gemini --ai-skills
 
-# Use timestamp-based branch numbering (useful for distributed teams)
-specify init my-project --ai claude --branch-numbering timestamp
-
 # Check system requirements
 specify check
 ```
 
 ### Available Slash Commands
 
-After running `specify init`, your AI coding agent will have access to these slash commands for structured development.
-
-For Codex CLI, `--ai-skills` installs spec-kit as agent skills instead of slash-command prompt files. In Codex skills mode, invoke spec-kit as `$speckit-constitution`, `$speckit-specify`, `$speckit-plan`, `$speckit-tasks`, and `$speckit-implement`.
+After running `specify init`, your AI coding agent will have access to these slash commands for structured development:
 
 #### Core Commands
 
@@ -346,68 +317,6 @@ Additional commands for enhanced quality and validation:
|
||||
| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `SPECIFY_FEATURE` | Override feature detection for non-Git repositories. Set to the feature directory name (e.g., `001-photo-albums`) to work on a specific feature when not using Git branches.<br/>\*\*Must be set in the context of the agent you're working with prior to using `/speckit.plan` or follow-up commands. |
|
||||
|
||||
## 🧩 Making Spec Kit Your Own: Extensions & Presets
|
||||
|
||||
Spec Kit can be tailored to your needs through two complementary systems — **extensions** and **presets** — plus project-local overrides for one-off adjustments:
|
||||
|
||||
```mermaid
|
||||
block-beta
|
||||
columns 1
|
||||
overrides["⬆ Highest priority\nProject-Local Overrides\n.specify/templates/overrides/"]
|
||||
presets["Presets — Customize core & extensions\n.specify/presets/<preset-id>/templates/"]
|
||||
extensions["Extensions — Add new capabilities\n.specify/extensions/<ext-id>/templates/"]
|
||||
core["Spec Kit Core — Built-in SDD commands & templates\n.specify/templates/\n⬇ Lowest priority"]
|
||||
|
||||
style overrides fill:transparent,stroke:#999
|
||||
style presets fill:transparent,stroke:#4a9eda
|
||||
style extensions fill:transparent,stroke:#4a9e4a
|
||||
style core fill:transparent,stroke:#e6a817
|
||||
```
|
||||
|
||||
**Templates** are resolved at **runtime** — Spec Kit walks the stack top-down and uses the first match. Project-local overrides (`.specify/templates/overrides/`) let you make one-off adjustments for a single project without creating a full preset. **Commands** are applied at **install time** — when you run `specify extension add` or `specify preset add`, command files are written into agent directories (e.g., `.claude/commands/`). If multiple presets or extensions provide the same command, the highest-priority version wins. On removal, the next-highest-priority version is restored automatically. If no overrides or customizations exist, Spec Kit uses its core defaults.
|
||||
|
||||
### Extensions — Add New Capabilities
|
||||
|
||||
Use **extensions** when you need functionality that goes beyond Spec Kit's core. Extensions introduce new commands and templates — for example, adding domain-specific workflows that are not covered by the built-in SDD commands, integrating with external tools, or adding entirely new development phases. They expand *what Spec Kit can do*.
|
||||
|
||||
```bash
|
||||
# Search available extensions
|
||||
specify extension search
|
||||
|
||||
# Install an extension
|
||||
specify extension add <extension-name>
|
||||
```
|
||||
|
||||
For example, extensions could add Jira integration, post-implementation code review, V-Model test traceability, or project health diagnostics.
|
||||
|
||||
See the [Extensions README](./extensions/README.md) for the full guide, the complete community catalog, and how to build and publish your own.
|
||||
|
||||
### Presets — Customize Existing Workflows
|
||||
|
||||
Use **presets** when you want to change *how* Spec Kit works without adding new capabilities. Presets override the templates and commands that ship with the core *and* with installed extensions — for example, enforcing a compliance-oriented spec format, using domain-specific terminology, or applying organizational standards to plans and tasks. They customize the artifacts and instructions that Spec Kit and its extensions produce.
|
||||
|
||||
```bash
|
||||
# Search available presets
|
||||
specify preset search
|
||||
|
||||
# Install a preset
|
||||
specify preset add <preset-name>
|
||||
```
|
||||
|
||||
For example, presets could restructure spec templates to require regulatory traceability, adapt the workflow to fit the methodology you use (e.g., Agile, Kanban, Waterfall, jobs-to-be-done, or domain-driven design), add mandatory security review gates to plans, enforce test-first task ordering, or localize the entire workflow to a different language. The [pirate-speak demo](https://github.com/mnriem/spec-kit-pirate-speak-preset-demo) shows just how deep the customization can go. Multiple presets can be stacked with priority ordering.
|
||||
|
||||
See the [Presets README](./presets/README.md) for the full guide, including resolution order, priority, and how to create your own.
|
||||
|
||||
### When to Use Which

| Goal | Use |
| --- | --- |
| Add a brand-new command or workflow | Extension |
| Customize the format of specs, plans, or tasks | Preset |
| Integrate an external tool or service | Extension |
| Enforce organizational or regulatory standards | Preset |
| Ship reusable domain-specific templates | Either — presets for template overrides, extensions for templates bundled with new commands |

## 📚 Core Philosophy

Spec-Driven Development is a structured process that emphasizes:

@@ -502,11 +411,11 @@ specify init <project_name> --ai copilot

# Or in current directory:
specify init . --ai claude
specify init . --ai codex --ai-skills
specify init . --ai codex

# or use --here flag
specify init --here --ai claude
specify init --here --ai codex --ai-skills
specify init --here --ai codex

# Force merge into a non-empty current directory
specify init . --force --ai claude
@@ -515,7 +424,7 @@ specify init . --force --ai claude
specify init --here --force --ai claude
```

The CLI will check if you have Claude Code, Gemini CLI, Cursor CLI, Qwen CLI, opencode, Codex CLI, Qoder CLI, Tabnine CLI, Kiro CLI, Pi, or Mistral Vibe installed. If you do not, or you prefer to get the templates without checking for the right tools, use `--ignore-agent-tools` with your command:
The CLI will check if you have Claude Code, Gemini CLI, Cursor CLI, Qwen CLI, opencode, Codex CLI, Qoder CLI, Tabnine CLI, Kiro CLI, or Mistral Vibe installed. If you do not, or you prefer to get the templates without checking for the right tools, use `--ignore-agent-tools` with your command:

```bash
specify init <project_name> --ai claude --ignore-agent-tools
```

SUPPORT.md (13 changed lines)
@@ -1,17 +1,18 @@

# Support

## How to get help
## How to file issues and get help

Please search existing [issues](https://github.com/github/spec-kit/issues) and [discussions](https://github.com/github/spec-kit/discussions) before creating new ones to avoid duplicates.
This project uses GitHub issues to track bugs and feature requests. Please search the existing issues before filing new issues to avoid duplicates. For new issues, file your bug or feature request as a new issue.

- Review the [README](./README.md) for getting started instructions and troubleshooting tips
For help or questions about using this project, please:

- Open a [GitHub issue](https://github.com/github/spec-kit/issues/new) for bug reports, feature requests, or questions about the Spec-Driven Development methodology
- Check the [comprehensive guide](./spec-driven.md) for detailed documentation on the Spec-Driven Development process
- Ask in [GitHub Discussions](https://github.com/github/spec-kit/discussions) for questions about using Spec Kit or the Spec-Driven Development methodology
- Open a [GitHub issue](https://github.com/github/spec-kit/issues/new) for bug reports and feature requests
- Review the [README](./README.md) for getting started instructions and troubleshooting tips

## Project Status

**Spec Kit** is under active development and maintained by GitHub staff and the community. We will do our best to respond to support, feature requests, and community questions as time permits.
**Spec Kit** is under active development and maintained by GitHub staff **AND THE COMMUNITY**. We will do our best to respond to support, feature requests, and community questions in a timely manner.

## GitHub Support Policy

@@ -3,7 +3,7 @@

## Prerequisites

- **Linux/macOS** (or Windows; PowerShell scripts now supported without WSL)
- AI coding agent: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), [Codebuddy CLI](https://www.codebuddy.ai/cli), [Gemini CLI](https://github.com/google-gemini/gemini-cli), or [Pi Coding Agent](https://pi.dev)
- AI coding agent: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), [Codebuddy CLI](https://www.codebuddy.ai/cli) or [Gemini CLI](https://github.com/google-gemini/gemini-cli)
- [uv](https://docs.astral.sh/uv/) for package management
- [Python 3.11+](https://www.python.org/downloads/)
- [Git](https://git-scm.com/downloads)
@@ -12,22 +12,18 @@

### Initialize a New Project

The easiest way to get started is to initialize a new project. Pin a specific release tag for stability (check [Releases](https://github.com/github/spec-kit/releases) for the latest):
The easiest way to get started is to initialize a new project:

```bash
# Install from a specific stable release (recommended — replace vX.Y.Z with the latest tag)
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <PROJECT_NAME>

# Or install latest from main (may include unreleased changes)
uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME>
```

Or initialize in the current directory:

```bash
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init .
uvx --from git+https://github.com/github/spec-kit.git specify init .
# or use the --here flag
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init --here
uvx --from git+https://github.com/github/spec-kit.git specify init --here
```

### Specify AI Agent
@@ -35,11 +31,10 @@ uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init --here
You can proactively specify your AI agent during initialization:

```bash
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai claude
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai gemini
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai copilot
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai codebuddy
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai pi
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai claude
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai gemini
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai copilot
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai codebuddy
```

### Specify Script Type (Shell vs PowerShell)
@@ -55,8 +50,8 @@ Auto behavior:
Force a specific script type:

```bash
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --script sh
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --script ps
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --script sh
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --script ps
```

### Ignore Agent Tools Check
@@ -64,7 +59,7 @@ uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <proje
If you prefer to get the templates without checking for the right tools:

```bash
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init <project_name> --ai claude --ignore-agent-tools
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai claude --ignore-agent-tools
```

## Verification
@@ -79,52 +74,6 @@ The `.specify/scripts` directory will contain both `.sh` and `.ps1` scripts.

## Troubleshooting

### Enterprise / Air-Gapped Installation

If your environment blocks access to PyPI (you see 403 errors when running `uv tool install` or `pip install`), you can create a portable wheel bundle on a connected machine and transfer it to the air-gapped target.

**Step 1: Build the wheel on a connected machine (same OS and Python version as the target)**

```bash
# Clone the repository
git clone https://github.com/github/spec-kit.git
cd spec-kit

# Build the wheel
pip install build
python -m build --wheel --outdir dist/

# Download the wheel and all its runtime dependencies
pip download -d dist/ dist/specify_cli-*.whl
```

> **Important:** `pip download` resolves platform-specific wheels (e.g., PyYAML includes native extensions). You must run this step on a machine with the **same OS and Python version** as the air-gapped target. If you need to support multiple platforms, repeat this step on each target OS (Linux, macOS, Windows) and Python version.

**Step 2: Transfer the `dist/` directory to the air-gapped machine**

Copy the entire `dist/` directory (which contains the `specify-cli` wheel and all dependency wheels) to the target machine via USB, network share, or other approved transfer method.

**Step 3: Install on the air-gapped machine**

```bash
pip install --no-index --find-links=./dist specify-cli
```

**Step 4: Initialize a project (no network required)**

```bash
# Initialize a project — no GitHub access needed
specify init my-project --ai claude --offline
```

The `--offline` flag tells the CLI to use the templates, commands, and scripts bundled inside the wheel instead of downloading from GitHub.

> **Deprecation notice:** Starting with v0.6.0, `specify init` will use bundled assets by default and the `--offline` flag will be removed. The GitHub download path will be retired because bundled assets eliminate the need for network access, avoid proxy/firewall issues, and guarantee that templates always match the installed CLI version. No action will be needed — `specify init` will simply work without network access out of the box.

> **Note:** Python 3.11+ is required.

> **Windows note:** Offline scaffolding requires PowerShell 7+ (`pwsh`), not Windows PowerShell 5.x (`powershell.exe`). Install from https://aka.ms/powershell.

### Git Credential Manager on Linux

If you're having issues with Git authentication on Linux, you can install Git Credential Manager:

@@ -8,7 +8,7 @@

| What to Upgrade | Command | When to Use |
|----------------|---------|-------------|
| **CLI Tool Only** | `uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git@vX.Y.Z` | Get latest CLI features without touching project files |
| **CLI Tool Only** | `uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git` | Get latest CLI features without touching project files |
| **Project Files** | `specify init --here --force --ai <your-agent>` | Update slash commands, templates, and scripts in your project |
| **Both** | Run CLI upgrade, then project update | Recommended for major version updates |

@@ -20,18 +20,16 @@ The CLI tool (`specify`) is separate from your project files. Upgrade it to get

### If you installed with `uv tool install`

Upgrade to a specific release (check [Releases](https://github.com/github/spec-kit/releases) for the latest tag):

```bash
uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git@vX.Y.Z
uv tool install specify-cli --force --from git+https://github.com/github/spec-kit.git
```

### If you use one-shot `uvx` commands

Specify the desired release tag:
No upgrade needed—`uvx` always fetches the latest version. Just run your commands as normal:

```bash
uvx --from git+https://github.com/github/spec-kit.git@vX.Y.Z specify init --here --ai copilot
uvx --from git+https://github.com/github/spec-kit.git specify init --here --ai copilot
```

### Verify the upgrade

@@ -291,9 +289,8 @@ This tells Spec Kit which feature directory to use when creating specs, plans, a

```bash
ls -la .claude/commands/   # Claude Code
ls -la .gemini/commands/   # Gemini
ls -la .cursor/commands/   # Cursor
ls -la .pi/prompts/        # Pi Coding Agent
ls -la .gemini/commands/   # Gemini
ls -la .cursor/commands/   # Cursor
```

3. **Check agent-specific setup:**
@@ -401,7 +398,7 @@ The `specify` CLI tool is used for:
- **Upgrades:** `specify init --here --force` to update templates and commands
- **Diagnostics:** `specify check` to verify tool installation

Once you've run `specify init`, the slash commands (like `/speckit.specify`, `/speckit.plan`, etc.) are **permanently installed** in your project's agent folder (`.claude/`, `.github/prompts/`, `.pi/prompts/`, etc.). Your AI assistant reads these command files directly—no need to run `specify` again.
Once you've run `specify init`, the slash commands (like `/speckit.specify`, `/speckit.plan`, etc.) are **permanently installed** in your project's agent folder (`.claude/`, `.github/prompts/`, etc.). Your AI assistant reads these command files directly—no need to run `specify` again.

**If your agent isn't recognizing slash commands:**
@@ -413,9 +410,6 @@ Once you've run `specify init`, the slash commands (like `/speckit.specify`, `/s

```bash
# For Claude
ls -la .claude/commands/

# For Pi
ls -la .pi/prompts/
```

2. **Restart your IDE/editor completely** (not just reload window)

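Because the command files are plain directories on disk, a quick loop can confirm they exist for whichever agents you use (a sketch; the directory names are taken from the examples above):

```bash
# Report which agent command folders are present and how many files each holds.
for d in .claude/commands .github/prompts .gemini/commands .cursor/commands .pi/prompts; do
  [ -d "$d" ] && printf '%s: %s files\n' "$d" "$(ls "$d" | wc -l)"
done
```
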
@@ -53,7 +53,7 @@ provides:
  required: boolean # Default: false

hooks: # Optional, event hooks
  event_name: # e.g., "after_specify", "after_plan", "after_tasks", "after_implement"
  event_name: # e.g., "after_tasks", "after_implement"
    command: string # Command to execute
    optional: boolean # Default: true
    prompt: string # Prompt text for optional hooks
@@ -108,7 +108,7 @@ defaults: # Optional, default configuration values
#### `hooks`

- **Type**: object
- **Keys**: Event names (e.g., `after_specify`, `after_plan`, `after_tasks`, `after_implement`, `before_commit`)
- **Keys**: Event names (e.g., `after_tasks`, `after_implement`, `before_commit`)
- **Description**: Hooks that execute at lifecycle events
- **Events**: Defined by core spec-kit commands

@@ -551,16 +551,10 @@ hooks:

Standard events (defined by core):

- `before_specify` - Before specification generation
- `after_specify` - After specification generation
- `before_plan` - Before implementation planning
- `after_plan` - After implementation planning
- `before_tasks` - Before task generation
- `after_tasks` - After task generation
- `before_implement` - Before implementation
- `after_implement` - After implementation
- `before_commit` - Before git commit *(planned - not yet wired into core templates)*
- `after_commit` - After git commit *(planned - not yet wired into core templates)*
- `before_commit` - Before git commit
- `after_commit` - After git commit

### Hook Configuration

@@ -209,22 +209,9 @@ Edit `extensions/catalog.community.json` and add your extension:
Add your extension to the Available Extensions table in `extensions/README.md`:

```markdown
| Your Extension Name | Brief description of what it does | `<category>` | <effect> | [repo-name](https://github.com/your-org/spec-kit-your-extension) |
| Your Extension Name | Brief description of what it does | [repo-name](https://github.com/your-org/spec-kit-your-extension) |
```

**(Table) Category** — pick the one that best fits your extension:

- `docs` — reads, validates, or generates spec artifacts
- `code` — reviews, validates, or modifies source code
- `process` — orchestrates workflow across phases
- `integration` — syncs with external platforms
- `visibility` — reports on project health or progress

**Effect** — choose one:

- Read-only — produces reports without modifying files
- Read+Write — modifies files, creates artifacts, or updates specs

Insert your extension in alphabetical order in the table.

### 4. Submit Pull Request

@@ -387,9 +387,6 @@ settings:
  auto_execute_hooks: true

# Hook configuration
# Available events: before_specify, after_specify, before_plan, after_plan,
# before_tasks, after_tasks, before_implement, after_implement
# Planned (not yet wired into core templates): before_commit, after_commit
hooks:
  after_tasks:
    - extension: jira

@@ -70,34 +70,20 @@ specify extension add --from https://github.com/org/spec-kit-ext/archive/refs/ta

The following community-contributed extensions are available in [`catalog.community.json`](catalog.community.json):

**Categories:** `docs` — reads, validates, or generates spec artifacts · `code` — reviews, validates, or modifies source code · `process` — orchestrates workflow across phases · `integration` — syncs with external platforms · `visibility` — reports on project health or progress

**Effect:** `Read-only` — produces reports without modifying files · `Read+Write` — modifies files, creates artifacts, or updates specs

| Extension | Purpose | Category | Effect | URL |
|-----------|---------|----------|--------|-----|
| Archive Extension | Archive merged features into main project memory. | `docs` | Read+Write | [spec-kit-archive](https://github.com/stn1slv/spec-kit-archive) |
| Azure DevOps Integration | Sync user stories and tasks to Azure DevOps work items using OAuth authentication | `integration` | Read+Write | [spec-kit-azure-devops](https://github.com/pragya247/spec-kit-azure-devops) |
| Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | `code` | Read+Write | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) |
| Cognitive Squad | Multi-agent cognitive system with Triadic Model: understanding, internalization, application — with quality gates, backpropagation verification, and self-healing | `docs` | Read+Write | [cognitive-squad](https://github.com/Testimonial/cognitive-squad) |
| Conduct Extension | Orchestrates spec-kit phases via sub-agent delegation to reduce context pollution. | `process` | Read+Write | [spec-kit-conduct-ext](https://github.com/twbrandon7/spec-kit-conduct-ext) |
| DocGuard — CDD Enforcement | Canonical-Driven Development enforcement. Validates, scores, and traces project documentation with automated checks, AI-driven workflows, and spec-kit hooks. Zero NPM runtime dependencies. | `docs` | Read+Write | [spec-kit-docguard](https://github.com/raccioly/docguard) |
| Fleet Orchestrator | Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases | `process` | Read+Write | [spec-kit-fleet](https://github.com/sharathsatish/spec-kit-fleet) |
| Iterate | Iterate on spec documents with a two-phase define-and-apply workflow — refine specs mid-implementation and go straight back to building | `docs` | Read+Write | [spec-kit-iterate](https://github.com/imviancagrace/spec-kit-iterate) |
| Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | `integration` | Read+Write | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
| Learning Extension | Generate educational guides from implementations and enhance clarifications with mentoring context | `docs` | Read+Write | [spec-kit-learn](https://github.com/imviancagrace/spec-kit-learn) |
| Project Health Check | Diagnose a Spec Kit project and report health issues across structure, agents, features, scripts, extensions, and git | `visibility` | Read-only | [spec-kit-doctor](https://github.com/KhawarHabibKhan/spec-kit-doctor) |
| Project Status | Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary | `visibility` | Read-only | [spec-kit-status](https://github.com/KhawarHabibKhan/spec-kit-status) |
| Ralph Loop | Autonomous implementation loop using AI agent CLI | `code` | Read+Write | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |
| Reconcile Extension | Reconcile implementation drift by surgically updating feature artifacts. | `docs` | Read+Write | [spec-kit-reconcile](https://github.com/stn1slv/spec-kit-reconcile) |
| Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | `docs` | Read+Write | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
| Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | `code` | Read-only | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
| SDD Utilities | Resume interrupted workflows, validate project health, and verify spec-to-task traceability | `process` | Read+Write | [speckit-utils](https://github.com/mvanhorn/speckit-utils) |
| Spec Sync | Detect and resolve drift between specs and implementation. AI-assisted resolution with human approval | `docs` | Read+Write | [spec-kit-sync](https://github.com/bgervin/spec-kit-sync) |
| Understanding | Automated requirements quality analysis — 31 deterministic metrics against IEEE/ISO standards with experimental energy-based ambiguity detection | `docs` | Read-only | [understanding](https://github.com/Testimonial/understanding) |
| V-Model Extension Pack | Enforces V-Model paired generation of development specs and test specs with full traceability | `docs` | Read+Write | [spec-kit-v-model](https://github.com/leocamello/spec-kit-v-model) |
| Verify Extension | Post-implementation quality gate that validates implemented code against specification artifacts | `code` | Read-only | [spec-kit-verify](https://github.com/ismaelJimenez/spec-kit-verify) |
| Verify Tasks Extension | Detect phantom completions: tasks marked [X] in tasks.md with no real implementation | `code` | Read-only | [spec-kit-verify-tasks](https://github.com/datastone-inc/spec-kit-verify-tasks) |
| Extension | Purpose | URL |
|-----------|---------|-----|
| Azure DevOps Integration | Sync user stories and tasks to Azure DevOps work items using OAuth authentication | [spec-kit-azure-devops](https://github.com/pragya247/spec-kit-azure-devops) |
| Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) |
| Fleet Orchestrator | Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases | [spec-kit-fleet](https://github.com/sharathsatish/spec-kit-fleet) |
| Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
| Project Health Check | Diagnose a Spec Kit project and report health issues across structure, agents, features, scripts, extensions, and git | [spec-kit-doctor](https://github.com/KhawarHabibKhan/spec-kit-doctor) |
| Ralph Loop | Autonomous implementation loop using AI agent CLI | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |
| Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
| Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
| Spec Sync | Detect and resolve drift between specs and implementation. AI-assisted resolution with human approval | [spec-kit-sync](https://github.com/bgervin/spec-kit-sync) |
| Understanding | Automated requirements quality analysis — 31 deterministic metrics against IEEE/ISO standards with experimental energy-based ambiguity detection | [understanding](https://github.com/Testimonial/understanding) |
| V-Model Extension Pack | Enforces V-Model paired generation of development specs and test specs with full traceability | [spec-kit-v-model](https://github.com/leocamello/spec-kit-v-model) |
| Verify Extension | Post-implementation quality gate that validates implemented code against specification artifacts | [spec-kit-verify](https://github.com/ismaelJimenez/spec-kit-verify) |

## Adding Your Extension

@@ -359,15 +359,12 @@ specify extension add jira
      "installed_at": "2026-01-28T14:30:00Z",
      "source": "catalog",
      "manifest_hash": "sha256:abc123...",
      "enabled": true,
      "priority": 10
      "enabled": true
    }
  }
}
```

**Priority Field**: Extensions are ordered by `priority` (lower = higher precedence). Default is 10. Used for template resolution when multiple extensions provide the same template.

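A hedged way to see the effective ordering is to read the registry directly. The path and outer shape below are assumptions based on the excerpt above, not a documented interface:

```bash
# List installed extensions sorted by priority (lower number = higher precedence).
python3 - <<'PY'
import json

with open(".specify/extensions/.registry") as f:  # assumed registry location
    registry = json.load(f)

entries = registry.get("extensions", registry)  # assumed outer key
for name, meta in sorted(entries.items(), key=lambda kv: kv[1].get("priority", 10)):
    print(f"{meta.get('priority', 10):>3}  {name}  enabled={meta.get('enabled', True)}")
PY
```
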
### 3. Configuration

```bash
@@ -1087,15 +1084,11 @@ List installed extensions in current project.
$ specify extension list

Installed Extensions:
✓ Jira Integration (v1.0.0)
  jira
  Create Jira issues from spec-kit artifacts
  Commands: 3 | Hooks: 2 | Priority: 10 | Status: Enabled
✓ jira (v1.0.0) - Jira Integration
  Commands: 3 | Hooks: 2 | Status: Enabled

✓ Linear Integration (v0.9.0)
  linear
  Create Linear issues from spec-kit artifacts
  Commands: 1 | Hooks: 1 | Priority: 10 | Status: Enabled
✓ linear (v0.9.0) - Linear Integration
  Commands: 1 | Hooks: 1 | Status: Enabled
```

**Options:**

@@ -1203,9 +1196,10 @@ Next steps:

**Options:**

- `--from URL`: Install from a remote URL (archive). Does not accept Git repositories directly.
- `--dev`: Install from a local path in development mode (the PATH is the positional `extension` argument).
- `--priority NUMBER`: Set resolution priority (lower = higher precedence, default 10)
- `--from URL`: Install from custom URL or Git repo
- `--version VERSION`: Install specific version
- `--dev PATH`: Install from local path (development mode)
- `--no-register`: Skip command registration (manual setup)

#### `specify extension remove NAME`

@@ -1286,29 +1280,6 @@ $ specify extension disable jira
To re-enable: specify extension enable jira
```

#### `specify extension set-priority NAME PRIORITY`

Change the resolution priority of an installed extension.

```bash
$ specify extension set-priority jira 5

✓ Extension 'Jira Integration' priority changed: 10 → 5

Lower priority = higher precedence in template resolution
```

**Priority Values:**

- Lower numbers = higher precedence (checked first in resolution)
- Default priority is 10
- Must be a positive integer (1 or higher)

**Use Cases:**

- Ensure a critical extension's templates take precedence
- Override default resolution order when multiple extensions provide similar templates

---

## Compatibility & Versioning

@@ -1,39 +1,8 @@
{
  "schema_version": "1.0",
  "updated_at": "2026-03-19T12:08:20Z",
  "updated_at": "2026-03-13T12:00:00Z",
  "catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json",
  "extensions": {
    "archive": {
      "name": "Archive Extension",
      "id": "archive",
      "description": "Archive merged features into main project memory, resolving gaps and conflicts.",
      "author": "Stanislav Deviatov",
      "version": "1.0.0",
      "download_url": "https://github.com/stn1slv/spec-kit-archive/archive/refs/tags/v1.0.0.zip",
      "repository": "https://github.com/stn1slv/spec-kit-archive",
      "homepage": "https://github.com/stn1slv/spec-kit-archive",
      "documentation": "https://github.com/stn1slv/spec-kit-archive/blob/main/README.md",
      "changelog": "https://github.com/stn1slv/spec-kit-archive/blob/main/CHANGELOG.md",
      "license": "MIT",
      "requires": {
        "speckit_version": ">=0.1.0"
      },
      "provides": {
        "commands": 1,
        "hooks": 0
      },
      "tags": [
        "archive",
        "memory",
        "merge",
        "changelog"
      ],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-14T00:00:00Z",
      "updated_at": "2026-03-14T00:00:00Z"
    },
    "azure-devops": {
      "name": "Azure DevOps Integration",
      "id": "azure-devops",
@@ -105,122 +74,6 @@
      "created_at": "2026-02-22T00:00:00Z",
      "updated_at": "2026-02-22T00:00:00Z"
    },
    "cognitive-squad": {
      "name": "Cognitive Squad",
      "id": "cognitive-squad",
      "description": "Multi-agent cognitive system with Triadic Model: understanding, internalization, application — with quality gates, backpropagation verification, and self-healing",
      "author": "Testimonial",
      "version": "0.1.0",
      "download_url": "https://github.com/Testimonial/cognitive-squad/archive/refs/tags/v0.1.0.zip",
      "repository": "https://github.com/Testimonial/cognitive-squad",
      "homepage": "https://github.com/Testimonial/cognitive-squad",
      "documentation": "https://github.com/Testimonial/cognitive-squad/blob/main/README.md",
      "changelog": "https://github.com/Testimonial/cognitive-squad/blob/main/CHANGELOG.md",
      "license": "MIT",
      "requires": {
        "speckit_version": ">=0.3.0",
        "tools": [
          {
            "name": "understanding",
            "version": ">=3.4.0",
            "required": false
          },
          {
            "name": "spec-kit-reverse-eng",
            "version": ">=1.0.0",
            "required": false
          }
        ]
      },
      "provides": {
        "commands": 10,
        "hooks": 1
      },
      "tags": [
        "ai-agents",
        "cognitive",
        "full-lifecycle",
        "verification",
        "multi-agent"
      ],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-16T00:00:00Z",
      "updated_at": "2026-03-18T00:00:00Z"
    },
    "conduct": {
      "name": "Conduct Extension",
      "id": "conduct",
      "description": "Executes a single spec-kit phase via sub-agent delegation to reduce context pollution.",
      "author": "twbrandon7",
      "version": "1.0.0",
      "download_url": "https://github.com/twbrandon7/spec-kit-conduct-ext/archive/refs/tags/v1.0.0.zip",
      "repository": "https://github.com/twbrandon7/spec-kit-conduct-ext",
      "homepage": "https://github.com/twbrandon7/spec-kit-conduct-ext",
      "documentation": "https://github.com/twbrandon7/spec-kit-conduct-ext/blob/main/README.md",
      "changelog": "https://github.com/twbrandon7/spec-kit-conduct-ext/blob/main/CHANGELOG.md",
      "license": "MIT",
      "requires": {
        "speckit_version": ">=0.3.1"
      },
      "provides": {
        "commands": 1,
        "hooks": 0
      },
      "tags": [
        "conduct",
        "workflow",
        "automation"
      ],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-19T12:08:20Z",
      "updated_at": "2026-03-19T12:08:20Z"
    },
    "docguard": {
      "name": "DocGuard \u2014 CDD Enforcement",
      "id": "docguard",
      "description": "Canonical-Driven Development enforcement. Validates, scores, and traces project documentation with automated checks, AI-driven workflows, and spec-kit hooks. Zero NPM runtime dependencies.",
      "author": "raccioly",
      "version": "0.9.11",
      "download_url": "https://github.com/raccioly/docguard/releases/download/v0.9.11/spec-kit-docguard-v0.9.11.zip",
      "repository": "https://github.com/raccioly/docguard",
      "homepage": "https://www.npmjs.com/package/docguard-cli",
      "documentation": "https://github.com/raccioly/docguard/blob/main/extensions/spec-kit-docguard/README.md",
      "changelog": "https://github.com/raccioly/docguard/blob/main/CHANGELOG.md",
      "license": "MIT",
      "requires": {
        "speckit_version": ">=0.1.0",
        "tools": [
          {
            "name": "node",
            "version": ">=18.0.0",
            "required": true
          }
        ]
      },
      "provides": {
        "commands": 6,
        "hooks": 3
      },
      "tags": [
        "documentation",
        "validation",
        "quality",
        "cdd",
        "traceability",
        "ai-agents",
        "enforcement",
        "spec-kit"
      ],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-13T00:00:00Z",
      "updated_at": "2026-03-18T18:53:31Z"
    },
    "doctor": {
      "name": "Project Health Check",
      "id": "doctor",
@@ -271,48 +124,13 @@
        "commands": 2,
        "hooks": 1
      },
      "tags": [
        "orchestration",
        "workflow",
        "human-in-the-loop",
        "parallel"
      ],
      "tags": ["orchestration", "workflow", "human-in-the-loop", "parallel"],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-06T00:00:00Z",
      "updated_at": "2026-03-06T00:00:00Z"
    },
    "iterate": {
      "name": "Iterate",
      "id": "iterate",
      "description": "Iterate on spec documents with a two-phase define-and-apply workflow — refine specs mid-implementation and go straight back to building",
      "author": "Vianca Martinez",
      "version": "2.0.0",
      "download_url": "https://github.com/imviancagrace/spec-kit-iterate/archive/refs/tags/v2.0.0.zip",
      "repository": "https://github.com/imviancagrace/spec-kit-iterate",
      "homepage": "https://github.com/imviancagrace/spec-kit-iterate",
      "documentation": "https://github.com/imviancagrace/spec-kit-iterate/blob/main/README.md",
      "changelog": "https://github.com/imviancagrace/spec-kit-iterate/blob/main/CHANGELOG.md",
      "license": "MIT",
      "requires": {
        "speckit_version": ">=0.1.0"
      },
      "provides": {
        "commands": 2,
        "hooks": 0
      },
      "tags": [
        "iteration",
        "change-management",
        "spec-maintenance"
      ],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-17T00:00:00Z",
      "updated_at": "2026-03-17T00:00:00Z"
    },
    "jira": {
      "name": "Jira Integration",
      "id": "jira",
@@ -373,49 +191,13 @@
        "commands": 2,
        "hooks": 1
      },
      "tags": [
        "implementation",
        "automation",
        "loop",
        "copilot"
      ],
      "tags": ["implementation", "automation", "loop", "copilot"],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-09T00:00:00Z",
      "updated_at": "2026-03-09T00:00:00Z"
    },
    "reconcile": {
      "name": "Reconcile Extension",
      "id": "reconcile",
      "description": "Reconcile implementation drift by surgically updating the feature's own spec, plan, and tasks.",
      "author": "Stanislav Deviatov",
      "version": "1.0.0",
      "download_url": "https://github.com/stn1slv/spec-kit-reconcile/archive/refs/tags/v1.0.0.zip",
      "repository": "https://github.com/stn1slv/spec-kit-reconcile",
      "homepage": "https://github.com/stn1slv/spec-kit-reconcile",
      "documentation": "https://github.com/stn1slv/spec-kit-reconcile/blob/main/README.md",
      "changelog": "https://github.com/stn1slv/spec-kit-reconcile/blob/main/CHANGELOG.md",
      "license": "MIT",
      "requires": {
        "speckit_version": ">=0.1.0"
      },
      "provides": {
        "commands": 1,
        "hooks": 0
      },
      "tags": [
        "reconcile",
        "drift",
        "tasks",
        "remediation"
      ],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-14T00:00:00Z",
      "updated_at": "2026-03-14T00:00:00Z"
    },
    "retrospective": {
      "name": "Retrospective Extension",
      "id": "retrospective",
@@ -467,53 +249,13 @@
        "commands": 7,
        "hooks": 1
      },
      "tags": [
        "code-review",
        "quality",
        "review",
        "testing",
        "error-handling",
        "type-design",
        "simplification"
      ],
      "tags": ["code-review", "quality", "review", "testing", "error-handling", "type-design", "simplification"],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-06T00:00:00Z",
      "updated_at": "2026-03-06T00:00:00Z"
    },
    "speckit-utils": {
      "name": "SDD Utilities",
      "id": "speckit-utils",
      "description": "Resume interrupted workflows, validate project health, and verify spec-to-task traceability.",
      "author": "mvanhorn",
      "version": "1.0.0",
      "download_url": "https://github.com/mvanhorn/speckit-utils/archive/refs/tags/v1.0.0.zip",
      "repository": "https://github.com/mvanhorn/speckit-utils",
      "homepage": "https://github.com/mvanhorn/speckit-utils",
      "documentation": "https://github.com/mvanhorn/speckit-utils/blob/main/README.md",
      "changelog": "https://github.com/mvanhorn/speckit-utils/blob/main/CHANGELOG.md",
      "license": "MIT",
      "requires": {
        "speckit_version": ">=0.1.0"
      },
      "provides": {
        "commands": 3,
        "hooks": 2
      },
      "tags": [
        "resume",
        "doctor",
        "validate",
        "workflow",
        "health-check"
      ],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-18T00:00:00Z",
      "updated_at": "2026-03-18T00:00:00Z"
    },
    "sync": {
      "name": "Spec Sync",
      "id": "sync",
@@ -549,7 +291,7 @@
    "understanding": {
      "name": "Understanding",
      "id": "understanding",
      "description": "Automated requirements quality analysis \u2014 validates specs against IEEE/ISO standards using 31 deterministic metrics. Catches ambiguity, missing testability, and structural issues before they reach implementation. Includes experimental energy-based ambiguity detection using local LM token perplexity.",
      "description": "Automated requirements quality analysis — validates specs against IEEE/ISO standards using 31 deterministic metrics. Catches ambiguity, missing testability, and structural issues before they reach implementation. Includes experimental energy-based ambiguity detection using local LM token perplexity.",
      "author": "Ladislav Bihari",
      "version": "3.4.0",
      "download_url": "https://github.com/Testimonial/understanding/archive/refs/tags/v3.4.0.zip",
@@ -587,38 +329,6 @@
      "created_at": "2026-03-07T00:00:00Z",
      "updated_at": "2026-03-07T00:00:00Z"
    },
    "status": {
      "name": "Project Status",
      "id": "status",
      "description": "Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary.",
      "author": "KhawarHabibKhan",
      "version": "1.0.0",
      "download_url": "https://github.com/KhawarHabibKhan/spec-kit-status/archive/refs/tags/v1.0.0.zip",
      "repository": "https://github.com/KhawarHabibKhan/spec-kit-status",
      "homepage": "https://github.com/KhawarHabibKhan/spec-kit-status",
      "documentation": "https://github.com/KhawarHabibKhan/spec-kit-status/blob/main/README.md",
      "changelog": "https://github.com/KhawarHabibKhan/spec-kit-status/blob/main/CHANGELOG.md",
      "license": "MIT",
      "requires": {
        "speckit_version": ">=0.1.0"
      },
      "provides": {
        "commands": 1,
        "hooks": 0
      },
      "tags": [
        "status",
        "workflow",
        "progress",
        "feature-tracking",
        "task-progress"
      ],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-16T00:00:00Z",
      "updated_at": "2026-03-16T00:00:00Z"
    },
    "v-model": {
      "name": "V-Model Extension Pack",
      "id": "v-model",
@@ -651,37 +361,6 @@
      "created_at": "2026-02-20T00:00:00Z",
      "updated_at": "2026-02-22T00:00:00Z"
    },
    "learn": {
      "name": "Learning Extension",
      "id": "learn",
      "description": "Generate educational guides from implementations and enhance clarifications with mentoring context.",
      "author": "Vianca Martinez",
      "version": "1.0.0",
      "download_url": "https://github.com/imviancagrace/spec-kit-learn/archive/refs/tags/v1.0.0.zip",
      "repository": "https://github.com/imviancagrace/spec-kit-learn",
      "homepage": "https://github.com/imviancagrace/spec-kit-learn",
      "documentation": "https://github.com/imviancagrace/spec-kit-learn/blob/main/README.md",
      "changelog": "https://github.com/imviancagrace/spec-kit-learn/blob/main/CHANGELOG.md",
      "license": "MIT",
      "requires": {
        "speckit_version": ">=0.1.0"
      },
      "provides": {
        "commands": 2,
        "hooks": 1
      },
      "tags": [
        "learning",
        "education",
        "mentoring",
        "knowledge-transfer"
      ],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-17T00:00:00Z",
      "updated_at": "2026-03-17T00:00:00Z"
    },
    "verify": {
      "name": "Verify Extension",
      "id": "verify",
@@ -713,37 +392,6 @@
      "stars": 0,
      "created_at": "2026-03-03T00:00:00Z",
      "updated_at": "2026-03-03T00:00:00Z"
    },
    "verify-tasks": {
      "name": "Verify Tasks Extension",
      "id": "verify-tasks",
      "description": "Detect phantom completions: tasks marked [X] in tasks.md with no real implementation.",
      "author": "Dave Sharpe",
      "version": "1.0.0",
      "download_url": "https://github.com/datastone-inc/spec-kit-verify-tasks/archive/refs/tags/v1.0.0.zip",
      "repository": "https://github.com/datastone-inc/spec-kit-verify-tasks",
      "homepage": "https://github.com/datastone-inc/spec-kit-verify-tasks",
      "documentation": "https://github.com/datastone-inc/spec-kit-verify-tasks/blob/main/README.md",
      "changelog": "https://github.com/datastone-inc/spec-kit-verify-tasks/blob/main/CHANGELOG.md",
      "license": "MIT",
      "requires": {
        "speckit_version": ">=0.1.0"
      },
      "provides": {
        "commands": 1,
        "hooks": 1
      },
      "tags": [
        "verification",
        "quality",
        "phantom-completion",
        "tasks"
      ],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-16T00:00:00Z",
      "updated_at": "2026-03-16T00:00:00Z"
    }
  }
}

@@ -13,15 +13,13 @@ When Spec Kit needs a template (e.g. `spec-template`), it walks a resolution sta

If no preset is installed, core templates are used — exactly the same behavior as before presets existed.

Template resolution happens **at runtime** — although preset files are copied into `.specify/presets/<id>/` during installation, Spec Kit walks the resolution stack on every template lookup rather than merging templates into a single location.

For detailed resolution and command registration flows, see [ARCHITECTURE.md](ARCHITECTURE.md).

## Command Overrides

Presets can also override the commands that guide the SDD workflow. Templates define *what* gets produced (specs, plans, constitutions); commands define *how* the LLM produces them (the step-by-step instructions).

Unlike templates, command overrides are applied **at install time**. When a preset includes `type: "command"` entries, the commands are registered into all detected agent directories (`.claude/commands/`, `.gemini/commands/`, etc.) in the correct format (Markdown or TOML with appropriate argument placeholders). When the preset is removed, the registered commands are cleaned up.
When a preset includes `type: "command"` entries, the commands are automatically registered into all detected agent directories (`.claude/commands/`, `.gemini/commands/`, etc.) in the correct format (Markdown or TOML with appropriate argument placeholders). When the preset is removed, the registered commands are cleaned up.

## Quick Start

@@ -1,6 +1,6 @@
[project]
name = "specify-cli"
version = "0.3.2"
version = "0.3.0"
description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)."
requires-python = ">=3.11"
dependencies = [
@@ -14,7 +14,6 @@ dependencies = [
    "pyyaml>=6.0",
    "packaging>=23.0",
    "pathspec>=0.12.0",
    "json5>=0.13.0",
]

[project.scripts]
@@ -27,23 +26,6 @@ build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel]
packages = ["src/specify_cli"]

[tool.hatch.build.targets.wheel.force-include]
# Bundle core assets so `specify init` works without network access (air-gapped / enterprise)
# Page templates (exclude commands/ — bundled separately below to avoid duplication)
"templates/agent-file-template.md" = "specify_cli/core_pack/templates/agent-file-template.md"
"templates/checklist-template.md" = "specify_cli/core_pack/templates/checklist-template.md"
"templates/constitution-template.md" = "specify_cli/core_pack/templates/constitution-template.md"
"templates/plan-template.md" = "specify_cli/core_pack/templates/plan-template.md"
"templates/spec-template.md" = "specify_cli/core_pack/templates/spec-template.md"
"templates/tasks-template.md" = "specify_cli/core_pack/templates/tasks-template.md"
"templates/vscode-settings.json" = "specify_cli/core_pack/templates/vscode-settings.json"
# Command templates
"templates/commands" = "specify_cli/core_pack/commands"
"scripts/bash" = "specify_cli/core_pack/scripts/bash"
"scripts/powershell" = "specify_cli/core_pack/scripts/powershell"
".github/workflows/scripts/create-release-packages.sh" = "specify_cli/core_pack/release_scripts/create-release-packages.sh"
".github/workflows/scripts/create-release-packages.ps1" = "specify_cli/core_pack/release_scripts/create-release-packages.ps1"

[project.optional-dependencies]
test = [
    "pytest>=7.0",

@@ -168,7 +168,7 @@ if $JSON_MODE; then
  if [[ ${#docs[@]} -eq 0 ]]; then
    json_docs="[]"
  else
    json_docs=$(for d in "${docs[@]}"; do printf '"%s",' "$(json_escape "$d")"; done)
    json_docs=$(printf '"%s",' "${docs[@]}")
    json_docs="[${json_docs%,}]"
  fi
  printf '{"FEATURE_DIR":"%s","AVAILABLE_DOCS":%s}\n' "$(json_escape "$FEATURE_DIR")" "$json_docs"

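To make the join idiom above concrete, here is a standalone run with illustrative file names:

```bash
docs=("research.md" "data-model.md")
json_docs=$(for d in "${docs[@]}"; do printf '"%s",' "$d"; done)
json_docs="[${json_docs%,}]"
echo "$json_docs"   # ["research.md","data-model.md"]
```
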
@@ -33,27 +33,16 @@ get_current_branch() {
  if [[ -d "$specs_dir" ]]; then
    local latest_feature=""
    local highest=0
    local latest_timestamp=""

    for dir in "$specs_dir"/*; do
      if [[ -d "$dir" ]]; then
        local dirname=$(basename "$dir")
        if [[ "$dirname" =~ ^([0-9]{8}-[0-9]{6})- ]]; then
          # Timestamp-based branch: compare lexicographically
          local ts="${BASH_REMATCH[1]}"
          if [[ "$ts" > "$latest_timestamp" ]]; then
            latest_timestamp="$ts"
            latest_feature=$dirname
          fi
        elif [[ "$dirname" =~ ^([0-9]{3})- ]]; then
        if [[ "$dirname" =~ ^([0-9]{3})- ]]; then
          local number=${BASH_REMATCH[1]}
          number=$((10#$number))
          if [[ "$number" -gt "$highest" ]]; then
            highest=$number
            # Only update if no timestamp branch found yet
            if [[ -z "$latest_timestamp" ]]; then
              latest_feature=$dirname
            fi
            latest_feature=$dirname
          fi
        fi
      fi

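The plain string comparison above is safe because the timestamp prefix is fixed-width and zero-padded, so lexicographic order matches chronological order:

```bash
[[ "20260319-143022" > "20260318-090000" ]] && echo "later timestamp wins"
```
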
@@ -83,9 +72,9 @@ check_feature_branch() {
    return 0
  fi

  if [[ ! "$branch" =~ ^[0-9]{3}- ]] && [[ ! "$branch" =~ ^[0-9]{8}-[0-9]{6}- ]]; then
  if [[ ! "$branch" =~ ^[0-9]{3}- ]]; then
    echo "ERROR: Not on a feature branch. Current branch: $branch" >&2
    echo "Feature branches should be named like: 001-feature-name or 20260319-143022-feature-name" >&2
    echo "Feature branches should be named like: 001-feature-name" >&2
    return 1
  fi

@@ -101,18 +90,15 @@ find_feature_dir_by_prefix() {
  local branch_name="$2"
  local specs_dir="$repo_root/specs"

  # Extract prefix from branch (e.g., "004" from "004-whatever" or "20260319-143022" from timestamp branches)
  local prefix=""
  if [[ "$branch_name" =~ ^([0-9]{8}-[0-9]{6})- ]]; then
    prefix="${BASH_REMATCH[1]}"
  elif [[ "$branch_name" =~ ^([0-9]{3})- ]]; then
    prefix="${BASH_REMATCH[1]}"
  else
    # If branch doesn't have a recognized prefix, fall back to exact match
  # Extract numeric prefix from branch (e.g., "004" from "004-whatever")
  if [[ ! "$branch_name" =~ ^([0-9]{3})- ]]; then
    # If branch doesn't have numeric prefix, fall back to exact match
    echo "$specs_dir/$branch_name"
    return
  fi

  local prefix="${BASH_REMATCH[1]}"

  # Search for directories in specs/ that start with this prefix
  local matches=()
  if [[ -d "$specs_dir" ]]; then
@@ -133,7 +119,7 @@ find_feature_dir_by_prefix() {
  else
    # Multiple matches - this shouldn't happen with proper naming convention
    echo "ERROR: Multiple spec directories found with prefix '$prefix': ${matches[*]}" >&2
    echo "Please ensure only one spec directory exists per prefix." >&2
    echo "Please ensure only one spec directory exists per numeric prefix." >&2
    return 1
  fi
}
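For readers less familiar with `BASH_REMATCH`: after a successful `=~` match, capture group 1 holds the extracted prefix. A standalone check with an illustrative branch name:

```bash
branch="004-user-auth"
[[ "$branch" =~ ^([0-9]{3})- ]] && echo "prefix=${BASH_REMATCH[1]}"   # prefix=004
```
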
@@ -175,7 +161,7 @@ has_jq() {
}

# Escape a string for safe embedding in a JSON value (fallback when jq is unavailable).
# Handles backslash, double-quote, and JSON-required control character escapes (RFC 8259).
# Handles backslash, double-quote, and control characters (newline, tab, carriage return).
json_escape() {
  local s="$1"
  s="${s//\\/\\\\}"
@@ -183,23 +169,7 @@ json_escape() {
  s="${s//$'\n'/\\n}"
  s="${s//$'\t'/\\t}"
  s="${s//$'\r'/\\r}"
  s="${s//$'\b'/\\b}"
  s="${s//$'\f'/\\f}"
  # Escape any remaining U+0001-U+001F control characters as \uXXXX.
  # (U+0000/NUL cannot appear in bash strings and is excluded.)
  # LC_ALL=C ensures ${#s} counts bytes and ${s:$i:1} yields single bytes,
  # so multi-byte UTF-8 sequences (first byte >= 0xC0) pass through intact.
  local LC_ALL=C
  local i char code
  for (( i=0; i<${#s}; i++ )); do
    char="${s:$i:1}"
    printf -v code '%d' "'$char" 2>/dev/null || code=256
    if (( code >= 1 && code <= 31 )); then
      printf '\\u%04x' "$code"
    else
      printf '%s' "$char"
    fi
  done
  printf '%s' "$s"
}

check_file() { [[ -f "$1" ]] && echo " ✓ $2" || echo " ✗ $2"; }
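A small usage check for `json_escape` as defined above (the input string is illustrative):

```bash
msg=$'line1\nline2 with "quotes"'
printf '{"msg":"%s"}\n' "$(json_escape "$msg")"
# → {"msg":"line1\nline2 with \"quotes\""}
```
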
@@ -224,11 +194,9 @@ resolve_template() {
  if [ -d "$presets_dir" ]; then
    local registry_file="$presets_dir/.registry"
    if [ -f "$registry_file" ] && command -v python3 >/dev/null 2>&1; then
      # Read preset IDs sorted by priority (lower number = higher precedence).
      # The python3 call is wrapped in an if-condition so that set -e does not
      # abort the function when python3 exits non-zero (e.g. invalid JSON).
      local sorted_presets=""
      if sorted_presets=$(SPECKIT_REGISTRY="$registry_file" python3 -c "
      # Read preset IDs sorted by priority (lower number = higher precedence)
      local sorted_presets
      sorted_presets=$(SPECKIT_REGISTRY="$registry_file" python3 -c "
import json, sys, os
try:
    with open(os.environ['SPECKIT_REGISTRY']) as f:
@@ -238,17 +206,14 @@ try:
        print(pid)
except Exception:
    sys.exit(1)
" 2>/dev/null); then
        if [ -n "$sorted_presets" ]; then
          # python3 succeeded and returned preset IDs — search in priority order
          while IFS= read -r preset_id; do
            local candidate="$presets_dir/$preset_id/templates/${template_name}.md"
            [ -f "$candidate" ] && echo "$candidate" && return 0
          done <<< "$sorted_presets"
        fi
        # python3 succeeded but registry has no presets — nothing to search
" 2>/dev/null)
      if [ $? -eq 0 ] && [ -n "$sorted_presets" ]; then
        while IFS= read -r preset_id; do
          local candidate="$presets_dir/$preset_id/templates/${template_name}.md"
          [ -f "$candidate" ] && echo "$candidate" && return 0
        done <<< "$sorted_presets"
      else
        # python3 failed (missing, or registry parse error) — fall back to unordered directory scan
        # python3 returned empty list — fall through to directory scan
        for preset in "$presets_dir"/*/; do
          [ -d "$preset" ] || continue
          local candidate="$preset/templates/${template_name}.md"
@@ -281,9 +246,8 @@ except Exception:
  local core="$base/${template_name}.md"
  [ -f "$core" ] && echo "$core" && return 0

  # Template not found in any location.
  # Return 1 so callers can distinguish "not found" from "found".
  # Callers running under set -e should use: TEMPLATE=$(resolve_template ...) || true
  return 1
  # Return success with empty output so callers using set -e don't abort;
  # callers check [ -n "$TEMPLATE" ] to detect "not found".
  return 0
}

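The `if`-wrapped command substitution discussed in the comments matters under `set -e`: a bare failing assignment aborts the script, while testing the assignment as a condition does not. A minimal, self-contained illustration:

```bash
set -e
if out=$(python3 -c 'import sys; sys.exit(1)' 2>/dev/null); then
  echo "got: $out"
else
  echo "python3 failed, falling back"   # reached; the script keeps running
fi
```
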
@@ -5,14 +5,13 @@ set -e
JSON_MODE=false
SHORT_NAME=""
BRANCH_NUMBER=""
USE_TIMESTAMP=false
ARGS=()
i=1
while [ $i -le $# ]; do
  arg="${!i}"
  case "$arg" in
    --json)
      JSON_MODE=true
    --json)
      JSON_MODE=true
      ;;
    --short-name)
      if [ $((i + 1)) -gt $# ]; then
@@ -41,27 +40,22 @@ while [ $i -le $# ]; do
      fi
      BRANCH_NUMBER="$next_arg"
      ;;
    --timestamp)
      USE_TIMESTAMP=true
      ;;
    --help|-h)
      echo "Usage: $0 [--json] [--short-name <name>] [--number N] [--timestamp] <feature_description>"
    --help|-h)
      echo "Usage: $0 [--json] [--short-name <name>] [--number N] <feature_description>"
      echo ""
      echo "Options:"
      echo "  --json               Output in JSON format"
      echo "  --short-name <name>  Provide a custom short name (2-4 words) for the branch"
      echo "  --number N           Specify branch number manually (overrides auto-detection)"
      echo "  --timestamp          Use timestamp prefix (YYYYMMDD-HHMMSS) instead of sequential numbering"
      echo "  --help, -h           Show this help message"
      echo ""
      echo "Examples:"
      echo "  $0 'Add user authentication system' --short-name 'user-auth'"
      echo "  $0 'Implement OAuth2 integration for API' --number 5"
      echo "  $0 --timestamp --short-name 'user-auth' 'Add user authentication'"
      exit 0
      ;;
    *)
      ARGS+=("$arg")
    *)
      ARGS+=("$arg")
      ;;
  esac
  i=$((i + 1))
@@ -69,7 +63,7 @@ done

FEATURE_DESCRIPTION="${ARGS[*]}"
if [ -z "$FEATURE_DESCRIPTION" ]; then
  echo "Usage: $0 [--json] [--short-name <name>] [--number N] [--timestamp] <feature_description>" >&2
  echo "Usage: $0 [--json] [--short-name <name>] [--number N] <feature_description>" >&2
  exit 1
fi

@@ -102,13 +96,10 @@ get_highest_from_specs() {
|
||||
for dir in "$specs_dir"/*; do
|
||||
[ -d "$dir" ] || continue
|
||||
dirname=$(basename "$dir")
|
||||
# Only match sequential prefixes (###-*), skip timestamp dirs
|
||||
if echo "$dirname" | grep -q '^[0-9]\{3\}-'; then
|
||||
number=$(echo "$dirname" | grep -o '^[0-9]\{3\}')
|
||||
number=$((10#$number))
|
||||
if [ "$number" -gt "$highest" ]; then
|
||||
highest=$number
|
||||
fi
|
||||
number=$(echo "$dirname" | grep -o '^[0-9]\+' || echo "0")
|
||||
number=$((10#$number))
|
||||
if [ "$number" -gt "$highest" ]; then
|
||||
highest=$number
|
||||
fi
|
||||
done
|
||||
fi
|
||||
@@ -147,7 +138,7 @@ check_existing_branches() {
|
||||
local specs_dir="$1"
|
||||
|
||||
# Fetch all remotes to get latest branch info (suppress errors if no remotes)
|
||||
git fetch --all --prune >/dev/null 2>&1 || true
|
||||
git fetch --all --prune 2>/dev/null || true
|
||||
|
||||
# Get highest number from ALL branches (not just matching short name)
|
||||
local highest_branch=$(get_highest_from_branches)
|
||||
@@ -171,6 +162,17 @@ clean_branch_name() {
|
||||
echo "$name" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//'
|
||||
}
|
||||
|
||||
# Escape a string for safe embedding in a JSON value (fallback when jq is unavailable).
|
||||
json_escape() {
|
||||
local s="$1"
|
||||
s="${s//\\/\\\\}"
|
||||
s="${s//\"/\\\"}"
|
||||
s="${s//$'\n'/\\n}"
|
||||
s="${s//$'\t'/\\t}"
|
||||
s="${s//$'\r'/\\r}"
|
||||
printf '%s' "$s"
|
||||
}
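For comparison, json.dumps produces the escaping that the json_escape fallback above approximates — a quick check of the expected output shape:

import json
print(json.dumps('line1\nline2\t"quoted" back\\slash'))
# "line1\nline2\t\"quoted\" back\\slash"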

# Resolve repository root. Prefer git information when available, but fall back
# to searching for repository markers so the workflow still functions in repositories that
# were initialised with --no-git.
@@ -251,42 +253,29 @@ else
BRANCH_SUFFIX=$(generate_branch_name "$FEATURE_DESCRIPTION")
fi

# Warn if --number and --timestamp are both specified
if [ "$USE_TIMESTAMP" = true ] && [ -n "$BRANCH_NUMBER" ]; then
>&2 echo "[specify] Warning: --number is ignored when --timestamp is used"
BRANCH_NUMBER=""
fi

# Determine branch prefix
if [ "$USE_TIMESTAMP" = true ]; then
FEATURE_NUM=$(date +%Y%m%d-%H%M%S)
BRANCH_NAME="${FEATURE_NUM}-${BRANCH_SUFFIX}"
else
# Determine branch number
if [ -z "$BRANCH_NUMBER" ]; then
if [ "$HAS_GIT" = true ]; then
# Check existing branches on remotes
BRANCH_NUMBER=$(check_existing_branches "$SPECS_DIR")
else
# Fall back to local directory check
HIGHEST=$(get_highest_from_specs "$SPECS_DIR")
BRANCH_NUMBER=$((HIGHEST + 1))
fi
# Determine branch number
if [ -z "$BRANCH_NUMBER" ]; then
if [ "$HAS_GIT" = true ]; then
# Check existing branches on remotes
BRANCH_NUMBER=$(check_existing_branches "$SPECS_DIR")
else
# Fall back to local directory check
HIGHEST=$(get_highest_from_specs "$SPECS_DIR")
BRANCH_NUMBER=$((HIGHEST + 1))
fi

# Force base-10 interpretation to prevent octal conversion (e.g., 010 → 8 in octal, but should be 10 in decimal)
FEATURE_NUM=$(printf "%03d" "$((10#$BRANCH_NUMBER))")
BRANCH_NAME="${FEATURE_NUM}-${BRANCH_SUFFIX}"
fi

# Force base-10 interpretation to prevent octal conversion (e.g., 010 → 8 in octal, but should be 10 in decimal)
FEATURE_NUM=$(printf "%03d" "$((10#$BRANCH_NUMBER))")
BRANCH_NAME="${FEATURE_NUM}-${BRANCH_SUFFIX}"

# GitHub enforces a 244-byte limit on branch names
# Validate and truncate if necessary
MAX_BRANCH_LENGTH=244
if [ ${#BRANCH_NAME} -gt $MAX_BRANCH_LENGTH ]; then
# Calculate how much we need to trim from suffix
# Account for prefix length: timestamp (15) + hyphen (1) = 16, or sequential (3) + hyphen (1) = 4
PREFIX_LENGTH=$(( ${#FEATURE_NUM} + 1 ))
MAX_SUFFIX_LENGTH=$((MAX_BRANCH_LENGTH - PREFIX_LENGTH))
# Account for: feature number (3) + hyphen (1) = 4 chars
MAX_SUFFIX_LENGTH=$((MAX_BRANCH_LENGTH - 4))

# Truncate suffix at word boundary if possible
TRUNCATED_SUFFIX=$(echo "$BRANCH_SUFFIX" | cut -c1-$MAX_SUFFIX_LENGTH)
@@ -305,11 +294,7 @@ if [ "$HAS_GIT" = true ]; then
if ! git checkout -b "$BRANCH_NAME" 2>/dev/null; then
# Check if branch already exists
if git branch --list "$BRANCH_NAME" | grep -q .; then
if [ "$USE_TIMESTAMP" = true ]; then
>&2 echo "Error: Branch '$BRANCH_NAME' already exists. Rerun to get a new timestamp or use a different --short-name."
else
>&2 echo "Error: Branch '$BRANCH_NAME' already exists. Please use a different feature name or specify a different number with --number."
fi
>&2 echo "Error: Branch '$BRANCH_NAME' already exists. Please use a different feature name or specify a different number with --number."
exit 1
else
>&2 echo "Error: Failed to create git branch '$BRANCH_NAME'. Please check your git configuration and try again."
@@ -323,14 +308,9 @@ fi
FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
mkdir -p "$FEATURE_DIR"

TEMPLATE=$(resolve_template "spec-template" "$REPO_ROOT") || true
TEMPLATE=$(resolve_template "spec-template" "$REPO_ROOT")
SPEC_FILE="$FEATURE_DIR/spec.md"
if [ -n "$TEMPLATE" ] && [ -f "$TEMPLATE" ]; then
cp "$TEMPLATE" "$SPEC_FILE"
else
echo "Warning: Spec template not found; created empty spec file" >&2
touch "$SPEC_FILE"
fi
if [ -n "$TEMPLATE" ] && [ -f "$TEMPLATE" ]; then cp "$TEMPLATE" "$SPEC_FILE"; else touch "$SPEC_FILE"; fi

# Inform the user how to persist the feature variable in their own shell
printf '# To persist: export SPECIFY_FEATURE=%q\n' "$BRANCH_NAME" >&2

@@ -39,7 +39,7 @@ check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
mkdir -p "$FEATURE_DIR"

# Copy plan template if it exists
TEMPLATE=$(resolve_template "plan-template" "$REPO_ROOT") || true
TEMPLATE=$(resolve_template "plan-template" "$REPO_ROOT")
if [[ -n "$TEMPLATE" ]] && [[ -f "$TEMPLATE" ]]; then
cp "$TEMPLATE" "$IMPL_PLAN"
echo "Copied plan template to $IMPL_PLAN"

@@ -30,12 +30,12 @@
#
# 5. Multi-Agent Support
# - Handles agent-specific file paths and naming conventions
# - Supports: Claude, Gemini, Copilot, Cursor, Qwen, opencode, Codex, Windsurf, Junie, Kilo Code, Auggie CLI, Roo Code, CodeBuddy CLI, Qoder CLI, Amp, SHAI, Tabnine CLI, Kiro CLI, Mistral Vibe, Kimi Code, Pi Coding Agent, iFlow CLI, Antigravity or Generic
# - Supports: Claude, Gemini, Copilot, Cursor, Qwen, opencode, Codex, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy CLI, Qoder CLI, Amp, SHAI, Tabnine CLI, Kiro CLI, Mistral Vibe, Kimi Code, Antigravity or Generic
# - Can update single agents or all existing agent files
# - Creates default Claude file if no agent files exist
#
# Usage: ./update-agent-context.sh [agent_type]
# Agent types: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|junie|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|trae|pi|iflow|generic
# Agent types: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|generic
# Leave empty to update all existing agent files

set -e
@@ -68,13 +68,12 @@ CURSOR_FILE="$REPO_ROOT/.cursor/rules/specify-rules.mdc"
QWEN_FILE="$REPO_ROOT/QWEN.md"
AGENTS_FILE="$REPO_ROOT/AGENTS.md"
WINDSURF_FILE="$REPO_ROOT/.windsurf/rules/specify-rules.md"
JUNIE_FILE="$REPO_ROOT/.junie/AGENTS.md"
KILOCODE_FILE="$REPO_ROOT/.kilocode/rules/specify-rules.md"
AUGGIE_FILE="$REPO_ROOT/.augment/rules/specify-rules.md"
ROO_FILE="$REPO_ROOT/.roo/rules/specify-rules.md"
CODEBUDDY_FILE="$REPO_ROOT/CODEBUDDY.md"
QODER_FILE="$REPO_ROOT/QODER.md"
# Amp, Kiro CLI, IBM Bob, and Pi all share AGENTS.md — use AGENTS_FILE to avoid
# AMP, Kiro CLI, and IBM Bob all share AGENTS.md — use AGENTS_FILE to avoid
# updating the same file multiple times.
AMP_FILE="$AGENTS_FILE"
SHAI_FILE="$REPO_ROOT/SHAI.md"
@@ -84,8 +83,6 @@ AGY_FILE="$REPO_ROOT/.agent/rules/specify-rules.md"
BOB_FILE="$AGENTS_FILE"
VIBE_FILE="$REPO_ROOT/.vibe/agents/specify-agents.md"
KIMI_FILE="$REPO_ROOT/KIMI.md"
TRAE_FILE="$REPO_ROOT/.trae/rules/AGENTS.md"
IFLOW_FILE="$REPO_ROOT/IFLOW.md"

# Template file
TEMPLATE_FILE="$REPO_ROOT/.specify/templates/agent-file-template.md"
@@ -639,9 +636,6 @@ update_specific_agent() {
windsurf)
update_agent_file "$WINDSURF_FILE" "Windsurf" || return 1
;;
junie)
update_agent_file "$JUNIE_FILE" "Junie" || return 1
;;
kilocode)
update_agent_file "$KILOCODE_FILE" "Kilo Code" || return 1
;;
@@ -681,90 +675,67 @@ update_specific_agent() {
kimi)
update_agent_file "$KIMI_FILE" "Kimi Code" || return 1
;;
trae)
update_agent_file "$TRAE_FILE" "Trae" || return 1
;;
pi)
update_agent_file "$AGENTS_FILE" "Pi Coding Agent" || return 1
;;
iflow)
update_agent_file "$IFLOW_FILE" "iFlow CLI" || return 1
;;
generic)
log_info "Generic agent: no predefined context file. Use the agent-specific update script for your agent."
;;
*)
log_error "Unknown agent type '$agent_type'"
log_error "Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|junie|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|trae|pi|iflow|generic"
log_error "Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|generic"
exit 1
;;
esac
}

# Helper: skip non-existent files and files already updated (dedup by
# realpath so that variables pointing to the same file — e.g. AMP_FILE,
# KIRO_FILE, BOB_FILE all resolving to AGENTS_FILE — are only written once).
# Uses a linear array instead of associative array for bash 3.2 compatibility.
# Note: defined at top level because bash 3.2 does not support true
# nested/local functions. _updated_paths, _found_agent, and _all_ok are
# initialised exclusively inside update_all_existing_agents so that
# sourcing this script has no side effects on the caller's environment.

_update_if_new() {
local file="$1" name="$2"
[[ -f "$file" ]] || return 0
local real_path
real_path=$(realpath "$file" 2>/dev/null || echo "$file")
local p
if [[ ${#_updated_paths[@]} -gt 0 ]]; then
for p in "${_updated_paths[@]}"; do
[[ "$p" == "$real_path" ]] && return 0
done
fi
# Record the file as seen before attempting the update so that:
# (a) aliases pointing to the same path are not retried on failure
# (b) _found_agent reflects file existence, not update success
_updated_paths+=("$real_path")
_found_agent=true
update_agent_file "$file" "$name"
}
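The dedup-by-realpath logic of _update_if_new, mirrored in Python for clarity (update_fn stands in for update_agent_file and is hypothetical):

import os

updated_paths = []

def update_if_new(path, name, update_fn):
    if not os.path.isfile(path):
        return True                    # missing file: skip, not a failure
    real = os.path.realpath(path)
    if real in updated_paths:
        return True                    # alias of an already-handled file
    updated_paths.append(real)         # record before attempting the update
    return update_fn(path, name)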

update_all_existing_agents() {
_found_agent=false
_updated_paths=()
local _all_ok=true
local found_agent=false
local _updated_paths=()

_update_if_new "$CLAUDE_FILE" "Claude Code" || _all_ok=false
_update_if_new "$GEMINI_FILE" "Gemini CLI" || _all_ok=false
_update_if_new "$COPILOT_FILE" "GitHub Copilot" || _all_ok=false
_update_if_new "$CURSOR_FILE" "Cursor IDE" || _all_ok=false
_update_if_new "$QWEN_FILE" "Qwen Code" || _all_ok=false
_update_if_new "$AGENTS_FILE" "Codex/opencode" || _all_ok=false
_update_if_new "$AMP_FILE" "Amp" || _all_ok=false
_update_if_new "$KIRO_FILE" "Kiro CLI" || _all_ok=false
_update_if_new "$BOB_FILE" "IBM Bob" || _all_ok=false
_update_if_new "$WINDSURF_FILE" "Windsurf" || _all_ok=false
_update_if_new "$JUNIE_FILE" "Junie" || _all_ok=false
_update_if_new "$KILOCODE_FILE" "Kilo Code" || _all_ok=false
_update_if_new "$AUGGIE_FILE" "Auggie CLI" || _all_ok=false
_update_if_new "$ROO_FILE" "Roo Code" || _all_ok=false
_update_if_new "$CODEBUDDY_FILE" "CodeBuddy CLI" || _all_ok=false
_update_if_new "$SHAI_FILE" "SHAI" || _all_ok=false
_update_if_new "$TABNINE_FILE" "Tabnine CLI" || _all_ok=false
_update_if_new "$QODER_FILE" "Qoder CLI" || _all_ok=false
_update_if_new "$AGY_FILE" "Antigravity" || _all_ok=false
_update_if_new "$VIBE_FILE" "Mistral Vibe" || _all_ok=false
_update_if_new "$KIMI_FILE" "Kimi Code" || _all_ok=false
_update_if_new "$TRAE_FILE" "Trae" || _all_ok=false
_update_if_new "$IFLOW_FILE" "iFlow CLI" || _all_ok=false
# Helper: skip non-existent files and files already updated (dedup by
# realpath so that variables pointing to the same file — e.g. AMP_FILE,
# KIRO_FILE, BOB_FILE all resolving to AGENTS_FILE — are only written once).
# Uses a linear array instead of associative array for bash 3.2 compatibility.
update_if_new() {
local file="$1" name="$2"
[[ -f "$file" ]] || return 0
local real_path
real_path=$(realpath "$file" 2>/dev/null || echo "$file")
local p
if [[ ${#_updated_paths[@]} -gt 0 ]]; then
for p in "${_updated_paths[@]}"; do
[[ "$p" == "$real_path" ]] && return 0
done
fi
update_agent_file "$file" "$name" || return 1
_updated_paths+=("$real_path")
found_agent=true
}

update_if_new "$CLAUDE_FILE" "Claude Code"
update_if_new "$GEMINI_FILE" "Gemini CLI"
update_if_new "$COPILOT_FILE" "GitHub Copilot"
update_if_new "$CURSOR_FILE" "Cursor IDE"
update_if_new "$QWEN_FILE" "Qwen Code"
update_if_new "$AGENTS_FILE" "Codex/opencode"
update_if_new "$AMP_FILE" "Amp"
update_if_new "$KIRO_FILE" "Kiro CLI"
update_if_new "$BOB_FILE" "IBM Bob"
update_if_new "$WINDSURF_FILE" "Windsurf"
update_if_new "$KILOCODE_FILE" "Kilo Code"
update_if_new "$AUGGIE_FILE" "Auggie CLI"
update_if_new "$ROO_FILE" "Roo Code"
update_if_new "$CODEBUDDY_FILE" "CodeBuddy CLI"
update_if_new "$SHAI_FILE" "SHAI"
update_if_new "$TABNINE_FILE" "Tabnine CLI"
update_if_new "$QODER_FILE" "Qoder CLI"
update_if_new "$AGY_FILE" "Antigravity"
update_if_new "$VIBE_FILE" "Mistral Vibe"
update_if_new "$KIMI_FILE" "Kimi Code"

# If no agent files exist, create a default Claude file
if [[ "$_found_agent" == false ]]; then
if [[ "$found_agent" == false ]]; then
log_info "No existing agent files found, creating default Claude file..."
update_agent_file "$CLAUDE_FILE" "Claude Code" || return 1
fi

[[ "$_all_ok" == true ]]
}
print_summary() {
echo
@@ -783,7 +754,7 @@ print_summary() {
fi

echo
log_info "Usage: $0 [claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|junie|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|trae|pi|iflow|generic]"
log_info "Usage: $0 [claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|generic]"
}

#==============================================================================

@@ -38,28 +38,17 @@ function Get-CurrentBranch {
if (Test-Path $specsDir) {
$latestFeature = ""
$highest = 0
$latestTimestamp = ""


Get-ChildItem -Path $specsDir -Directory | ForEach-Object {
if ($_.Name -match '^(\d{8}-\d{6})-') {
# Timestamp-based branch: compare lexicographically
$ts = $matches[1]
if ($ts -gt $latestTimestamp) {
$latestTimestamp = $ts
$latestFeature = $_.Name
}
} elseif ($_.Name -match '^(\d{3})-') {
if ($_.Name -match '^(\d{3})-') {
$num = [int]$matches[1]
if ($num -gt $highest) {
$highest = $num
# Only update if no timestamp branch found yet
if (-not $latestTimestamp) {
$latestFeature = $_.Name
}
$latestFeature = $_.Name
}
}
}


if ($latestFeature) {
return $latestFeature
}
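The selection rule in Get-CurrentBranch, restated as a small Python sketch (directory names are illustrative): timestamp-prefixed spec directories win over sequential ###- ones, and within each family the latest timestamp or highest number wins.

import re

def latest_feature(names):
    best_ts = ("", None)
    best_seq = (0, None)
    for n in names:
        m = re.match(r"^(\d{8}-\d{6})-", n)
        if m:
            if m.group(1) > best_ts[0]:     # lexicographic == chronological
                best_ts = (m.group(1), n)
        elif re.match(r"^\d{3}-", n):
            num = int(n[:3])
            if num > best_seq[0]:
                best_seq = (num, n)
    return best_ts[1] or best_seq[1]

print(latest_feature(["001-auth", "003-api", "20260319-143022-auth"]))
# -> 20260319-143022-auth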
@@ -90,9 +79,9 @@ function Test-FeatureBranch {
return $true
}

if ($Branch -notmatch '^[0-9]{3}-' -and $Branch -notmatch '^\d{8}-\d{6}-') {
if ($Branch -notmatch '^[0-9]{3}-') {
Write-Output "ERROR: Not on a feature branch. Current branch: $Branch"
Write-Output "Feature branches should be named like: 001-feature-name or 20260319-143022-feature-name"
Write-Output "Feature branches should be named like: 001-feature-name"
return $false
}
return $true

@@ -4,36 +4,32 @@
param(
[switch]$Json,
[string]$ShortName,
[Parameter()]
[int]$Number = 0,
[switch]$Timestamp,
[switch]$Help,
[Parameter(Position = 0, ValueFromRemainingArguments = $true)]
[Parameter(ValueFromRemainingArguments = $true)]
[string[]]$FeatureDescription
)
$ErrorActionPreference = 'Stop'

# Show help if requested
if ($Help) {
Write-Host "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
Write-Host "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] [-Number N] <feature description>"
Write-Host ""
Write-Host "Options:"
Write-Host " -Json Output in JSON format"
Write-Host " -ShortName <name> Provide a custom short name (2-4 words) for the branch"
Write-Host " -Number N Specify branch number manually (overrides auto-detection)"
Write-Host " -Timestamp Use timestamp prefix (YYYYMMDD-HHMMSS) instead of sequential numbering"
Write-Host " -Help Show this help message"
Write-Host ""
Write-Host "Examples:"
Write-Host " ./create-new-feature.ps1 'Add user authentication system' -ShortName 'user-auth'"
Write-Host " ./create-new-feature.ps1 'Implement OAuth2 integration for API'"
Write-Host " ./create-new-feature.ps1 -Timestamp -ShortName 'user-auth' 'Add user authentication'"
exit 0
}

# Check if feature description provided
if (-not $FeatureDescription -or $FeatureDescription.Count -eq 0) {
Write-Error "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
Write-Error "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] <feature description>"
exit 1
}

@@ -75,7 +71,7 @@ function Get-HighestNumberFromSpecs {
$highest = 0
if (Test-Path $SpecsDir) {
Get-ChildItem -Path $SpecsDir -Directory | ForEach-Object {
if ($_.Name -match '^(\d{3})-') {
if ($_.Name -match '^(\d+)') {
$num = [int]$matches[1]
if ($num -gt $highest) { $highest = $num }
}
@@ -96,7 +92,7 @@ function Get-HighestNumberFromBranches {
$cleanBranch = $branch.Trim() -replace '^\*?\s+', '' -replace '^remotes/[^/]+/', ''

# Extract feature number if branch matches pattern ###-*
if ($cleanBranch -match '^(\d{3})-') {
if ($cleanBranch -match '^(\d+)-') {
$num = [int]$matches[1]
if ($num -gt $highest) { $highest = $num }
}
@@ -219,40 +215,27 @@ if ($ShortName) {
$branchSuffix = Get-BranchName -Description $featureDesc
}

# Warn if -Number and -Timestamp are both specified
if ($Timestamp -and $Number -ne 0) {
Write-Warning "[specify] Warning: -Number is ignored when -Timestamp is used"
$Number = 0
}

# Determine branch prefix
if ($Timestamp) {
$featureNum = Get-Date -Format 'yyyyMMdd-HHmmss'
$branchName = "$featureNum-$branchSuffix"
} else {
# Determine branch number
if ($Number -eq 0) {
if ($hasGit) {
# Check existing branches on remotes
$Number = Get-NextBranchNumber -SpecsDir $specsDir
} else {
# Fall back to local directory check
$Number = (Get-HighestNumberFromSpecs -SpecsDir $specsDir) + 1
}
# Determine branch number
if ($Number -eq 0) {
if ($hasGit) {
# Check existing branches on remotes
$Number = Get-NextBranchNumber -SpecsDir $specsDir
} else {
# Fall back to local directory check
$Number = (Get-HighestNumberFromSpecs -SpecsDir $specsDir) + 1
}

$featureNum = ('{0:000}' -f $Number)
$branchName = "$featureNum-$branchSuffix"
}

$featureNum = ('{0:000}' -f $Number)
$branchName = "$featureNum-$branchSuffix"

# GitHub enforces a 244-byte limit on branch names
# Validate and truncate if necessary
$maxBranchLength = 244
if ($branchName.Length -gt $maxBranchLength) {
# Calculate how much we need to trim from suffix
# Account for prefix length: timestamp (15) + hyphen (1) = 16, or sequential (3) + hyphen (1) = 4
$prefixLength = $featureNum.Length + 1
$maxSuffixLength = $maxBranchLength - $prefixLength
# Account for: feature number (3) + hyphen (1) = 4 chars
$maxSuffixLength = $maxBranchLength - 4

# Truncate suffix
$truncatedSuffix = $branchSuffix.Substring(0, [Math]::Min($branchSuffix.Length, $maxSuffixLength))
@@ -282,11 +265,7 @@ if ($hasGit) {
# Check if branch already exists
$existingBranch = git branch --list $branchName 2>$null
if ($existingBranch) {
if ($Timestamp) {
Write-Error "Error: Branch '$branchName' already exists. Rerun to get a new timestamp or use a different -ShortName."
} else {
Write-Error "Error: Branch '$branchName' already exists. Please use a different feature name or specify a different number with -Number."
}
Write-Error "Error: Branch '$branchName' already exists. Please use a different feature name or specify a different number with -Number."
exit 1
} else {
Write-Error "Error: Failed to create git branch '$branchName'. Please check your git configuration and try again."

@@ -9,7 +9,7 @@ Mirrors the behavior of scripts/bash/update-agent-context.sh:
2. Plan Data Extraction
3. Agent File Management (create from template or update existing)
4. Content Generation (technology stack, recent changes, timestamp)
5. Multi-Agent Support (claude, gemini, copilot, cursor-agent, qwen, opencode, codex, windsurf, junie, kilocode, auggie, roo, codebuddy, amp, shai, tabnine, kiro-cli, agy, bob, vibe, qodercli, kimi, trae, pi, iflow, generic)
5. Multi-Agent Support (claude, gemini, copilot, cursor-agent, qwen, opencode, codex, windsurf, kilocode, auggie, roo, codebuddy, amp, shai, tabnine, kiro-cli, agy, bob, vibe, qodercli, kimi, generic)

.PARAMETER AgentType
Optional agent key to update a single agent. If omitted, updates all existing agent files (creating a default Claude file if none exist).
@@ -25,7 +25,7 @@ Relies on common helper functions in common.ps1
#>
param(
[Parameter(Position=0)]
[ValidateSet('claude','gemini','copilot','cursor-agent','qwen','opencode','codex','windsurf','junie','kilocode','auggie','roo','codebuddy','amp','shai','tabnine','kiro-cli','agy','bob','qodercli','vibe','kimi','trae','pi','iflow','generic')]
[ValidateSet('claude','gemini','copilot','cursor-agent','qwen','opencode','codex','windsurf','kilocode','auggie','roo','codebuddy','amp','shai','tabnine','kiro-cli','agy','bob','qodercli','vibe','kimi','generic')]
[string]$AgentType
)

@@ -51,7 +51,6 @@ $CURSOR_FILE = Join-Path $REPO_ROOT '.cursor/rules/specify-rules.mdc'
$QWEN_FILE = Join-Path $REPO_ROOT 'QWEN.md'
$AGENTS_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
$WINDSURF_FILE = Join-Path $REPO_ROOT '.windsurf/rules/specify-rules.md'
$JUNIE_FILE = Join-Path $REPO_ROOT '.junie/AGENTS.md'
$KILOCODE_FILE = Join-Path $REPO_ROOT '.kilocode/rules/specify-rules.md'
$AUGGIE_FILE = Join-Path $REPO_ROOT '.augment/rules/specify-rules.md'
$ROO_FILE = Join-Path $REPO_ROOT '.roo/rules/specify-rules.md'
@@ -65,8 +64,6 @@ $AGY_FILE = Join-Path $REPO_ROOT '.agent/rules/specify-rules.md'
$BOB_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
$VIBE_FILE = Join-Path $REPO_ROOT '.vibe/agents/specify-agents.md'
$KIMI_FILE = Join-Path $REPO_ROOT 'KIMI.md'
$TRAE_FILE = Join-Path $REPO_ROOT '.trae/rules/AGENTS.md'
$IFLOW_FILE = Join-Path $REPO_ROOT 'IFLOW.md'

$TEMPLATE_FILE = Join-Path $REPO_ROOT '.specify/templates/agent-file-template.md'

@@ -398,7 +395,6 @@ function Update-SpecificAgent {
'opencode' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'opencode' }
'codex' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Codex CLI' }
'windsurf' { Update-AgentFile -TargetFile $WINDSURF_FILE -AgentName 'Windsurf' }
'junie' { Update-AgentFile -TargetFile $JUNIE_FILE -AgentName 'Junie' }
'kilocode' { Update-AgentFile -TargetFile $KILOCODE_FILE -AgentName 'Kilo Code' }
'auggie' { Update-AgentFile -TargetFile $AUGGIE_FILE -AgentName 'Auggie CLI' }
'roo' { Update-AgentFile -TargetFile $ROO_FILE -AgentName 'Roo Code' }
@@ -412,11 +408,8 @@ function Update-SpecificAgent {
'bob' { Update-AgentFile -TargetFile $BOB_FILE -AgentName 'IBM Bob' }
'vibe' { Update-AgentFile -TargetFile $VIBE_FILE -AgentName 'Mistral Vibe' }
'kimi' { Update-AgentFile -TargetFile $KIMI_FILE -AgentName 'Kimi Code' }
'trae' { Update-AgentFile -TargetFile $TRAE_FILE -AgentName 'Trae' }
'pi' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Pi Coding Agent' }
'iflow' { Update-AgentFile -TargetFile $IFLOW_FILE -AgentName 'iFlow CLI' }
'generic' { Write-Info 'Generic agent: no predefined context file. Use the agent-specific update script for your agent.' }
default { Write-Err "Unknown agent type '$Type'"; Write-Err 'Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|junie|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|trae|pi|iflow|generic'; return $false }
default { Write-Err "Unknown agent type '$Type'"; Write-Err 'Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|generic'; return $false }
}
}

@@ -430,7 +423,6 @@ function Update-AllExistingAgents {
if (Test-Path $QWEN_FILE) { if (-not (Update-AgentFile -TargetFile $QWEN_FILE -AgentName 'Qwen Code')) { $ok = $false }; $found = $true }
if (Test-Path $AGENTS_FILE) { if (-not (Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Codex/opencode')) { $ok = $false }; $found = $true }
if (Test-Path $WINDSURF_FILE) { if (-not (Update-AgentFile -TargetFile $WINDSURF_FILE -AgentName 'Windsurf')) { $ok = $false }; $found = $true }
if (Test-Path $JUNIE_FILE) { if (-not (Update-AgentFile -TargetFile $JUNIE_FILE -AgentName 'Junie')) { $ok = $false }; $found = $true }
if (Test-Path $KILOCODE_FILE) { if (-not (Update-AgentFile -TargetFile $KILOCODE_FILE -AgentName 'Kilo Code')) { $ok = $false }; $found = $true }
if (Test-Path $AUGGIE_FILE) { if (-not (Update-AgentFile -TargetFile $AUGGIE_FILE -AgentName 'Auggie CLI')) { $ok = $false }; $found = $true }
if (Test-Path $ROO_FILE) { if (-not (Update-AgentFile -TargetFile $ROO_FILE -AgentName 'Roo Code')) { $ok = $false }; $found = $true }
@@ -443,8 +435,6 @@ function Update-AllExistingAgents {
if (Test-Path $BOB_FILE) { if (-not (Update-AgentFile -TargetFile $BOB_FILE -AgentName 'IBM Bob')) { $ok = $false }; $found = $true }
if (Test-Path $VIBE_FILE) { if (-not (Update-AgentFile -TargetFile $VIBE_FILE -AgentName 'Mistral Vibe')) { $ok = $false }; $found = $true }
if (Test-Path $KIMI_FILE) { if (-not (Update-AgentFile -TargetFile $KIMI_FILE -AgentName 'Kimi Code')) { $ok = $false }; $found = $true }
if (Test-Path $TRAE_FILE) { if (-not (Update-AgentFile -TargetFile $TRAE_FILE -AgentName 'Trae')) { $ok = $false }; $found = $true }
if (Test-Path $IFLOW_FILE) { if (-not (Update-AgentFile -TargetFile $IFLOW_FILE -AgentName 'iFlow CLI')) { $ok = $false }; $found = $true }
if (-not $found) {
Write-Info 'No existing agent files found, creating default Claude file...'
if (-not (Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code')) { $ok = $false }
@@ -459,7 +449,7 @@ function Print-Summary {
if ($NEW_FRAMEWORK) { Write-Host " - Added framework: $NEW_FRAMEWORK" }
if ($NEW_DB -and $NEW_DB -ne 'N/A') { Write-Host " - Added database: $NEW_DB" }
Write-Host ''
Write-Info 'Usage: ./update-agent-context.ps1 [-AgentType claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|junie|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|kimi|trae|pi|iflow|generic]'
Write-Info 'Usage: ./update-agent-context.ps1 [-AgentType claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|tabnine|kiro-cli|agy|bob|vibe|qodercli|generic]'
}

function Main {

[File diff suppressed because it is too large]
@@ -9,7 +9,6 @@ command files into agent-specific directories in the correct format.
from pathlib import Path
from typing import Dict, List, Any

import platform
import yaml


@@ -60,19 +59,13 @@ class CommandRegistrar:
"extension": ".md"
},
"codex": {
"dir": ".agents/skills",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": "/SKILL.md",
},
"windsurf": {
"dir": ".windsurf/workflows",
"dir": ".codex/prompts",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"junie": {
"dir": ".junie/commands",
"windsurf": {
"dir": ".windsurf/workflows",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
@@ -113,12 +106,6 @@ class CommandRegistrar:
"args": "$ARGUMENTS",
"extension": ".md"
},
"pi": {
"dir": ".pi/prompts",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"amp": {
"dir": ".agents/commands",
"format": "markdown",
@@ -147,19 +134,7 @@ class CommandRegistrar:
"dir": ".kimi/skills",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": "/SKILL.md",
},
"trae": {
"dir": ".trae/rules",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
},
"iflow": {
"dir": ".iflow/commands",
"format": "markdown",
"args": "$ARGUMENTS",
"extension": ".md"
"extension": "/SKILL.md"
}
}
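How the "/SKILL.md" pseudo-extension in the configs above becomes a nested skill path: plain string concatenation before the pathlib join yields a per-command directory rather than a flat file.

from pathlib import Path

commands_dir = Path(".agents/skills")
dest = commands_dir / ("speckit-plan" + "/SKILL.md")
print(dest)   # .agents/skills/speckit-plan/SKILL.md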

@@ -189,9 +164,6 @@ class CommandRegistrar:
except yaml.YAMLError:
frontmatter = {}

if not isinstance(frontmatter, dict):
frontmatter = {}

return frontmatter, body

@staticmethod
@@ -219,14 +191,11 @@ class CommandRegistrar:
Returns:
Modified frontmatter with adjusted paths
"""
for script_key in ("scripts", "agent_scripts"):
scripts = frontmatter.get(script_key)
if not isinstance(scripts, dict):
continue

for key, script_path in scripts.items():
if isinstance(script_path, str) and script_path.startswith("../../scripts/"):
scripts[key] = f".specify/scripts/{script_path[14:]}"
if "scripts" in frontmatter:
for key in frontmatter["scripts"]:
script_path = frontmatter["scripts"][key]
if script_path.startswith("../../scripts/"):
frontmatter["scripts"][key] = f".specify/scripts/{script_path[14:]}"
return frontmatter
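The slice in the path rewrite above relies on len("../../scripts/") being exactly 14 — a quick sanity check:

prefix = "../../scripts/"
assert len(prefix) == 14
p = "../../scripts/bash/create-new-feature.sh"
print(f".specify/scripts/{p[len(prefix):]}")
# .specify/scripts/bash/create-new-feature.sh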

def render_markdown_command(
@@ -283,101 +252,6 @@ class CommandRegistrar:

return "\n".join(toml_lines)

def render_skill_command(
self,
agent_name: str,
skill_name: str,
frontmatter: dict,
body: str,
source_id: str,
source_file: str,
project_root: Path,
) -> str:
"""Render a command override as a SKILL.md file.

SKILL-target agents should receive the same skills-oriented
frontmatter shape used elsewhere in the project instead of the
original command frontmatter.

Technical debt note:
Spec-kit currently has multiple SKILL.md generators (template packaging,
init-time conversion, and extension/preset overrides). Keep the skill
frontmatter keys aligned (name/description/compatibility/metadata, with
metadata.author and metadata.source subkeys) to avoid drift across agents.
"""
if not isinstance(frontmatter, dict):
frontmatter = {}

if agent_name == "codex":
body = self._resolve_codex_skill_placeholders(frontmatter, body, project_root)

description = frontmatter.get("description", f"Spec-kit workflow command: {skill_name}")
skill_frontmatter = {
"name": skill_name,
"description": description,
"compatibility": "Requires spec-kit project structure with .specify/ directory",
"metadata": {
"author": "github-spec-kit",
"source": f"{source_id}:{source_file}",
},
}
return self.render_frontmatter(skill_frontmatter) + "\n" + body

@staticmethod
def _resolve_codex_skill_placeholders(frontmatter: dict, body: str, project_root: Path) -> str:
"""Resolve script placeholders for Codex skill overrides.

This intentionally scopes the fix to Codex, which is the newly
migrated runtime path in this PR. Existing Kimi behavior is left
unchanged for now.
"""
try:
from . import load_init_options
except ImportError:
return body

if not isinstance(frontmatter, dict):
frontmatter = {}

scripts = frontmatter.get("scripts", {}) or {}
agent_scripts = frontmatter.get("agent_scripts", {}) or {}
if not isinstance(scripts, dict):
scripts = {}
if not isinstance(agent_scripts, dict):
agent_scripts = {}

script_variant = load_init_options(project_root).get("script")
if script_variant not in {"sh", "ps"}:
fallback_order = []
default_variant = "ps" if platform.system().lower().startswith("win") else "sh"
secondary_variant = "sh" if default_variant == "ps" else "ps"

if default_variant in scripts or default_variant in agent_scripts:
fallback_order.append(default_variant)
if secondary_variant in scripts or secondary_variant in agent_scripts:
fallback_order.append(secondary_variant)

for key in scripts:
if key not in fallback_order:
fallback_order.append(key)
for key in agent_scripts:
if key not in fallback_order:
fallback_order.append(key)

script_variant = fallback_order[0] if fallback_order else None

script_command = scripts.get(script_variant) if script_variant else None
if script_command:
script_command = script_command.replace("{ARGS}", "$ARGUMENTS")
body = body.replace("{SCRIPT}", script_command)

agent_script_command = agent_scripts.get(script_variant) if script_variant else None
if agent_script_command:
agent_script_command = agent_script_command.replace("{ARGS}", "$ARGUMENTS")
body = body.replace("{AGENT_SCRIPT}", agent_script_command)

return body.replace("{ARGS}", "$ARGUMENTS").replace("__AGENT__", "codex")
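The substitution performed above, in isolation (the frontmatter values here are illustrative, not from a real command file):

scripts = {"sh": "scripts/bash/setup-plan.sh --json {ARGS}"}
body = "Run {SCRIPT} from the repository root."
cmd = scripts["sh"].replace("{ARGS}", "$ARGUMENTS")
print(body.replace("{SCRIPT}", cmd))
# Run scripts/bash/setup-plan.sh --json $ARGUMENTS from the repository root.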

def _convert_argument_placeholder(self, content: str, from_placeholder: str, to_placeholder: str) -> str:
"""Convert argument placeholder format.

@@ -391,18 +265,6 @@
"""
return content.replace(from_placeholder, to_placeholder)

@staticmethod
def _compute_output_name(agent_name: str, cmd_name: str, agent_config: Dict[str, Any]) -> str:
"""Compute the on-disk command or skill name for an agent."""
if agent_config["extension"] != "/SKILL.md":
return cmd_name

short_name = cmd_name
if short_name.startswith("speckit."):
short_name = short_name[len("speckit."):]

return f"speckit.{short_name}" if agent_name == "kimi" else f"speckit-{short_name}"
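The resulting names, per the helper being removed above (called unbound here purely for illustration): Kimi keeps the dotted prefix, other SKILL.md agents get a hyphenated one.

cfg = {"extension": "/SKILL.md"}
CommandRegistrar._compute_output_name("kimi", "speckit.plan", cfg)    # 'speckit.plan'
CommandRegistrar._compute_output_name("codex", "speckit.plan", cfg)   # 'speckit-plan'
CommandRegistrar._compute_output_name("codex", "plan", cfg)           # 'speckit-plan'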

def register_commands(
self,
agent_name: str,
@@ -454,20 +316,14 @@ class CommandRegistrar:
body, "$ARGUMENTS", agent_config["args"]
)

output_name = self._compute_output_name(agent_name, cmd_name, agent_config)

if agent_config["extension"] == "/SKILL.md":
output = self.render_skill_command(
agent_name, output_name, frontmatter, body, source_id, cmd_file, project_root
)
elif agent_config["format"] == "markdown":
if agent_config["format"] == "markdown":
output = self.render_markdown_command(frontmatter, body, source_id, context_note)
elif agent_config["format"] == "toml":
output = self.render_toml_command(frontmatter, body, source_id)
else:
raise ValueError(f"Unsupported format: {agent_config['format']}")

dest_file = commands_dir / f"{output_name}{agent_config['extension']}"
dest_file = commands_dir / f"{cmd_name}{agent_config['extension']}"
dest_file.parent.mkdir(parents=True, exist_ok=True)
dest_file.write_text(output, encoding="utf-8")

@@ -477,15 +333,9 @@ class CommandRegistrar:
registered.append(cmd_name)

for alias in cmd_info.get("aliases", []):
alias_output_name = self._compute_output_name(agent_name, alias, agent_config)
alias_output = output
if agent_config["extension"] == "/SKILL.md":
alias_output = self.render_skill_command(
agent_name, alias_output_name, frontmatter, body, source_id, cmd_file, project_root
)
alias_file = commands_dir / f"{alias_output_name}{agent_config['extension']}"
alias_file = commands_dir / f"{alias}{agent_config['extension']}"
alias_file.parent.mkdir(parents=True, exist_ok=True)
alias_file.write_text(alias_output, encoding="utf-8")
alias_file.write_text(output, encoding="utf-8")
if agent_name == "copilot":
self.write_copilot_prompt(project_root, alias)
registered.append(alias)
@@ -528,7 +378,7 @@ class CommandRegistrar:
results = {}

for agent_name, agent_config in self.AGENT_CONFIGS.items():
agent_dir = project_root / agent_config["dir"]
agent_dir = project_root / agent_config["dir"].split("/")[0]

if agent_dir.exists():
try:
@@ -562,8 +412,7 @@ class CommandRegistrar:
commands_dir = project_root / agent_config["dir"]

for cmd_name in cmd_names:
output_name = self._compute_output_name(agent_name, cmd_name, agent_config)
cmd_file = commands_dir / f"{output_name}{agent_config['extension']}"
cmd_file = commands_dir / f"{cmd_name}{agent_config['extension']}"
if cmd_file.exists():
cmd_file.unlink()


@@ -41,26 +41,6 @@ class CompatibilityError(ExtensionError):
pass


def normalize_priority(value: Any, default: int = 10) -> int:
"""Normalize a stored priority value for sorting and display.

Corrupted registry data may contain missing, non-numeric, or non-positive
values. In those cases, fall back to the default priority.

Args:
value: Priority value to normalize (may be int, str, None, etc.)
default: Default priority to use for invalid values (default: 10)

Returns:
Normalized priority as positive integer (>= 1)
"""
try:
priority = int(value)
except (TypeError, ValueError):
return default
return priority if priority >= 1 else default
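Behaviour on the corrupt inputs the docstring mentions:

for raw in (5, "3", None, "high", 0, -2):
    print(raw, "->", normalize_priority(raw))
# 5 -> 5, 3 -> 3, None -> 10, high -> 10, 0 -> 10, -2 -> 10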


@dataclass
class CatalogEntry:
"""Represents a single catalog entry in the catalog stack."""
@@ -222,17 +202,7 @@ class ExtensionRegistry:

try:
with open(self.registry_path, 'r') as f:
data = json.load(f)
# Validate loaded data is a dict (handles corrupted registry files)
if not isinstance(data, dict):
return {
"schema_version": self.SCHEMA_VERSION,
"extensions": {}
}
# Normalize extensions field (handles corrupted extensions value)
if not isinstance(data.get("extensions"), dict):
data["extensions"] = {}
return data
return json.load(f)
except (json.JSONDecodeError, FileNotFoundError):
# Corrupted or missing registry, start fresh
return {
@@ -254,7 +224,7 @@
metadata: Extension metadata (version, source, etc.)
"""
self.data["extensions"][extension_id] = {
**copy.deepcopy(metadata),
**metadata,
"installed_at": datetime.now(timezone.utc).isoformat()
}
self._save()
@@ -277,16 +247,12 @@
Raises:
KeyError: If extension is not installed
"""
extensions = self.data.get("extensions")
if not isinstance(extensions, dict) or extension_id not in extensions:
if extension_id not in self.data["extensions"]:
raise KeyError(f"Extension '{extension_id}' is not installed")
# Merge new metadata with existing, preserving original installed_at
existing = extensions[extension_id]
# Handle corrupted registry entries (e.g., string/list instead of dict)
if not isinstance(existing, dict):
existing = {}
# Merge: existing fields preserved, new fields override (deep copy to prevent caller mutation)
merged = {**existing, **copy.deepcopy(metadata)}
existing = self.data["extensions"][extension_id]
# Merge: existing fields preserved, new fields override
merged = {**existing, **metadata}
# Always preserve original installed_at based on key existence, not truthiness,
# to handle cases where the field exists but may be falsy (legacy/corruption)
if "installed_at" in existing:
@@ -294,7 +260,7 @@
else:
# If not present in existing, explicitly remove from merged if caller provided it
merged.pop("installed_at", None)
extensions[extension_id] = merged
self.data["extensions"][extension_id] = merged
self._save()
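The merge rule above, in isolation — caller-supplied fields override existing ones, but installed_at always survives from the existing entry when the key is present:

existing = {"version": "1.0.0", "installed_at": "2026-01-01T00:00:00+00:00"}
updates  = {"version": "1.1.0", "installed_at": "bogus"}
merged = {**existing, **updates}
if "installed_at" in existing:
    merged["installed_at"] = existing["installed_at"]
print(merged)   # {'version': '1.1.0', 'installed_at': '2026-01-01T00:00:00+00:00'}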

def restore(self, extension_id: str, metadata: dict):
@@ -307,16 +273,8 @@
Args:
extension_id: Extension ID
metadata: Complete extension metadata including installed_at

Raises:
ValueError: If metadata is None or not a dict
"""
if metadata is None or not isinstance(metadata, dict):
raise ValueError(f"Cannot restore '{extension_id}': metadata must be a dict")
# Ensure extensions dict exists (handle corrupted registry)
if not isinstance(self.data.get("extensions"), dict):
self.data["extensions"] = {}
self.data["extensions"][extension_id] = copy.deepcopy(metadata)
self.data["extensions"][extension_id] = dict(metadata)
self._save()

def remove(self, extension_id: str):
@@ -325,11 +283,8 @@
Args:
extension_id: Extension ID
"""
extensions = self.data.get("extensions")
if not isinstance(extensions, dict):
return
if extension_id in extensions:
del extensions[extension_id]
if extension_id in self.data["extensions"]:
del self.data["extensions"][extension_id]
self._save()

def get(self, extension_id: str) -> Optional[dict]:
@@ -342,49 +297,21 @@
extension_id: Extension ID

Returns:
Deep copy of extension metadata, or None if not found or corrupted
Deep copy of extension metadata, or None if not found
"""
extensions = self.data.get("extensions")
if not isinstance(extensions, dict):
return None
entry = extensions.get(extension_id)
# Return None for missing or corrupted (non-dict) entries
if entry is None or not isinstance(entry, dict):
return None
return copy.deepcopy(entry)
entry = self.data["extensions"].get(extension_id)
return copy.deepcopy(entry) if entry is not None else None

def list(self) -> Dict[str, dict]:
"""Get all installed extensions with valid metadata.
"""Get all installed extensions.

Returns a deep copy of extensions with dict metadata only.
Corrupted entries (non-dict values) are filtered out.
Returns a deep copy of the extensions mapping to prevent callers
from accidentally mutating nested internal registry state.

Returns:
Dictionary of extension_id -> metadata (deep copies), empty dict if corrupted
Dictionary of extension_id -> metadata (deep copies)
"""
extensions = self.data.get("extensions", {}) or {}
if not isinstance(extensions, dict):
return {}
# Filter to only valid dict entries to match type contract
return {
ext_id: copy.deepcopy(meta)
for ext_id, meta in extensions.items()
if isinstance(meta, dict)
}

def keys(self) -> set:
"""Get all extension IDs including corrupted entries.

Lightweight method that returns IDs without deep-copying metadata.
Use this when you only need to check which extensions are tracked.

Returns:
Set of extension IDs (includes corrupted entries)
"""
extensions = self.data.get("extensions", {}) or {}
if not isinstance(extensions, dict):
return set()
return set(extensions.keys())
return copy.deepcopy(self.data["extensions"])

def is_installed(self, extension_id: str) -> bool:
"""Check if extension is installed.
@@ -393,44 +320,9 @@
extension_id: Extension ID

Returns:
True if extension is installed, False if not or registry corrupted
True if extension is installed
"""
extensions = self.data.get("extensions")
if not isinstance(extensions, dict):
return False
return extension_id in extensions

def list_by_priority(self, include_disabled: bool = False) -> List[tuple]:
"""Get all installed extensions sorted by priority.

Lower priority number = higher precedence (checked first).
Extensions with equal priority are sorted alphabetically by ID
for deterministic ordering.

Args:
include_disabled: If True, include disabled extensions. Default False.

Returns:
List of (extension_id, metadata_copy) tuples sorted by priority.
Metadata is deep-copied to prevent accidental mutation.
"""
extensions = self.data.get("extensions", {}) or {}
if not isinstance(extensions, dict):
extensions = {}
sortable_extensions = []
for ext_id, meta in extensions.items():
if not isinstance(meta, dict):
continue
# Skip disabled extensions unless explicitly requested
if not include_disabled and not meta.get("enabled", True):
continue
metadata_copy = copy.deepcopy(meta)
metadata_copy["priority"] = normalize_priority(metadata_copy.get("priority", 10))
sortable_extensions.append((ext_id, metadata_copy))
return sorted(
sortable_extensions,
key=lambda item: (item[1]["priority"], item[0]),
)
return extension_id in self.data["extensions"]
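The sort key used by list_by_priority gives a deterministic order — lower priority numbers first, ties broken alphabetically by extension ID (the entries here are illustrative):

entries = {"b-ext": {"priority": 5}, "a-ext": {"priority": 5}, "core": {"priority": 1}}
order = sorted(entries.items(), key=lambda kv: (kv[1]["priority"], kv[0]))
print([eid for eid, _ in order])   # ['core', 'a-ext', 'b-ext']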
|
||||
|
||||
|
||||
class ExtensionManager:
|
||||
@@ -548,8 +440,7 @@ class ExtensionManager:
|
||||
self,
|
||||
source_dir: Path,
|
||||
speckit_version: str,
|
||||
register_commands: bool = True,
|
||||
priority: int = 10,
|
||||
register_commands: bool = True
|
||||
) -> ExtensionManifest:
|
||||
"""Install extension from a local directory.
|
||||
|
||||
@@ -557,19 +448,14 @@ class ExtensionManager:
|
||||
source_dir: Path to extension directory
|
||||
speckit_version: Current spec-kit version
|
||||
register_commands: If True, register commands with AI agents
|
||||
priority: Resolution priority (lower = higher precedence, default 10)
|
||||
|
||||
Returns:
|
||||
Installed extension manifest
|
||||
|
||||
Raises:
|
||||
ValidationError: If manifest is invalid or priority is invalid
|
||||
ValidationError: If manifest is invalid
|
||||
CompatibilityError: If extension is incompatible
|
||||
"""
|
||||
# Validate priority
|
||||
if priority < 1:
|
||||
raise ValidationError("Priority must be a positive integer (1 or higher)")
|
||||
|
||||
# Load and validate manifest
|
||||
manifest_path = source_dir / "extension.yml"
|
||||
manifest = ExtensionManifest(manifest_path)
|
||||
@@ -611,7 +497,6 @@ class ExtensionManager:
|
||||
"source": "local",
|
||||
"manifest_hash": manifest.get_hash(),
|
||||
"enabled": True,
|
||||
"priority": priority,
|
||||
"registered_commands": registered_commands
|
||||
})
|
||||
|
||||
@@ -620,27 +505,21 @@ class ExtensionManager:
|
||||
def install_from_zip(
|
||||
self,
|
||||
zip_path: Path,
|
||||
speckit_version: str,
|
||||
priority: int = 10,
|
||||
speckit_version: str
|
||||
) -> ExtensionManifest:
|
||||
"""Install extension from ZIP file.
|
||||
|
||||
Args:
|
||||
zip_path: Path to extension ZIP file
|
||||
speckit_version: Current spec-kit version
|
||||
priority: Resolution priority (lower = higher precedence, default 10)
|
||||
|
||||
Returns:
|
||||
Installed extension manifest
|
||||
|
||||
Raises:
|
||||
ValidationError: If manifest is invalid or priority is invalid
|
||||
ValidationError: If manifest is invalid
|
||||
CompatibilityError: If extension is incompatible
|
||||
"""
|
||||
# Validate priority early
|
||||
if priority < 1:
|
||||
raise ValidationError("Priority must be a positive integer (1 or higher)")
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
temp_path = Path(tmpdir)
|
||||
|
||||
@@ -675,7 +554,7 @@ class ExtensionManager:
|
||||
raise ValidationError("No extension.yml found in ZIP file")
|
||||
|
||||
# Install from extracted directory
|
||||
return self.install_from_directory(extension_dir, speckit_version, priority=priority)
|
||||
return self.install_from_directory(extension_dir, speckit_version)
|
||||
|
||||
def remove(self, extension_id: str, keep_config: bool = False) -> bool:
|
||||
"""Remove an installed extension.
|
||||
@@ -692,7 +571,7 @@ class ExtensionManager:
|
||||
|
||||
# Get registered commands before removal
|
||||
metadata = self.registry.get(extension_id)
|
||||
registered_commands = metadata.get("registered_commands", {}) if metadata else {}
|
||||
registered_commands = metadata.get("registered_commands", {})
|
||||
|
||||
extension_dir = self.extensions_dir / extension_id
|
||||
|
||||
@@ -753,9 +632,6 @@ class ExtensionManager:
|
||||
result = []
|
||||
|
||||
for ext_id, metadata in self.registry.list().items():
|
||||
# Ensure metadata is a dictionary to avoid AttributeError when using .get()
|
||||
if not isinstance(metadata, dict):
|
||||
metadata = {}
|
||||
ext_dir = self.extensions_dir / ext_id
|
||||
manifest_path = ext_dir / "extension.yml"
|
||||
|
||||
@@ -767,7 +643,6 @@ class ExtensionManager:
|
||||
"version": metadata.get("version", "unknown"),
|
||||
"description": manifest.description,
|
||||
"enabled": metadata.get("enabled", True),
|
||||
"priority": normalize_priority(metadata.get("priority")),
|
||||
"installed_at": metadata.get("installed_at"),
|
||||
"command_count": len(manifest.commands),
|
||||
"hook_count": len(manifest.hooks)
|
||||
@@ -780,7 +655,6 @@ class ExtensionManager:
|
||||
"version": metadata.get("version", "unknown"),
|
||||
"description": "⚠️ Corrupted extension",
|
||||
"enabled": False,
|
||||
"priority": normalize_priority(metadata.get("priority")),
|
||||
"installed_at": metadata.get("installed_at"),
|
||||
"command_count": 0,
|
||||
"hook_count": 0
|
||||
|
||||
@@ -7,7 +7,6 @@ Presets are self-contained, versioned collections of templates
|
||||
customize the Spec-Driven Development workflow.
|
||||
"""
|
||||
|
||||
import copy
|
||||
import json
|
||||
import hashlib
|
||||
import os
|
||||
@@ -24,8 +23,6 @@ import yaml
|
||||
from packaging import version as pkg_version
|
||||
from packaging.specifiers import SpecifierSet, InvalidSpecifier
|
||||
|
||||
from .extensions import ExtensionRegistry, normalize_priority
|
||||
|
||||
|
||||
@dataclass
|
||||
class PresetCatalogEntry:
|
||||
@@ -238,17 +235,7 @@ class PresetRegistry:
|
||||
|
||||
try:
|
||||
with open(self.registry_path, 'r') as f:
|
||||
data = json.load(f)
|
||||
# Validate loaded data is a dict (handles corrupted registry files)
|
||||
if not isinstance(data, dict):
|
||||
return {
|
||||
"schema_version": self.SCHEMA_VERSION,
|
||||
"presets": {}
|
||||
}
|
||||
# Normalize presets field (handles corrupted presets value)
|
||||
if not isinstance(data.get("presets"), dict):
|
||||
data["presets"] = {}
|
||||
return data
|
||||
return json.load(f)
|
||||
except (json.JSONDecodeError, FileNotFoundError):
|
||||
return {
|
||||
"schema_version": self.SCHEMA_VERSION,
|
||||
@@ -269,7 +256,7 @@ class PresetRegistry:
metadata: Pack metadata (version, source, etc.)
"""
self.data["presets"][pack_id] = {
**copy.deepcopy(metadata),
**metadata,
"installed_at": datetime.now(timezone.utc).isoformat()
}
self._save()
@@ -280,152 +267,41 @@ class PresetRegistry:
Args:
pack_id: Preset ID
"""
packs = self.data.get("presets")
if not isinstance(packs, dict):
return
if pack_id in packs:
del packs[pack_id]
if pack_id in self.data["presets"]:
del self.data["presets"][pack_id]
self._save()

def update(self, pack_id: str, updates: dict):
"""Update preset metadata in registry.

Merges the provided updates with the existing entry, preserving any
fields not specified. The installed_at timestamp is always preserved
from the original entry.

Args:
pack_id: Preset ID
updates: Partial metadata to merge into existing metadata

Raises:
KeyError: If preset is not installed
"""
packs = self.data.get("presets")
if not isinstance(packs, dict) or pack_id not in packs:
raise KeyError(f"Preset '{pack_id}' not found in registry")
existing = packs[pack_id]
# Handle corrupted registry entries (e.g., string/list instead of dict)
if not isinstance(existing, dict):
existing = {}
# Merge: existing fields preserved, new fields override (deep copy to prevent caller mutation)
merged = {**existing, **copy.deepcopy(updates)}
# Always preserve original installed_at based on key existence, not truthiness,
# to handle cases where the field exists but may be falsy (legacy/corruption)
if "installed_at" in existing:
merged["installed_at"] = existing["installed_at"]
else:
# If not present in existing, explicitly remove from merged if caller provided it
merged.pop("installed_at", None)
packs[pack_id] = merged
self._save()
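
The `update` docstring above pins down two subtleties: updates are deep-copied before merging, and `installed_at` survives by key existence rather than truthiness. A small sketch of that merge, using hypothetical metadata values:

```python
import copy

# Simulate the merge performed by the update path (sketch, not the real class).
existing = {"version": "1.0.0", "installed_at": "2024-01-01T00:00:00+00:00"}
updates = {"version": "1.1.0", "source": "zip"}

merged = {**existing, **copy.deepcopy(updates)}
# installed_at is preserved by key existence, not truthiness.
if "installed_at" in existing:
    merged["installed_at"] = existing["installed_at"]
else:
    merged.pop("installed_at", None)

assert merged == {
    "version": "1.1.0",
    "source": "zip",
    "installed_at": "2024-01-01T00:00:00+00:00",
}
```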

def restore(self, pack_id: str, metadata: dict):
"""Restore preset metadata to registry without modifying timestamps.

Use this method for rollback scenarios where you have a complete backup
of the registry entry (including installed_at) and want to restore it
exactly as it was.

Args:
pack_id: Preset ID
metadata: Complete preset metadata including installed_at

Raises:
ValueError: If metadata is None or not a dict
"""
if metadata is None or not isinstance(metadata, dict):
raise ValueError(f"Cannot restore '{pack_id}': metadata must be a dict")
# Ensure presets dict exists (handle corrupted registry)
if not isinstance(self.data.get("presets"), dict):
self.data["presets"] = {}
self.data["presets"][pack_id] = copy.deepcopy(metadata)
self._save()

def get(self, pack_id: str) -> Optional[dict]:
"""Get preset metadata from registry.

Returns a deep copy to prevent callers from accidentally mutating
nested internal registry state without going through the write path.

Args:
pack_id: Preset ID

Returns:
Deep copy of preset metadata, or None if not found or corrupted
Pack metadata or None if not found
"""
packs = self.data.get("presets")
if not isinstance(packs, dict):
return None
entry = packs.get(pack_id)
# Return None for missing or corrupted (non-dict) entries
if entry is None or not isinstance(entry, dict):
return None
return copy.deepcopy(entry)
return self.data["presets"].get(pack_id)

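Why `get` returns a deep copy: without one, a caller that mutates a nested value silently rewrites registry state. A tiny illustration with a made-up registry dict:

```python
import copy

registry_data = {"presets": {"my-pack": {"tags": ["a"]}}}

# Without a deep copy, a caller can mutate nested registry state in place:
entry = registry_data["presets"]["my-pack"]
entry["tags"].append("b")  # silently changes the registry

# With the deep-copy contract, mutations stay local to the caller:
safe = copy.deepcopy(registry_data["presets"]["my-pack"])
safe["tags"].append("c")
assert registry_data["presets"]["my-pack"]["tags"] == ["a", "b"]  # untouched by `safe`
```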
def list(self) -> Dict[str, dict]:
"""Get all installed presets with valid metadata.

Returns a deep copy of presets with dict metadata only.
Corrupted entries (non-dict values) are filtered out.
"""Get all installed presets.

Returns:
Dictionary of pack_id -> metadata (deep copies), empty dict if corrupted
Dictionary of pack_id -> metadata
"""
packs = self.data.get("presets", {}) or {}
if not isinstance(packs, dict):
return {}
# Filter to only valid dict entries to match type contract
return {
pack_id: copy.deepcopy(meta)
for pack_id, meta in packs.items()
if isinstance(meta, dict)
}
return self.data["presets"]

def keys(self) -> set:
"""Get all preset IDs including corrupted entries.

Lightweight method that returns IDs without deep-copying metadata.
Use this when you only need to check which presets are tracked.

Returns:
Set of preset IDs (includes corrupted entries)
"""
packs = self.data.get("presets", {}) or {}
if not isinstance(packs, dict):
return set()
return set(packs.keys())

def list_by_priority(self, include_disabled: bool = False) -> List[tuple]:
def list_by_priority(self) -> List[tuple]:
"""Get all installed presets sorted by priority.

Lower priority number = higher precedence (checked first).
Presets with equal priority are sorted alphabetically by ID
for deterministic ordering.

Args:
include_disabled: If True, include disabled presets. Default False.

Returns:
List of (pack_id, metadata_copy) tuples sorted by priority.
Metadata is deep-copied to prevent accidental mutation.
List of (pack_id, metadata) tuples sorted by priority
"""
packs = self.data.get("presets", {}) or {}
if not isinstance(packs, dict):
packs = {}
sortable_packs = []
for pack_id, meta in packs.items():
if not isinstance(meta, dict):
continue
# Skip disabled presets unless explicitly requested
if not include_disabled and not meta.get("enabled", True):
continue
metadata_copy = copy.deepcopy(meta)
metadata_copy["priority"] = normalize_priority(metadata_copy.get("priority", 10))
sortable_packs.append((pack_id, metadata_copy))
packs = self.data["presets"]
return sorted(
sortable_packs,
key=lambda item: (item[1]["priority"], item[0]),
packs.items(),
key=lambda item: item[1].get("priority", 10),
)

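The `(priority, pack_id)` sort key above is what makes the ordering deterministic when priorities tie. A compact demonstration with illustrative pack IDs:

```python
packs = {
    "zeta":  {"priority": 10},
    "alpha": {"priority": 10},
    "core":  {"priority": 1},
}

# Equal priorities tie-break alphabetically by ID, as the docstring requires.
ordered = sorted(packs.items(), key=lambda item: (item[1]["priority"], item[0]))
assert [pack_id for pack_id, _ in ordered] == ["core", "alpha", "zeta"]
```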
def is_installed(self, pack_id: str) -> bool:
@@ -435,12 +311,9 @@ class PresetRegistry:
pack_id: Preset ID

Returns:
True if pack is installed, False if not or registry corrupted
True if pack is installed
"""
packs = self.data.get("presets")
if not isinstance(packs, dict):
return False
return pack_id in packs
return pack_id in self.data["presets"]


class PresetManager:
@@ -646,6 +519,8 @@ class PresetManager:
short_name = cmd_name
if short_name.startswith("speckit."):
short_name = short_name[len("speckit."):]
# Kimi CLI discovers skills by directory name and invokes them as
# /skill:<name> — use dot separator to match packaging convention.
if selected_ai == "kimi":
skill_name = f"speckit.{short_name}"
else:
@@ -805,13 +680,9 @@ class PresetManager:
Installed preset manifest

Raises:
PresetValidationError: If manifest is invalid or priority is invalid
PresetValidationError: If manifest is invalid
PresetCompatibilityError: If pack is incompatible
"""
# Validate priority
if priority < 1:
raise PresetValidationError("Priority must be a positive integer (1 or higher)")

manifest_path = source_dir / "preset.yml"
manifest = PresetManifest(manifest_path)

@@ -858,19 +729,14 @@ class PresetManager:
Args:
zip_path: Path to preset ZIP file
speckit_version: Current spec-kit version
priority: Resolution priority (lower = higher precedence, default 10)

Returns:
Installed preset manifest

Raises:
PresetValidationError: If manifest is invalid or priority is invalid
PresetValidationError: If manifest is invalid
PresetCompatibilityError: If pack is incompatible
"""
# Validate priority early
if priority < 1:
raise PresetValidationError("Priority must be a positive integer (1 or higher)")

with tempfile.TemporaryDirectory() as tmpdir:
temp_path = Path(tmpdir)

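The ZIP install path above validates `priority` before any I/O and then unpacks into a scratch directory. A self-contained sketch of that shape (the function is hypothetical, not the `PresetManager` method):

```python
import tempfile
import zipfile
from pathlib import Path

def install_from_zip(zip_path: Path, priority: int = 10) -> bool:
    """Validate priority up front, then unpack into a scratch dir for validation."""
    if priority < 1:
        raise ValueError("Priority must be a positive integer (1 or higher)")
    with tempfile.TemporaryDirectory() as tmpdir:
        temp_path = Path(tmpdir)
        with zipfile.ZipFile(zip_path) as zf:
            zf.extractall(temp_path)  # real code should vet archive members first
        # Validate the manifest while the scratch dir is still alive.
        return (temp_path / "preset.yml").exists()
```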
@@ -942,9 +808,6 @@ class PresetManager:
result = []

for pack_id, metadata in self.registry.list().items():
# Ensure metadata is a dictionary to avoid AttributeError when using .get()
if not isinstance(metadata, dict):
metadata = {}
pack_dir = self.presets_dir / pack_id
manifest_path = pack_dir / "preset.yml"

@@ -953,13 +816,13 @@ class PresetManager:
result.append({
"id": pack_id,
"name": manifest.name,
"version": metadata.get("version", manifest.version),
"version": metadata["version"],
"description": manifest.description,
"enabled": metadata.get("enabled", True),
"installed_at": metadata.get("installed_at"),
"template_count": len(manifest.templates),
"tags": manifest.tags,
"priority": normalize_priority(metadata.get("priority")),
"priority": metadata.get("priority", 10),
})
except PresetValidationError:
result.append({
@@ -971,7 +834,7 @@ class PresetManager:
"installed_at": metadata.get("installed_at"),
"template_count": 0,
"tags": [],
"priority": normalize_priority(metadata.get("priority")),
"priority": metadata.get("priority", 10),
})

return result
@@ -1530,48 +1393,6 @@ class PresetResolver:
self.overrides_dir = self.templates_dir / "overrides"
self.extensions_dir = project_root / ".specify" / "extensions"

def _get_all_extensions_by_priority(self) -> list[tuple[int, str, dict | None]]:
"""Build unified list of registered and unregistered extensions sorted by priority.

Registered extensions use their stored priority; unregistered directories
get implicit priority=10. Results are sorted by (priority, ext_id) for
deterministic ordering.

Returns:
List of (priority, ext_id, metadata_or_none) tuples sorted by priority.
"""
if not self.extensions_dir.exists():
return []

registry = ExtensionRegistry(self.extensions_dir)
# Use keys() to track ALL extensions (including corrupted entries) without deep copy
# This prevents corrupted entries from being picked up as "unregistered" dirs
registered_extension_ids = registry.keys()

# Get all registered extensions including disabled; we filter disabled manually below
all_registered = registry.list_by_priority(include_disabled=True)

all_extensions: list[tuple[int, str, dict | None]] = []

# Only include enabled extensions in the result
for ext_id, metadata in all_registered:
# Skip disabled extensions
if not metadata.get("enabled", True):
continue
priority = normalize_priority(metadata.get("priority") if metadata else None)
all_extensions.append((priority, ext_id, metadata))

# Add unregistered directories with implicit priority=10
for ext_dir in self.extensions_dir.iterdir():
if not ext_dir.is_dir() or ext_dir.name.startswith("."):
continue
if ext_dir.name not in registered_extension_ids:
all_extensions.append((10, ext_dir.name, None))

# Sort by (priority, ext_id) for deterministic ordering
all_extensions.sort(key=lambda x: (x[0], x[1]))
return all_extensions

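A condensed sketch of what `_get_all_extensions_by_priority` does, assuming a plain dict of registered metadata instead of the real `ExtensionRegistry`, so the merge-and-sort shape is visible in isolation:

```python
from pathlib import Path

def extensions_by_priority(extensions_dir: Path, registered: dict) -> list:
    """Merge registered metadata with on-disk dirs; unregistered dirs get priority 10."""
    if not extensions_dir.exists():
        return []
    items = []
    for ext_id, meta in registered.items():
        if not meta.get("enabled", True):
            continue  # disabled extensions never participate in resolution
        items.append((meta.get("priority", 10), ext_id, meta))
    for ext_dir in extensions_dir.iterdir():
        if ext_dir.is_dir() and not ext_dir.name.startswith(".") and ext_dir.name not in registered:
            items.append((10, ext_dir.name, None))  # implicit priority for unregistered dirs
    items.sort(key=lambda x: (x[0], x[1]))  # deterministic: priority first, then ID
    return items
```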
def resolve(
self,
template_name: str,
@@ -1624,18 +1445,18 @@ class PresetResolver:
if candidate.exists():
return candidate

# Priority 3: Extension-provided templates (sorted by priority — lower number wins)
for _priority, ext_id, _metadata in self._get_all_extensions_by_priority():
ext_dir = self.extensions_dir / ext_id
if not ext_dir.is_dir():
continue
for subdir in subdirs:
if subdir:
candidate = ext_dir / subdir / f"{template_name}{ext}"
else:
candidate = ext_dir / f"{template_name}{ext}"
if candidate.exists():
return candidate
# Priority 3: Extension-provided templates
if self.extensions_dir.exists():
for ext_dir in sorted(self.extensions_dir.iterdir()):
if not ext_dir.is_dir() or ext_dir.name.startswith("."):
continue
for subdir in subdirs:
if subdir:
candidate = ext_dir / subdir / f"{template_name}{ext}"
else:
candidate = ext_dir / "templates" / f"{template_name}{ext}"
if candidate.exists():
return candidate

# Priority 4: Core templates
if template_type == "template":
@@ -1693,24 +1514,17 @@ class PresetResolver:
except ValueError:
continue

for _priority, ext_id, ext_meta in self._get_all_extensions_by_priority():
ext_dir = self.extensions_dir / ext_id
if not ext_dir.is_dir():
continue
try:
resolved.relative_to(ext_dir)
if ext_meta:
version = ext_meta.get("version", "?")
if self.extensions_dir.exists():
for ext_dir in sorted(self.extensions_dir.iterdir()):
if not ext_dir.is_dir() or ext_dir.name.startswith("."):
continue
try:
resolved.relative_to(ext_dir)
return {
"path": resolved_str,
"source": f"extension:{ext_id} v{version}",
"source": f"extension:{ext_dir.name}",
}
else:
return {
"path": resolved_str,
"source": f"extension:{ext_id} (unregistered)",
}
except ValueError:
continue
except ValueError:
continue

return {"path": resolved_str, "source": "core"}

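Both versions of `resolve` above walk a fixed precedence ladder and return the first existing candidate. A stripped-down sketch of that first-hit-wins lookup (directory ordering and file extension are illustrative):

```python
from pathlib import Path

def resolve_template(name: str, search_dirs: list[Path]) -> Path | None:
    """Walk candidate dirs in precedence order; the first existing file wins.

    search_dirs would be: project overrides, then preset dirs by priority,
    then extension dirs by priority, then core templates (illustrative order).
    """
    for base in search_dirs:
        candidate = base / f"{name}.md"
        if candidate.exists():
            return candidate
    return None
```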
@@ -19,7 +19,7 @@ You **MUST** consider the user input before proceeding (if not empty).
- Check if `.specify/extensions.yml` exists in the project root.
- If it exists, read it and look for entries under the `hooks.before_implement` key
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
- Filter out hooks where `enabled` is explicitly `false`. Treat hooks without an `enabled` field as enabled by default.
- Filter to only hooks where `enabled: true`
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
@@ -174,7 +174,7 @@ Note: This command assumes a complete task breakdown exists in tasks.md. If task
10. **Check for extension hooks**: After completion validation, check if `.specify/extensions.yml` exists in the project root.
- If it exists, read it and look for entries under the `hooks.after_implement` key
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
- Filter out hooks where `enabled` is explicitly `false`. Treat hooks without an `enabled` field as enabled by default.
- Filter to only hooks where `enabled: true`
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation

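The hook-filtering rules repeated across these command templates reduce to a short routine. A sketch of how an implementation might apply them (PyYAML assumed; the function name is hypothetical):

```python
import yaml

def executable_hooks(extensions_yml_text: str, event: str) -> list[dict]:
    """Select hooks per the rules above: `enabled` defaults to true, and any
    non-empty `condition` defers the hook to the HookExecutor."""
    try:
        config = yaml.safe_load(extensions_yml_text) or {}
    except yaml.YAMLError:
        return []  # unparsable YAML: skip hook checking silently
    hooks = (config.get("hooks") or {}).get(event, []) or []
    selected = []
    for hook in hooks:
        if hook.get("enabled", True) is False:
            continue  # filtered out only when enabled is explicitly false
        if hook.get("condition"):
            continue  # non-empty condition: leave evaluation to the HookExecutor
        selected.append(hook)
    return selected
```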
@@ -24,40 +24,6 @@ $ARGUMENTS

You **MUST** consider the user input before proceeding (if not empty).

## Pre-Execution Checks

**Check for extension hooks (before planning)**:
- Check if `.specify/extensions.yml` exists in the project root.
- If it exists, read it and look for entries under the `hooks.before_plan` key
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
- Filter out hooks where `enabled` is explicitly `false`. Treat hooks without an `enabled` field as enabled by default.
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
- For each executable hook, output the following based on its `optional` flag:
- **Optional hook** (`optional: true`):
```
## Extension Hooks

**Optional Pre-Hook**: {extension}
Command: `/{command}`
Description: {description}

Prompt: {prompt}
To execute: `/{command}`
```
- **Mandatory hook** (`optional: false`):
```
## Extension Hooks

**Automatic Pre-Hook**: {extension}
Executing: `/{command}`
EXECUTE_COMMAND: {command}

Wait for the result of the hook command before proceeding to the Outline.
```
- If no hooks are registered or `.specify/extensions.yml` does not exist, skip silently

## Outline

1. **Setup**: Run `{SCRIPT}` from repo root and parse JSON for FEATURE_SPEC, IMPL_PLAN, SPECS_DIR, BRANCH. For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
@@ -75,35 +41,6 @@ You **MUST** consider the user input before proceeding (if not empty).

4. **Stop and report**: Command ends after Phase 2 planning. Report branch, IMPL_PLAN path, and generated artifacts.

5. **Check for extension hooks**: After reporting, check if `.specify/extensions.yml` exists in the project root.
- If it exists, read it and look for entries under the `hooks.after_plan` key
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
- Filter out hooks where `enabled` is explicitly `false`. Treat hooks without an `enabled` field as enabled by default.
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
- For each executable hook, output the following based on its `optional` flag:
- **Optional hook** (`optional: true`):
```
## Extension Hooks

**Optional Hook**: {extension}
Command: `/{command}`
Description: {description}

Prompt: {prompt}
To execute: `/{command}`
```
- **Mandatory hook** (`optional: false`):
```
## Extension Hooks

**Automatic Hook**: {extension}
Executing: `/{command}`
EXECUTE_COMMAND: {command}
```
- If no hooks are registered or `.specify/extensions.yml` does not exist, skip silently

## Phases

### Phase 0: Outline & Research

@@ -21,40 +21,6 @@ $ARGUMENTS

You **MUST** consider the user input before proceeding (if not empty).

## Pre-Execution Checks

**Check for extension hooks (before specification)**:
- Check if `.specify/extensions.yml` exists in the project root.
- If it exists, read it and look for entries under the `hooks.before_specify` key
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
- Filter out hooks where `enabled` is explicitly `false`. Treat hooks without an `enabled` field as enabled by default.
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
- For each executable hook, output the following based on its `optional` flag:
- **Optional hook** (`optional: true`):
```
## Extension Hooks

**Optional Pre-Hook**: {extension}
Command: `/{command}`
Description: {description}

Prompt: {prompt}
To execute: `/{command}`
```
- **Mandatory hook** (`optional: false`):
```
## Extension Hooks

**Automatic Pre-Hook**: {extension}
Executing: `/{command}`
EXECUTE_COMMAND: {command}

Wait for the result of the hook command before proceeding to the Outline.
```
- If no hooks are registered or `.specify/extensions.yml` does not exist, skip silently

## Outline

The text the user typed after `/speckit.specify` in the triggering message **is** the feature description. Assume you always have it available in this conversation even if `{ARGS}` appears literally below. Do not ask the user to repeat it unless they provided an empty command.
@@ -73,16 +39,10 @@ Given that feature description, do this:
- "Create a dashboard for analytics" → "analytics-dashboard"
- "Fix payment processing timeout bug" → "fix-payment-timeout"

2. **Create the feature branch** by running the script with `--short-name` (and `--json`). In sequential mode, do NOT pass `--number` — the script auto-detects the next available number. In timestamp mode, the script generates a `YYYYMMDD-HHMMSS` prefix automatically:

**Branch numbering mode**: Before running the script, check if `.specify/init-options.json` exists and read the `branch_numbering` value.
- If `"timestamp"`, add `--timestamp` (Bash) or `-Timestamp` (PowerShell) to the script invocation
- If `"sequential"` or absent, do not add any extra flag (default behavior)
2. **Create the feature branch** by running the script with `--short-name` (and `--json`), and do NOT pass `--number` (the script auto-detects the next globally available number across all branches and spec directories):

- Bash example: `{SCRIPT} --json --short-name "user-auth" "Add user authentication"`
- Bash (timestamp): `{SCRIPT} --json --timestamp --short-name "user-auth" "Add user authentication"`
- PowerShell example: `{SCRIPT} -Json -ShortName "user-auth" "Add user authentication"`
- PowerShell (timestamp): `{SCRIPT} -Json -Timestamp -ShortName "user-auth" "Add user authentication"`

**IMPORTANT**:
- Do NOT pass `--number` — the script determines the correct next number automatically
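
The branch-numbering instructions above amount to a small lookup: read `.specify/init-options.json` if present and map `branch_numbering` to a script flag. A hedged sketch (the function name is illustrative):

```python
import json
from pathlib import Path

def timestamp_flag(project_root: Path) -> list[str]:
    """Return the extra script flag implied by branch_numbering (sketch)."""
    options_file = project_root / ".specify" / "init-options.json"
    if options_file.exists():
        try:
            options = json.loads(options_file.read_text())
        except json.JSONDecodeError:
            return []
        if options.get("branch_numbering") == "timestamp":
            return ["--timestamp"]  # the PowerShell variant would use -Timestamp
    return []  # "sequential" or absent: default behavior, no extra flag
```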
@@ -216,35 +176,6 @@ Given that feature description, do this:

7. Report completion with branch name, spec file path, checklist results, and readiness for the next phase (`/speckit.clarify` or `/speckit.plan`).

8. **Check for extension hooks**: After reporting completion, check if `.specify/extensions.yml` exists in the project root.
- If it exists, read it and look for entries under the `hooks.after_specify` key
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
- Filter out hooks where `enabled` is explicitly `false`. Treat hooks without an `enabled` field as enabled by default.
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
- For each executable hook, output the following based on its `optional` flag:
- **Optional hook** (`optional: true`):
```
## Extension Hooks

**Optional Hook**: {extension}
Command: `/{command}`
Description: {description}

Prompt: {prompt}
To execute: `/{command}`
```
- **Mandatory hook** (`optional: false`):
```
## Extension Hooks

**Automatic Hook**: {extension}
Executing: `/{command}`
EXECUTE_COMMAND: {command}
```
- If no hooks are registered or `.specify/extensions.yml` does not exist, skip silently

**NOTE:** The script creates and checks out the new branch and initializes the spec file before writing.

## Quick Guidelines

@@ -28,7 +28,7 @@ You **MUST** consider the user input before proceeding (if not empty).
- Check if `.specify/extensions.yml` exists in the project root.
- If it exists, read it and look for entries under the `hooks.before_tasks` key
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
- Filter out hooks where `enabled` is explicitly `false`. Treat hooks without an `enabled` field as enabled by default.
- Filter to only hooks where `enabled: true`
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation
@@ -100,7 +100,7 @@ You **MUST** consider the user input before proceeding (if not empty).
6. **Check for extension hooks**: After tasks.md is generated, check if `.specify/extensions.yml` exists in the project root.
- If it exists, read it and look for entries under the `hooks.after_tasks` key
- If the YAML cannot be parsed or is invalid, skip hook checking silently and continue normally
- Filter out hooks where `enabled` is explicitly `false`. Treat hooks without an `enabled` field as enabled by default.
- Filter to only hooks where `enabled: true`
- For each remaining hook, do **not** attempt to interpret or evaluate hook `condition` expressions:
- If the hook has no `condition` field, or it is null/empty, treat the hook as executable
- If the hook defines a non-empty `condition`, skip the hook and leave condition evaluation to the HookExecutor implementation

@@ -29,17 +29,11 @@ class TestAgentConfigConsistency:
assert "q" not in cfg

def test_extension_registrar_includes_codex(self):
"""Extension command registrar should include codex targeting .agents/skills."""
"""Extension command registrar should include codex targeting .codex/prompts."""
cfg = CommandRegistrar.AGENT_CONFIGS

assert "codex" in cfg
assert cfg["codex"]["dir"] == ".agents/skills"
assert cfg["codex"]["extension"] == "/SKILL.md"

def test_runtime_codex_uses_native_skills(self):
"""Codex runtime config should point at .agents/skills."""
assert AGENT_CONFIG["codex"]["folder"] == ".agents/"
assert AGENT_CONFIG["codex"]["commands_subdir"] == "skills"
assert cfg["codex"]["dir"] == ".codex/prompts"

def test_release_agent_lists_include_kiro_cli_and_exclude_q(self):
"""Bash and PowerShell release scripts should agree on agent key set for Kiro."""
@@ -77,16 +71,6 @@ class TestAgentConfigConsistency:
assert re.search(r"shai\)\s*\n.*?\.shai/commands", sh_text, re.S) is not None
assert re.search(r"agy\)\s*\n.*?\.agent/commands", sh_text, re.S) is not None

def test_release_scripts_generate_codex_skills(self):
"""Release scripts should generate Codex skills in .agents/skills."""
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")

assert ".agents/skills" in sh_text
assert ".agents/skills" in ps_text
assert re.search(r"codex\)\s*\n.*?create_skills.*?\.agents/skills.*?\"-\"", sh_text, re.S) is not None
assert re.search(r"'codex'\s*\{.*?\.agents/skills.*?New-Skills.*?-Separator '-'", ps_text, re.S) is not None

def test_init_ai_help_includes_roo_and_kiro_alias(self):
"""CLI help text for --ai should stay in sync with agent config and alias guidance."""
assert "roo" in AI_ASSISTANT_HELP
@@ -249,221 +233,3 @@ class TestAgentConfigConsistency:
def test_ai_help_includes_kimi(self):
"""CLI help text for --ai should include kimi."""
assert "kimi" in AI_ASSISTANT_HELP

# --- Trae IDE consistency checks ---

def test_trae_in_agent_config(self):
"""AGENT_CONFIG should include trae with correct folder and commands_subdir."""
assert "trae" in AGENT_CONFIG
assert AGENT_CONFIG["trae"]["folder"] == ".trae/"
assert AGENT_CONFIG["trae"]["commands_subdir"] == "rules"
assert AGENT_CONFIG["trae"]["requires_cli"] is False
assert AGENT_CONFIG["trae"]["install_url"] is None

def test_trae_in_extension_registrar(self):
"""Extension command registrar should include trae using .trae/rules and markdown, if present."""
cfg = CommandRegistrar.AGENT_CONFIGS

assert "trae" in cfg
trae_cfg = cfg["trae"]
assert trae_cfg["format"] == "markdown"
assert trae_cfg["args"] == "$ARGUMENTS"
assert trae_cfg["extension"] == ".md"

def test_trae_in_release_agent_lists(self):
"""Bash and PowerShell release scripts should include trae in agent lists."""
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")

sh_match = re.search(r"ALL_AGENTS=\(([^)]*)\)", sh_text)
assert sh_match is not None
sh_agents = sh_match.group(1).split()

ps_match = re.search(r"\$AllAgents = @\(([^)]*)\)", ps_text)
assert ps_match is not None
ps_agents = re.findall(r"'([^']+)'", ps_match.group(1))

assert "trae" in sh_agents
assert "trae" in ps_agents

def test_trae_in_release_scripts_generate_commands(self):
"""Release scripts should generate markdown commands for trae in .trae/rules."""
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")

assert ".trae/rules" in sh_text
assert ".trae/rules" in ps_text
assert re.search(r"'trae'\s*\{.*?\.trae/rules", ps_text, re.S) is not None

def test_trae_in_github_release_output(self):
"""GitHub release script should include trae template packages."""
gh_release_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-github-release.sh").read_text(encoding="utf-8")

assert "spec-kit-template-trae-sh-" in gh_release_text
assert "spec-kit-template-trae-ps-" in gh_release_text

def test_trae_in_agent_context_scripts(self):
"""Agent context scripts should support trae agent type."""
bash_text = (REPO_ROOT / "scripts" / "bash" / "update-agent-context.sh").read_text(encoding="utf-8")
pwsh_text = (REPO_ROOT / "scripts" / "powershell" / "update-agent-context.ps1").read_text(encoding="utf-8")

assert "trae" in bash_text
assert "TRAE_FILE" in bash_text
assert "trae" in pwsh_text
assert "TRAE_FILE" in pwsh_text

def test_trae_in_powershell_validate_set(self):
"""PowerShell update-agent-context script should include 'trae' in ValidateSet."""
ps_text = (REPO_ROOT / "scripts" / "powershell" / "update-agent-context.ps1").read_text(encoding="utf-8")

validate_set_match = re.search(r"\[ValidateSet\(([^)]*)\)\]", ps_text)
assert validate_set_match is not None
validate_set_values = re.findall(r"'([^']+)'", validate_set_match.group(1))

assert "trae" in validate_set_values

def test_ai_help_includes_trae(self):
"""CLI help text for --ai should include trae."""
assert "trae" in AI_ASSISTANT_HELP

# --- Pi Coding Agent consistency checks ---

def test_pi_in_agent_config(self):
"""AGENT_CONFIG should include pi with correct folder and commands_subdir."""
assert "pi" in AGENT_CONFIG
assert AGENT_CONFIG["pi"]["folder"] == ".pi/"
assert AGENT_CONFIG["pi"]["commands_subdir"] == "prompts"
assert AGENT_CONFIG["pi"]["requires_cli"] is True
assert AGENT_CONFIG["pi"]["install_url"] is not None

def test_pi_in_extension_registrar(self):
"""Extension command registrar should include pi using .pi/prompts."""
cfg = CommandRegistrar.AGENT_CONFIGS

assert "pi" in cfg
pi_cfg = cfg["pi"]
assert pi_cfg["dir"] == ".pi/prompts"
assert pi_cfg["format"] == "markdown"
assert pi_cfg["args"] == "$ARGUMENTS"
assert pi_cfg["extension"] == ".md"

def test_pi_in_release_agent_lists(self):
"""Bash and PowerShell release scripts should include pi in agent lists."""
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")

sh_match = re.search(r"ALL_AGENTS=\(([^)]*)\)", sh_text)
assert sh_match is not None
sh_agents = sh_match.group(1).split()

ps_match = re.search(r"\$AllAgents = @\(([^)]*)\)", ps_text)
assert ps_match is not None
ps_agents = re.findall(r"'([^']+)'", ps_match.group(1))

assert "pi" in sh_agents
assert "pi" in ps_agents

def test_release_scripts_generate_pi_prompt_templates(self):
"""Release scripts should generate Markdown prompt templates for pi in .pi/prompts."""
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")

assert ".pi/prompts" in sh_text
assert ".pi/prompts" in ps_text
assert re.search(r"pi\)\s*\n.*?\.pi/prompts", sh_text, re.S) is not None
assert re.search(r"'pi'\s*\{.*?\.pi/prompts", ps_text, re.S) is not None

def test_pi_in_powershell_validate_set(self):
"""PowerShell update-agent-context script should include 'pi' in ValidateSet."""
ps_text = (REPO_ROOT / "scripts" / "powershell" / "update-agent-context.ps1").read_text(encoding="utf-8")

validate_set_match = re.search(r"\[ValidateSet\(([^)]*)\)\]", ps_text)
assert validate_set_match is not None
validate_set_values = re.findall(r"'([^']+)'", validate_set_match.group(1))

assert "pi" in validate_set_values

def test_pi_in_github_release_output(self):
"""GitHub release script should include pi template packages."""
gh_release_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-github-release.sh").read_text(encoding="utf-8")

assert "spec-kit-template-pi-sh-" in gh_release_text
assert "spec-kit-template-pi-ps-" in gh_release_text

def test_agent_context_scripts_include_pi(self):
"""Agent context scripts should support pi agent type."""
bash_text = (REPO_ROOT / "scripts" / "bash" / "update-agent-context.sh").read_text(encoding="utf-8")
pwsh_text = (REPO_ROOT / "scripts" / "powershell" / "update-agent-context.ps1").read_text(encoding="utf-8")

assert "pi" in bash_text
assert "Pi Coding Agent" in bash_text
assert "pi" in pwsh_text
assert "Pi Coding Agent" in pwsh_text

def test_ai_help_includes_pi(self):
"""CLI help text for --ai should include pi."""
assert "pi" in AI_ASSISTANT_HELP

# --- iFlow CLI consistency checks ---

def test_iflow_in_agent_config(self):
"""AGENT_CONFIG should include iflow with correct folder and commands_subdir."""
assert "iflow" in AGENT_CONFIG
assert AGENT_CONFIG["iflow"]["folder"] == ".iflow/"
assert AGENT_CONFIG["iflow"]["commands_subdir"] == "commands"
assert AGENT_CONFIG["iflow"]["requires_cli"] is True

def test_iflow_in_extension_registrar(self):
"""Extension command registrar should include iflow targeting .iflow/commands."""
cfg = CommandRegistrar.AGENT_CONFIGS

assert "iflow" in cfg
assert cfg["iflow"]["dir"] == ".iflow/commands"
assert cfg["iflow"]["format"] == "markdown"
assert cfg["iflow"]["args"] == "$ARGUMENTS"

def test_iflow_in_release_agent_lists(self):
"""Bash and PowerShell release scripts should include iflow in agent lists."""
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")

sh_match = re.search(r"ALL_AGENTS=\(([^)]*)\)", sh_text)
assert sh_match is not None
sh_agents = sh_match.group(1).split()

ps_match = re.search(r"\$AllAgents = @\(([^)]*)\)", ps_text)
assert ps_match is not None
ps_agents = re.findall(r"'([^']+)'", ps_match.group(1))

assert "iflow" in sh_agents
assert "iflow" in ps_agents

def test_iflow_in_release_scripts_build_variant(self):
"""Release scripts should generate Markdown commands for iflow in .iflow/commands."""
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")

assert ".iflow/commands" in sh_text
assert ".iflow/commands" in ps_text
assert re.search(r"'iflow'\s*\{.*?\.iflow/commands", ps_text, re.S) is not None

def test_iflow_in_github_release_output(self):
"""GitHub release script should include iflow template packages."""
gh_release_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-github-release.sh").read_text(encoding="utf-8")

assert "spec-kit-template-iflow-sh-" in gh_release_text
assert "spec-kit-template-iflow-ps-" in gh_release_text

def test_iflow_in_agent_context_scripts(self):
"""Agent context scripts should support iflow agent type."""
bash_text = (REPO_ROOT / "scripts" / "bash" / "update-agent-context.sh").read_text(encoding="utf-8")
pwsh_text = (REPO_ROOT / "scripts" / "powershell" / "update-agent-context.ps1").read_text(encoding="utf-8")

assert "iflow" in bash_text
assert "IFLOW_FILE" in bash_text
assert "iflow" in pwsh_text
assert "IFLOW_FILE" in pwsh_text

def test_ai_help_includes_iflow(self):
"""CLI help text for --ai should include iflow."""
assert "iflow" in AI_ASSISTANT_HELP

@@ -62,7 +62,7 @@ def templates_dir(project_dir):
tpl_root.mkdir(parents=True, exist_ok=True)

# Template with valid YAML frontmatter
(tpl_root / "speckit.specify.md").write_text(
(tpl_root / "specify.md").write_text(
"---\n"
"description: Create or update the feature specification.\n"
"handoffs:\n"
@@ -79,7 +79,7 @@ def templates_dir(project_dir):
)

# Template with minimal frontmatter
(tpl_root / "speckit.plan.md").write_text(
(tpl_root / "plan.md").write_text(
"---\n"
"description: Generate implementation plan.\n"
"---\n"
@@ -91,7 +91,7 @@ def templates_dir(project_dir):
)

# Template with no frontmatter
(tpl_root / "speckit.tasks.md").write_text(
(tpl_root / "tasks.md").write_text(
"# Tasks Command\n"
"\n"
"Body without frontmatter.\n",
@@ -99,7 +99,7 @@ def templates_dir(project_dir):
)

# Template with empty YAML frontmatter (yaml.safe_load returns None)
(tpl_root / "speckit.empty_fm.md").write_text(
(tpl_root / "empty_fm.md").write_text(
"---\n"
"---\n"
"\n"
@@ -182,11 +182,6 @@ class TestGetSkillsDir:
result = _get_skills_dir(project_dir, "kiro-cli")
assert result == project_dir / ".kiro" / "skills"

def test_pi_skills_dir(self, project_dir):
"""Pi should use .pi/skills/."""
result = _get_skills_dir(project_dir, "pi")
assert result == project_dir / ".pi" / "skills"

def test_unknown_agent_uses_default(self, project_dir):
"""Unknown agents should fall back to DEFAULT_SKILLS_DIR."""
result = _get_skills_dir(project_dir, "nonexistent-agent")
@@ -342,7 +337,7 @@ class TestInstallAiSkills:
cmds_dir = project_dir / ".claude" / "commands"
cmds_dir.mkdir(parents=True)

(cmds_dir / "speckit.broken.md").write_text(
(cmds_dir / "broken.md").write_text(
"---\n"
"description: [unclosed bracket\n"
" invalid: yaml: content: here\n"
@@ -427,27 +422,6 @@ class TestInstallAiSkills:
assert (cmds_dir / "speckit.specify.md").exists()
assert (cmds_dir / "speckit.plan.md").exists()

def test_pi_prompt_dir_installs_skills(self, project_dir):
"""Pi should install skills directly from .pi/prompts/."""
prompts_dir = project_dir / ".pi" / "prompts"
prompts_dir.mkdir(parents=True)
(prompts_dir / "speckit.specify.md").write_text(
"---\ndescription: Create or update the feature specification.\n---\n\n# Specify\n\nBody.\n"
)
(prompts_dir / "speckit.plan.md").write_text(
"---\ndescription: Generate implementation plan.\n---\n\n# Plan\n\nBody.\n"
)

result = install_ai_skills(project_dir, "pi")

assert result is True
skills_dir = project_dir / ".pi" / "skills"
assert skills_dir.exists()
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
assert len(skill_dirs) >= 1
assert (prompts_dir / "speckit.specify.md").exists()
assert (prompts_dir / "speckit.plan.md").exists()

@pytest.mark.parametrize("agent_key", [k for k in AGENT_CONFIG.keys() if k != "generic"])
def test_skills_install_for_all_agents(self, temp_dir, agent_key):
"""install_ai_skills should produce skills for every configured agent."""
@@ -456,12 +430,9 @@ class TestInstallAiSkills:

# Place .md templates in the agent's commands directory
agent_folder = AGENT_CONFIG[agent_key]["folder"]
commands_subdir = AGENT_CONFIG[agent_key].get("commands_subdir", "commands")
cmds_dir = proj / agent_folder.rstrip("/") / commands_subdir
cmds_dir = proj / agent_folder.rstrip("/") / "commands"
cmds_dir.mkdir(parents=True)
# Copilot uses speckit.*.agent.md templates; other agents use speckit.*.md
fname = "speckit.specify.agent.md" if agent_key == "copilot" else "speckit.specify.md"
(cmds_dir / fname).write_text(
(cmds_dir / "specify.md").write_text(
"---\ndescription: Test command\n---\n\n# Test\n\nBody.\n"
)

@@ -471,105 +442,13 @@ class TestInstallAiSkills:
skills_dir = _get_skills_dir(proj, agent_key)
assert skills_dir.exists()
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
# Kimi uses dotted skill names; other agents use hyphen-separated names.
# Kimi uses dot-separator (speckit.specify) to match /skill:speckit.* invocation;
# all other agents use hyphen-separator (speckit-specify).
expected_skill_name = "speckit.specify" if agent_key == "kimi" else "speckit-specify"
assert expected_skill_name in skill_dirs
assert (skills_dir / expected_skill_name / "SKILL.md").exists()

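The Kimi naming rule asserted in this hunk can be captured in a few lines. A sketch of the derivation (the function name is hypothetical; the separators match the assertions above):

```python
def skill_dir_name(command_name: str, agent: str) -> str:
    """Derive the on-disk skill directory name from a command template name."""
    short = command_name
    if short.startswith("speckit."):
        short = short[len("speckit."):]
    # Kimi invokes skills as /skill:speckit.<name>, so it keeps the dot separator;
    # every other agent uses the hyphenated form.
    return f"speckit.{short}" if agent == "kimi" else f"speckit-{short}"

assert skill_dir_name("speckit.specify", "kimi") == "speckit.specify"
assert skill_dir_name("speckit.specify", "claude") == "speckit-specify"
```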
def test_copilot_ignores_non_speckit_agents(self, project_dir):
|
||||
"""Non-speckit markdown in .github/agents/ must not produce skills."""
|
||||
agents_dir = project_dir / ".github" / "agents"
|
||||
agents_dir.mkdir(parents=True, exist_ok=True)
|
||||
(agents_dir / "speckit.plan.agent.md").write_text(
|
||||
"---\ndescription: Generate implementation plan.\n---\n\n# Plan\n\nBody.\n"
|
||||
)
|
||||
(agents_dir / "my-custom-agent.agent.md").write_text(
|
||||
"---\ndescription: A user custom agent\n---\n\n# Custom\n\nBody.\n"
|
||||
)
|
||||
|
||||
result = install_ai_skills(project_dir, "copilot")
|
||||
|
||||
assert result is True
|
||||
skills_dir = _get_skills_dir(project_dir, "copilot")
|
||||
assert skills_dir.exists()
|
||||
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
|
||||
assert "speckit-plan" in skill_dirs
|
||||
assert "speckit-my-custom-agent.agent" not in skill_dirs
|
||||
assert "speckit-my-custom-agent" not in skill_dirs
|
||||
|
||||
@pytest.mark.parametrize("agent_key,custom_file", [
|
||||
("claude", "review.md"),
|
||||
("cursor-agent", "deploy.md"),
|
||||
("qwen", "my-workflow.md"),
|
||||
])
|
||||
def test_non_speckit_commands_ignored_for_all_agents(self, temp_dir, agent_key, custom_file):
|
||||
"""User-authored command files must not produce skills for any agent."""
|
||||
proj = temp_dir / f"proj-{agent_key}"
|
||||
proj.mkdir()
|
||||
|
||||
agent_folder = AGENT_CONFIG[agent_key]["folder"]
|
||||
commands_subdir = AGENT_CONFIG[agent_key].get("commands_subdir", "commands")
|
||||
cmds_dir = proj / agent_folder.rstrip("/") / commands_subdir
|
||||
cmds_dir.mkdir(parents=True)
|
||||
(cmds_dir / "speckit.specify.md").write_text(
|
||||
"---\ndescription: Create spec.\n---\n\n# Specify\n\nBody.\n"
|
||||
)
|
||||
(cmds_dir / custom_file).write_text(
|
||||
"---\ndescription: User custom command\n---\n\n# Custom\n\nBody.\n"
|
||||
)
|
||||
|
||||
result = install_ai_skills(proj, agent_key)
|
||||
|
||||
assert result is True
|
||||
skills_dir = _get_skills_dir(proj, agent_key)
|
||||
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
|
||||
assert "speckit-specify" in skill_dirs
|
||||
custom_stem = Path(custom_file).stem
|
||||
assert f"speckit-{custom_stem}" not in skill_dirs
|
||||
|
||||
def test_copilot_fallback_when_only_non_speckit_agents(self, project_dir):
|
||||
"""Fallback to templates/commands/ when .github/agents/ has no speckit.*.md files."""
|
||||
agents_dir = project_dir / ".github" / "agents"
|
||||
agents_dir.mkdir(parents=True, exist_ok=True)
|
||||
# Only a user-authored agent, no speckit.* templates
|
||||
(agents_dir / "my-custom-agent.agent.md").write_text(
|
||||
"---\ndescription: A user custom agent\n---\n\n# Custom\n\nBody.\n"
|
||||
)
|
||||
|
||||
result = install_ai_skills(project_dir, "copilot")
|
||||
|
||||
# Should succeed via fallback to templates/commands/
|
||||
assert result is True
|
||||
skills_dir = _get_skills_dir(project_dir, "copilot")
|
||||
assert skills_dir.exists()
|
||||
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
|
||||
# Should have skills from fallback templates, not from the custom agent
|
||||
assert "speckit-plan" in skill_dirs
|
||||
assert not any("my-custom" in d for d in skill_dirs)
|
||||
|
||||
@pytest.mark.parametrize("agent_key", ["claude", "cursor-agent", "qwen"])
|
||||
def test_fallback_when_only_non_speckit_commands(self, temp_dir, agent_key):
|
||||
"""Fallback to templates/commands/ when agent dir has no speckit.*.md files."""
|
||||
proj = temp_dir / f"proj-{agent_key}"
|
||||
proj.mkdir()
|
||||
|
||||
agent_folder = AGENT_CONFIG[agent_key]["folder"]
|
||||
commands_subdir = AGENT_CONFIG[agent_key].get("commands_subdir", "commands")
|
||||
cmds_dir = proj / agent_folder.rstrip("/") / commands_subdir
|
||||
cmds_dir.mkdir(parents=True)
|
||||
# Only a user-authored command, no speckit.* templates
|
||||
(cmds_dir / "my-custom-command.md").write_text(
|
||||
"---\ndescription: User custom command\n---\n\n# Custom\n\nBody.\n"
|
||||
)
|
||||
|
||||
result = install_ai_skills(proj, agent_key)
|
||||
|
||||
# Should succeed via fallback to templates/commands/
|
||||
assert result is True
|
||||
skills_dir = _get_skills_dir(proj, agent_key)
|
||||
assert skills_dir.exists()
|
||||
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
|
||||
assert not any("my-custom" in d for d in skill_dirs)
|
||||
|
||||
class TestCommandCoexistence:
|
||||
"""Verify install_ai_skills never touches command files.
|
||||
@@ -581,16 +460,14 @@ class TestCommandCoexistence:
|
||||
|
||||
def test_existing_commands_preserved_claude(self, project_dir, templates_dir, commands_dir_claude):
|
||||
"""install_ai_skills must NOT remove pre-existing .claude/commands files."""
|
||||
# Verify commands exist before (templates_dir adds 4 speckit.* files,
|
||||
# commands_dir_claude overlaps with 3 of them)
|
||||
before = list(commands_dir_claude.glob("speckit.*"))
|
||||
assert len(before) >= 3
|
||||
# Verify commands exist before
|
||||
assert len(list(commands_dir_claude.glob("speckit.*"))) == 3
|
||||
|
||||
install_ai_skills(project_dir, "claude")
|
||||
|
||||
# Commands must still be there — install_ai_skills never touches them
|
||||
remaining = list(commands_dir_claude.glob("speckit.*"))
|
||||
assert len(remaining) == len(before)
|
||||
assert len(remaining) == 3
|
||||
|
||||
def test_existing_commands_preserved_gemini(self, project_dir, templates_dir, commands_dir_gemini):
|
||||
"""install_ai_skills must NOT remove pre-existing .gemini/commands files."""
|
||||
@@ -693,82 +570,6 @@ class TestNewProjectCommandSkip:
|
||||
prompts_dir = target / ".kiro" / "prompts"
|
||||
assert not prompts_dir.exists()
|
||||
|
||||
def test_codex_native_skills_preserved_without_conversion(self, tmp_path):
|
||||
"""Codex should keep bundled .agents/skills and skip install_ai_skills conversion."""
|
||||
from typer.testing import CliRunner
|
||||
|
||||
runner = CliRunner()
|
||||
target = tmp_path / "new-codex-proj"
|
||||
|
||||
def fake_download(project_path, *args, **kwargs):
|
||||
skill_dir = project_path / ".agents" / "skills" / "speckit-specify"
|
||||
skill_dir.mkdir(parents=True, exist_ok=True)
|
||||
(skill_dir / "SKILL.md").write_text("---\ndescription: Test skill\n---\n\nBody.\n")
|
||||
|
||||
with patch("specify_cli.download_and_extract_template", side_effect=fake_download), \
|
||||
patch("specify_cli.ensure_executable_scripts"), \
|
||||
patch("specify_cli.ensure_constitution_from_template"), \
|
||||
patch("specify_cli.install_ai_skills") as mock_skills, \
|
||||
patch("specify_cli.is_git_repo", return_value=False), \
|
||||
patch("specify_cli.shutil.which", return_value="/usr/bin/codex"):
|
||||
result = runner.invoke(
|
||||
app,
|
||||
["init", str(target), "--ai", "codex", "--ai-skills", "--script", "sh", "--no-git"],
|
||||
)
|
||||
|
||||
assert result.exit_code == 0
|
||||
mock_skills.assert_not_called()
|
||||
assert (target / ".agents" / "skills" / "speckit-specify" / "SKILL.md").exists()
|
||||
|
||||
def test_codex_native_skills_missing_fails_clearly(self, tmp_path):
|
||||
"""Codex native skills init should fail if bundled skills are missing."""
|
||||
from typer.testing import CliRunner
|
||||
|
||||
runner = CliRunner()
|
||||
target = tmp_path / "missing-codex-skills"
|
||||
|
||||
with patch("specify_cli.download_and_extract_template", lambda *args, **kwargs: None), \
|
||||
patch("specify_cli.ensure_executable_scripts"), \
|
||||
patch("specify_cli.ensure_constitution_from_template"), \
|
||||
patch("specify_cli.install_ai_skills") as mock_skills, \
|
||||
patch("specify_cli.is_git_repo", return_value=False), \
|
||||
patch("specify_cli.shutil.which", return_value="/usr/bin/codex"):
|
||||
result = runner.invoke(
|
||||
app,
|
||||
["init", str(target), "--ai", "codex", "--ai-skills", "--script", "sh", "--no-git"],
|
||||
)
|
||||
|
||||
assert result.exit_code == 1
|
||||
mock_skills.assert_not_called()
|
||||
assert "Expected bundled agent skills" in result.output
|
||||
|
||||
def test_codex_native_skills_ignores_non_speckit_skill_dirs(self, tmp_path):
|
||||
"""Non-spec-kit SKILL.md files should not satisfy Codex bundled-skills validation."""
|
||||
from typer.testing import CliRunner
|
||||
|
||||
runner = CliRunner()
|
||||
target = tmp_path / "foreign-codex-skills"
|
||||
|
||||
def fake_download(project_path, *args, **kwargs):
|
||||
skill_dir = project_path / ".agents" / "skills" / "other-tool"
|
||||
skill_dir.mkdir(parents=True, exist_ok=True)
|
||||
(skill_dir / "SKILL.md").write_text("---\ndescription: Foreign skill\n---\n\nBody.\n")
|
||||
|
||||
with patch("specify_cli.download_and_extract_template", side_effect=fake_download), \
|
||||
patch("specify_cli.ensure_executable_scripts"), \
|
||||
patch("specify_cli.ensure_constitution_from_template"), \
|
||||
patch("specify_cli.install_ai_skills") as mock_skills, \
|
||||
patch("specify_cli.is_git_repo", return_value=False), \
|
||||
patch("specify_cli.shutil.which", return_value="/usr/bin/codex"):
|
||||
result = runner.invoke(
|
||||
app,
|
||||
["init", str(target), "--ai", "codex", "--ai-skills", "--script", "sh", "--no-git"],
|
||||
)
|
||||
|
||||
assert result.exit_code == 1
|
||||
mock_skills.assert_not_called()
|
||||
assert "Expected bundled agent skills" in result.output
|
||||
|
||||
def test_commands_preserved_when_skills_fail(self, tmp_path):
|
||||
"""If skills fail, commands should NOT be removed (safety net)."""
|
||||
from typer.testing import CliRunner
|
||||
@@ -912,17 +713,6 @@ class TestCliValidation:
|
||||
assert "Explicit command support was deprecated in Antigravity version 1.20.5." in result.output
|
||||
assert "--ai-skills" in result.output
|
||||
|
||||
def test_codex_without_ai_skills_fails(self):
|
||||
"""--ai codex without --ai-skills should fail with exit code 1."""
|
||||
from typer.testing import CliRunner
|
||||
|
||||
runner = CliRunner()
|
||||
result = runner.invoke(app, ["init", "test-proj", "--ai", "codex"])
|
||||
|
||||
assert result.exit_code == 1
|
||||
assert "Custom prompt-based spec-kit initialization is deprecated for Codex CLI" in result.output
|
||||
assert "--ai-skills" in result.output
|
||||
|
||||
def test_interactive_agy_without_ai_skills_prompts_skills(self, monkeypatch):
|
||||
"""Interactive selector returning agy without --ai-skills should automatically enable --ai-skills."""
|
||||
from typer.testing import CliRunner
|
||||
@@ -965,72 +755,6 @@ class TestCliValidation:
        assert result.exit_code == 0
        assert "Explicit command support was deprecated" not in result.output

    def test_interactive_codex_without_ai_skills_enables_skills(self, monkeypatch):
        """Interactive selector returning codex without --ai-skills should automatically enable --ai-skills."""
        from typer.testing import CliRunner

        def _fake_select_with_arrows(*args, **kwargs):
            options = kwargs.get("options")
            if options is None and len(args) >= 1:
                options = args[0]

            if isinstance(options, dict) and "codex" in options:
                return "codex"
            if isinstance(options, (list, tuple)) and "codex" in options:
                return "codex"

            if isinstance(options, dict) and options:
                return next(iter(options.keys()))
            if isinstance(options, (list, tuple)) and options:
                return options[0]

            return None

        monkeypatch.setattr("specify_cli.select_with_arrows", _fake_select_with_arrows)

        def _fake_download(*args, **kwargs):
            project_path = Path(args[0])
            skill_dir = project_path / ".agents" / "skills" / "speckit-specify"
            skill_dir.mkdir(parents=True, exist_ok=True)
            (skill_dir / "SKILL.md").write_text("---\ndescription: Test skill\n---\n\nBody.\n")

        monkeypatch.setattr("specify_cli.download_and_extract_template", _fake_download)

        runner = CliRunner()
        with runner.isolated_filesystem():
            result = runner.invoke(app, ["init", "test-proj", "--no-git", "--ignore-agent-tools"])

        assert result.exit_code == 0
        assert "Custom prompt-based spec-kit initialization is deprecated for Codex CLI" not in result.output
        assert ".agents/skills" in result.output
        assert "$speckit-constitution" in result.output
        assert "/speckit.constitution" not in result.output
        assert "Optional skills that you can use for your specs" in result.output

    def test_kimi_next_steps_show_skill_invocation(self, monkeypatch):
        """Kimi next-steps guidance should display /skill:speckit.* usage."""
        from typer.testing import CliRunner

        def _fake_download(*args, **kwargs):
            project_path = Path(args[0])
            skill_dir = project_path / ".kimi" / "skills" / "speckit.specify"
            skill_dir.mkdir(parents=True, exist_ok=True)
            (skill_dir / "SKILL.md").write_text("---\ndescription: Test skill\n---\n\nBody.\n")

        monkeypatch.setattr("specify_cli.download_and_extract_template", _fake_download)

        runner = CliRunner()
        with runner.isolated_filesystem():
            result = runner.invoke(
                app,
                ["init", "test-proj", "--ai", "kimi", "--no-git", "--ignore-agent-tools"],
            )

        assert result.exit_code == 0
        assert "/skill:speckit.constitution" in result.output
        assert "/speckit.constitution" not in result.output
        assert "Optional skills that you can use for your specs" in result.output

    def test_ai_skills_flag_appears_in_help(self):
        """--ai-skills should appear in init --help output."""
        from typer.testing import CliRunner
@@ -1050,12 +774,10 @@ class TestCliValidation:
        target = tmp_path / "kiro-alias-proj"

        with patch("specify_cli.download_and_extract_template") as mock_download, \
             patch("specify_cli.scaffold_from_core_pack", create=True) as mock_scaffold, \
             patch("specify_cli.ensure_executable_scripts"), \
             patch("specify_cli.ensure_constitution_from_template"), \
             patch("specify_cli.is_git_repo", return_value=False), \
             patch("specify_cli.shutil.which", return_value="/usr/bin/git"):
            mock_scaffold.return_value = True
            result = runner.invoke(
                app,
                [
@@ -1071,14 +793,9 @@ class TestCliValidation:
            )

        assert result.exit_code == 0
        # Without --offline, the download path should be taken.
        assert mock_download.called, (
            "Expected download_and_extract_template to be called (default non-offline path)"
        )
        assert mock_download.called
        # download_and_extract_template(project_path, ai_assistant, script_type, ...)
        assert mock_download.call_args.args[1] == "kiro-cli"
        assert not mock_scaffold.called, (
            "scaffold_from_core_pack should not be called without --offline"
        )

    def test_q_removed_from_agent_config(self):
        """Amazon Q legacy key should not remain in AGENT_CONFIG."""

@@ -1,89 +0,0 @@
"""
Unit tests for branch numbering options (sequential vs timestamp).

Tests cover:
- Persisting branch_numbering in init-options.json
- Default value when branch_numbering is None
- Validation of branch_numbering values
"""

import json
from pathlib import Path

from specify_cli import save_init_options


class TestSaveBranchNumbering:
    """Tests for save_init_options with branch_numbering."""

    def test_save_branch_numbering_timestamp(self, tmp_path: Path):
        opts = {"branch_numbering": "timestamp", "ai": "claude"}
        save_init_options(tmp_path, opts)

        saved = json.loads((tmp_path / ".specify/init-options.json").read_text())
        assert saved["branch_numbering"] == "timestamp"

    def test_save_branch_numbering_sequential(self, tmp_path: Path):
        opts = {"branch_numbering": "sequential", "ai": "claude"}
        save_init_options(tmp_path, opts)

        saved = json.loads((tmp_path / ".specify/init-options.json").read_text())
        assert saved["branch_numbering"] == "sequential"

    def test_branch_numbering_defaults_to_sequential(self, tmp_path: Path, monkeypatch):
        from typer.testing import CliRunner
        from specify_cli import app

        def _fake_download(project_path, *args, **kwargs):
            Path(project_path).mkdir(parents=True, exist_ok=True)

        monkeypatch.setattr("specify_cli.download_and_extract_template", _fake_download)

        project_dir = tmp_path / "proj"
        runner = CliRunner()
        result = runner.invoke(app, ["init", str(project_dir), "--ai", "claude", "--ignore-agent-tools"])
        assert result.exit_code == 0

        saved = json.loads((project_dir / ".specify/init-options.json").read_text())
        assert saved["branch_numbering"] == "sequential"


class TestBranchNumberingValidation:
    """Tests for branch_numbering CLI validation via CliRunner."""

    def test_invalid_branch_numbering_rejected(self, tmp_path: Path):
        from typer.testing import CliRunner
        from specify_cli import app

        runner = CliRunner()
        result = runner.invoke(app, ["init", str(tmp_path / "proj"), "--ai", "claude", "--branch-numbering", "foobar"])
        assert result.exit_code == 1
        assert "Invalid --branch-numbering" in result.output

    def test_valid_branch_numbering_sequential(self, tmp_path: Path, monkeypatch):
        from typer.testing import CliRunner
        from specify_cli import app

        def _fake_download(project_path, *args, **kwargs):
            Path(project_path).mkdir(parents=True, exist_ok=True)

        monkeypatch.setattr("specify_cli.download_and_extract_template", _fake_download)

        runner = CliRunner()
        result = runner.invoke(app, ["init", str(tmp_path / "proj"), "--ai", "claude", "--branch-numbering", "sequential", "--ignore-agent-tools"])
        assert result.exit_code == 0
        assert "Invalid --branch-numbering" not in (result.output or "")

    def test_valid_branch_numbering_timestamp(self, tmp_path: Path, monkeypatch):
        from typer.testing import CliRunner
        from specify_cli import app

        def _fake_download(project_path, *args, **kwargs):
            Path(project_path).mkdir(parents=True, exist_ok=True)

        monkeypatch.setattr("specify_cli.download_and_extract_template", _fake_download)

        runner = CliRunner()
        result = runner.invoke(app, ["init", str(tmp_path / "proj"), "--ai", "claude", "--branch-numbering", "timestamp", "--ignore-agent-tools"])
        assert result.exit_code == 0
        assert "Invalid --branch-numbering" not in (result.output or "")
@@ -1,613 +0,0 @@
"""
Validation tests for offline/air-gapped scaffolding (PR #1803).

For every supported AI agent (except "generic") the scaffold output is verified
against invariants and compared byte-for-byte with the canonical output produced
by create-release-packages.sh.

Since scaffold_from_core_pack() now invokes the release script at runtime, the
parity test (section 9) runs the script independently and compares the results
to ensure the integration is correct.

Per-agent invariants verified
──────────────────────────────
• Command files are written to the directory declared in AGENT_CONFIG
• File count matches the number of source templates
• Extension is correct: .toml (TOML agents), .agent.md (copilot), .md (rest)
• No unresolved placeholders remain ({SCRIPT}, {ARGS}, __AGENT__)
• Argument token is correct: {{args}} for TOML agents, $ARGUMENTS for others
• Path rewrites applied: scripts/ → .specify/scripts/ etc.
• TOML files have "description" and "prompt" fields
• Markdown files have parseable YAML frontmatter
• Copilot: companion speckit.*.prompt.md files are generated in prompts/
• .specify/scripts/ contains at least one script file
• .specify/templates/ contains at least one template file

Parity invariant
────────────────
Every file produced by scaffold_from_core_pack() must be byte-for-byte
identical to the same file in the ZIP produced by the release script.
"""

import os
import re
import shutil
import subprocess
import tomllib
import zipfile
from pathlib import Path

import pytest
import yaml

from specify_cli import (
    AGENT_CONFIG,
    _TOML_AGENTS,
    _locate_core_pack,
    scaffold_from_core_pack,
)

_REPO_ROOT = Path(__file__).parent.parent
_RELEASE_SCRIPT = _REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh"


def _find_bash() -> str | None:
    """Return the path to a usable bash on this machine, or None."""
    # Prefer PATH lookup so non-standard install locations (Nix, CI) are found.
    on_path = shutil.which("bash")
    if on_path:
        return on_path
    candidates = [
        "/opt/homebrew/bin/bash",
        "/usr/local/bin/bash",
        "/bin/bash",
        "/usr/bin/bash",
    ]
    for candidate in candidates:
        try:
            result = subprocess.run(
                [candidate, "--version"],
                capture_output=True, text=True, timeout=5,
            )
            if result.returncode == 0:
                return candidate
        except (FileNotFoundError, subprocess.TimeoutExpired):
            continue
    return None


def _run_release_script(agent: str, script_type: str, bash: str, output_dir: Path) -> Path:
    """Run create-release-packages.sh for *agent*/*script_type* and return the
    path to the generated ZIP. *output_dir* receives the build artifacts so
    the repo working tree stays clean."""
    env = os.environ.copy()
    env["AGENTS"] = agent
    env["SCRIPTS"] = script_type
    env["GENRELEASES_DIR"] = str(output_dir)

    result = subprocess.run(
        [bash, str(_RELEASE_SCRIPT), "v0.0.0"],
        capture_output=True, text=True,
        cwd=str(_REPO_ROOT),
        env=env,
        timeout=300,
    )

    if result.returncode != 0:
        pytest.fail(
            f"Release script failed with exit code {result.returncode}\n"
            f"stdout:\n{result.stdout}\nstderr:\n{result.stderr}"
        )

    zip_pattern = f"spec-kit-template-{agent}-{script_type}-v0.0.0.zip"
    zip_path = output_dir / zip_pattern
    if not zip_path.exists():
        pytest.fail(
            f"Release script did not produce expected ZIP: {zip_path}\n"
            f"stdout:\n{result.stdout}\nstderr:\n{result.stderr}"
        )
    return zip_path
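
# For reference, the call above is equivalent to invoking the release script by
# hand (agent/script values illustrative):
#   AGENTS=claude SCRIPTS=sh GENRELEASES_DIR=/tmp/gen \
#       bash .github/workflows/scripts/create-release-packages.sh v0.0.0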

# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

# Number of source command templates (one per .md file in templates/commands/)


def _commands_dir() -> Path:
    """Return the command templates directory (source-checkout or core_pack)."""
    core = _locate_core_pack()
    if core and (core / "commands").is_dir():
        return core / "commands"
    # Source-checkout fallback
    repo_root = Path(__file__).parent.parent
    return repo_root / "templates" / "commands"


def _get_source_template_stems() -> list[str]:
    """Return the stems of source command template files (e.g. ['specify', 'plan', ...])."""
    return sorted(p.stem for p in _commands_dir().glob("*.md"))


def _expected_cmd_dir(project_path: Path, agent: str) -> Path:
    """Return the expected command-files directory for a given agent."""
    cfg = AGENT_CONFIG[agent]
    folder = (cfg.get("folder") or "").rstrip("/")
    subdir = cfg.get("commands_subdir", "commands")
    if folder:
        return project_path / folder / subdir
    return project_path / ".speckit" / subdir


# Agents whose commands are laid out as <skills_dir>/<name>/SKILL.md.
# Maps agent -> separator used in skill directory names.
_SKILL_AGENTS: dict[str, str] = {"codex": "-", "kimi": "."}


def _expected_ext(agent: str) -> str:
    if agent in _TOML_AGENTS:
        return "toml"
    if agent == "copilot":
        return "agent.md"
    if agent in _SKILL_AGENTS:
        return "SKILL.md"
    return "md"


def _list_command_files(cmd_dir: Path, agent: str) -> list[Path]:
    """List generated command files, handling skills-based directory layouts."""
    if agent in _SKILL_AGENTS:
        sep = _SKILL_AGENTS[agent]
        return sorted(cmd_dir.glob(f"speckit{sep}*/SKILL.md"))
    ext = _expected_ext(agent)
    return sorted(cmd_dir.glob(f"speckit.*.{ext}"))


def _collect_relative_files(root: Path) -> dict[str, bytes]:
    """Walk *root* and return {relative_posix_path: file_bytes}."""
    result: dict[str, bytes] = {}
    for p in root.rglob("*"):
        if p.is_file():
            result[p.relative_to(root).as_posix()] = p.read_bytes()
    return result
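
# Both sides of the parity check (section 9) feed through this helper, e.g.:
#   bundled_tree = _collect_relative_files(project_dir)  # {"rel/path": b"bytes"}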


# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------

@pytest.fixture(scope="session")
def source_template_stems() -> list[str]:
    return _get_source_template_stems()


@pytest.fixture(scope="session")
def scaffolded_sh(tmp_path_factory):
    """Session-scoped cache: scaffold once per agent with script_type='sh'."""
    cache = {}
    def _get(agent: str) -> Path:
        if agent not in cache:
            project = tmp_path_factory.mktemp(f"scaffold_sh_{agent}")
            ok = scaffold_from_core_pack(project, agent, "sh")
            assert ok, f"scaffold_from_core_pack returned False for agent '{agent}'"
            cache[agent] = project
        return cache[agent]
    return _get


@pytest.fixture(scope="session")
def scaffolded_ps(tmp_path_factory):
    """Session-scoped cache: scaffold once per agent with script_type='ps'."""
    cache = {}
    def _get(agent: str) -> Path:
        if agent not in cache:
            project = tmp_path_factory.mktemp(f"scaffold_ps_{agent}")
            ok = scaffold_from_core_pack(project, agent, "ps")
            assert ok, f"scaffold_from_core_pack returned False for agent '{agent}'"
            cache[agent] = project
        return cache[agent]
    return _get


# ---------------------------------------------------------------------------
# Parametrize over all agents except "generic"
# ---------------------------------------------------------------------------

_TESTABLE_AGENTS = [a for a in AGENT_CONFIG if a != "generic"]


# ---------------------------------------------------------------------------
# 1. Bundled scaffold — directory structure
# ---------------------------------------------------------------------------

@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
def test_scaffold_creates_specify_scripts(agent, scaffolded_sh):
    """scaffold_from_core_pack copies at least one script into .specify/scripts/."""
    project = scaffolded_sh(agent)

    scripts_dir = project / ".specify" / "scripts" / "bash"
    assert scripts_dir.is_dir(), f".specify/scripts/bash/ missing for agent '{agent}'"
    assert any(scripts_dir.iterdir()), f".specify/scripts/bash/ is empty for agent '{agent}'"


@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
def test_scaffold_creates_specify_templates(agent, scaffolded_sh):
    """scaffold_from_core_pack copies at least one template into .specify/templates/."""
    project = scaffolded_sh(agent)

    tpl_dir = project / ".specify" / "templates"
    assert tpl_dir.is_dir(), f".specify/templates/ missing for agent '{agent}'"
    assert any(tpl_dir.iterdir()), ".specify/templates/ is empty"


@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
def test_scaffold_command_dir_location(agent, scaffolded_sh):
    """Command files land in the directory declared by AGENT_CONFIG."""
    project = scaffolded_sh(agent)

    cmd_dir = _expected_cmd_dir(project, agent)
    assert cmd_dir.is_dir(), (
        f"Command dir '{cmd_dir.relative_to(project)}' not created for agent '{agent}'"
    )


# ---------------------------------------------------------------------------
# 2. Bundled scaffold — file count
# ---------------------------------------------------------------------------

@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
def test_scaffold_command_file_count(agent, scaffolded_sh, source_template_stems):
    """One command file is generated per source template for every agent."""
    project = scaffolded_sh(agent)

    cmd_dir = _expected_cmd_dir(project, agent)
    generated = _list_command_files(cmd_dir, agent)

    if cmd_dir.is_dir():
        dir_listing = list(cmd_dir.iterdir())
    else:
        dir_listing = f"<command dir missing: {cmd_dir}>"

    assert len(generated) == len(source_template_stems), (
        f"Agent '{agent}': expected {len(source_template_stems)} command files "
        f"({_expected_ext(agent)}), found {len(generated)}. Dir: {dir_listing}"
    )


@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
def test_scaffold_command_file_names(agent, scaffolded_sh, source_template_stems):
    """Each source template stem maps to a corresponding speckit.<stem>.<ext> file."""
    project = scaffolded_sh(agent)

    cmd_dir = _expected_cmd_dir(project, agent)
    for stem in source_template_stems:
        if agent in _SKILL_AGENTS:
            sep = _SKILL_AGENTS[agent]
            expected = cmd_dir / f"speckit{sep}{stem}" / "SKILL.md"
        else:
            ext = _expected_ext(agent)
            expected = cmd_dir / f"speckit.{stem}.{ext}"
        assert expected.is_file(), (
            f"Agent '{agent}': expected file '{expected.name}' not found in '{cmd_dir}'"
        )


# ---------------------------------------------------------------------------
# 3. Bundled scaffold — content invariants
# ---------------------------------------------------------------------------

@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
def test_no_unresolved_script_placeholder(agent, scaffolded_sh):
    """{SCRIPT} must not appear in any generated command file."""
    project = scaffolded_sh(agent)

    cmd_dir = _expected_cmd_dir(project, agent)
    for f in cmd_dir.rglob("*"):
        if f.is_file():
            content = f.read_text(encoding="utf-8")
            assert "{SCRIPT}" not in content, (
                f"Unresolved {{SCRIPT}} in '{f.relative_to(project)}' for agent '{agent}'"
            )


@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
def test_no_unresolved_agent_placeholder(agent, scaffolded_sh):
    """__AGENT__ must not appear in any generated command file."""
    project = scaffolded_sh(agent)

    cmd_dir = _expected_cmd_dir(project, agent)
    for f in cmd_dir.rglob("*"):
        if f.is_file():
            content = f.read_text(encoding="utf-8")
            assert "__AGENT__" not in content, (
                f"Unresolved __AGENT__ in '{f.relative_to(project)}' for agent '{agent}'"
            )


@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
def test_no_unresolved_args_placeholder(agent, scaffolded_sh):
    """{ARGS} must not appear in any generated command file (replaced with agent-specific token)."""
    project = scaffolded_sh(agent)

    cmd_dir = _expected_cmd_dir(project, agent)
    for f in cmd_dir.rglob("*"):
        if f.is_file():
            content = f.read_text(encoding="utf-8")
            assert "{ARGS}" not in content, (
                f"Unresolved {{ARGS}} in '{f.relative_to(project)}' for agent '{agent}'"
            )


# Build a set of template stems that actually contain {ARGS} in their source.
_TEMPLATES_WITH_ARGS: frozenset[str] = frozenset(
    p.stem
    for p in _commands_dir().glob("*.md")
    if "{ARGS}" in p.read_text(encoding="utf-8")
)


@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
def test_argument_token_format(agent, scaffolded_sh):
    """For templates that carry an {ARGS} token:
    - TOML agents must emit {{args}}
    - Markdown agents must emit $ARGUMENTS
    Templates without {ARGS} (e.g. implement, plan) are skipped.
    """
    project = scaffolded_sh(agent)

    cmd_dir = _expected_cmd_dir(project, agent)

    for f in _list_command_files(cmd_dir, agent):
        # Recover the stem from the file path
        if agent in _SKILL_AGENTS:
            sep = _SKILL_AGENTS[agent]
            stem = f.parent.name.removeprefix(f"speckit{sep}")
        else:
            ext = _expected_ext(agent)
            stem = f.name.removeprefix("speckit.").removesuffix(f".{ext}")
        if stem not in _TEMPLATES_WITH_ARGS:
            continue  # this template has no argument token

        content = f.read_text(encoding="utf-8")
        if agent in _TOML_AGENTS:
            assert "{{args}}" in content, (
                f"TOML agent '{agent}': expected '{{{{args}}}}' in '{f.name}'"
            )
        else:
            assert "$ARGUMENTS" in content, (
                f"Markdown agent '{agent}': expected '$ARGUMENTS' in '{f.name}'"
            )


@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
def test_path_rewrites_applied(agent, scaffolded_sh):
    """Bare scripts/ and templates/ paths must be rewritten to .specify/ variants.

    YAML frontmatter 'source:' metadata fields are excluded — they reference
    the original template path for provenance, not a runtime path.
    """
    project = scaffolded_sh(agent)

    cmd_dir = _expected_cmd_dir(project, agent)
    for f in cmd_dir.rglob("*"):
        if not f.is_file():
            continue
        content = f.read_text(encoding="utf-8")

        # Strip YAML frontmatter before checking — source: metadata is not a runtime path
        body = content
        if content.startswith("---"):
            parts = content.split("---", 2)
            if len(parts) >= 3:
                body = parts[2]

        # Should not contain bare (non-.specify/) script paths
        assert not re.search(r'(?<!\.specify/)scripts/', body), (
            f"Bare scripts/ path found in '{f.relative_to(project)}' for agent '{agent}'"
        )
        assert not re.search(r'(?<!\.specify/)templates/', body), (
            f"Bare templates/ path found in '{f.relative_to(project)}' for agent '{agent}'"
        )


# ---------------------------------------------------------------------------
# 4. TOML format checks
# ---------------------------------------------------------------------------

@pytest.mark.parametrize("agent", sorted(_TOML_AGENTS))
def test_toml_format_valid(agent, scaffolded_sh):
    """TOML agents: every command file must have description and prompt fields."""
    project = scaffolded_sh(agent)

    cmd_dir = _expected_cmd_dir(project, agent)
    for f in cmd_dir.glob("speckit.*.toml"):
        content = f.read_text(encoding="utf-8")
        assert 'description = "' in content, (
            f"Missing 'description' in '{f.name}' for agent '{agent}'"
        )
        assert 'prompt = """' in content, (
            f"Missing 'prompt' block in '{f.name}' for agent '{agent}'"
        )


# ---------------------------------------------------------------------------
# 5. Markdown frontmatter checks
# ---------------------------------------------------------------------------

_MARKDOWN_AGENTS = [a for a in _TESTABLE_AGENTS if a not in _TOML_AGENTS]


@pytest.mark.parametrize("agent", _MARKDOWN_AGENTS)
def test_markdown_has_frontmatter(agent, scaffolded_sh):
    """Markdown agents: every command file must start with valid YAML frontmatter."""
    project = scaffolded_sh(agent)

    cmd_dir = _expected_cmd_dir(project, agent)
    for f in _list_command_files(cmd_dir, agent):
        content = f.read_text(encoding="utf-8")
        assert content.startswith("---"), (
            f"No YAML frontmatter in '{f.name}' for agent '{agent}'"
        )
        parts = content.split("---", 2)
        assert len(parts) >= 3, f"Incomplete frontmatter in '{f.name}'"
        fm = yaml.safe_load(parts[1])
        assert fm is not None, f"Empty frontmatter in '{f.name}'"
        assert "description" in fm, (
            f"'description' key missing from frontmatter in '{f.name}' for agent '{agent}'"
        )


# ---------------------------------------------------------------------------
# 6. Copilot-specific: companion .prompt.md files
# ---------------------------------------------------------------------------

def test_copilot_companion_prompt_files(scaffolded_sh, source_template_stems):
    """Copilot: a speckit.<stem>.prompt.md companion is created for every .agent.md file."""
    project = scaffolded_sh("copilot")

    prompts_dir = project / ".github" / "prompts"
    assert prompts_dir.is_dir(), ".github/prompts/ not created for copilot"

    for stem in source_template_stems:
        prompt_file = prompts_dir / f"speckit.{stem}.prompt.md"
        assert prompt_file.is_file(), (
            f"Companion prompt file '{prompt_file.name}' missing for copilot"
        )


def test_copilot_prompt_file_content(scaffolded_sh, source_template_stems):
    """Copilot companion .prompt.md files must reference their parent .agent.md."""
    project = scaffolded_sh("copilot")

    prompts_dir = project / ".github" / "prompts"
    for stem in source_template_stems:
        f = prompts_dir / f"speckit.{stem}.prompt.md"
        content = f.read_text(encoding="utf-8")
        assert f"agent: speckit.{stem}" in content, (
            f"Companion '{f.name}' does not reference 'speckit.{stem}'"
        )


# ---------------------------------------------------------------------------
# 7. PowerShell script variant
# ---------------------------------------------------------------------------

@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
def test_scaffold_powershell_variant(agent, scaffolded_ps, source_template_stems):
    """scaffold_from_core_pack with script_type='ps' creates correct files."""
    project = scaffolded_ps(agent)

    scripts_dir = project / ".specify" / "scripts" / "powershell"
    assert scripts_dir.is_dir(), f".specify/scripts/powershell/ missing for '{agent}'"
    assert any(scripts_dir.iterdir()), ".specify/scripts/powershell/ is empty"

    cmd_dir = _expected_cmd_dir(project, agent)
    generated = _list_command_files(cmd_dir, agent)
    assert len(generated) == len(source_template_stems)


# ---------------------------------------------------------------------------
# 9. Parity: bundled vs. real create-release-packages.sh ZIP
# ---------------------------------------------------------------------------

@pytest.fixture(scope="session")
def release_script_trees(tmp_path_factory):
    """Session-scoped cache: run release script once per (agent, script_type)."""
    cache: dict[tuple[str, str], dict[str, bytes]] = {}
    bash = _find_bash()

    def _get(agent: str, script_type: str) -> dict[str, bytes] | None:
        if bash is None:
            return None
        key = (agent, script_type)
        if key not in cache:
            tmp = tmp_path_factory.mktemp(f"release_{agent}_{script_type}")
            gen_dir = tmp / "genreleases"
            gen_dir.mkdir()
            zip_path = _run_release_script(agent, script_type, bash, gen_dir)
            extracted = tmp / "extracted"
            extracted.mkdir()
            with zipfile.ZipFile(zip_path) as zf:
                zf.extractall(extracted)
            cache[key] = _collect_relative_files(extracted)
        return cache[key]
    return _get


@pytest.mark.parametrize("script_type", ["sh", "ps"])
@pytest.mark.parametrize("agent", _TESTABLE_AGENTS)
def test_parity_bundled_vs_release_script(agent, script_type, scaffolded_sh, scaffolded_ps, release_script_trees):
    """scaffold_from_core_pack() file tree is identical to the ZIP produced by
    create-release-packages.sh for every agent and script type.

    This is the true end-to-end parity check: the Python offline path must
    produce exactly the same artifacts as the canonical shell release script.

    Both sides are session-cached: each agent/script_type combination is
    scaffolded and release-scripted only once across all tests.
    """
    script_tree = release_script_trees(agent, script_type)
    if script_tree is None:
        pytest.skip("bash required to run create-release-packages.sh")

    # Reuse session-cached scaffold output
    if script_type == "sh":
        bundled_dir = scaffolded_sh(agent)
    else:
        bundled_dir = scaffolded_ps(agent)

    bundled_tree = _collect_relative_files(bundled_dir)

    only_bundled = set(bundled_tree) - set(script_tree)
    only_script = set(script_tree) - set(bundled_tree)

    assert not only_bundled, (
        f"Agent '{agent}' ({script_type}): files only in bundled output (not in release ZIP):\n  "
        + "\n  ".join(sorted(only_bundled))
    )
    assert not only_script, (
        f"Agent '{agent}' ({script_type}): files only in release ZIP (not in bundled output):\n  "
        + "\n  ".join(sorted(only_script))
    )

    for name in bundled_tree:
        assert bundled_tree[name] == script_tree[name], (
            f"Agent '{agent}' ({script_type}): file '{name}' content differs between "
            f"bundled output and release script ZIP"
        )


# ---------------------------------------------------------------------------
# 10. pyproject.toml force-include covers all template files
# ---------------------------------------------------------------------------

def test_pyproject_force_include_covers_all_templates():
    """Every file in templates/ (excluding commands/) must be listed in
    pyproject.toml's [tool.hatch.build.targets.wheel.force-include] section.

    This prevents new template files from being silently omitted from the
    wheel, which would break ``specify init --offline``.
    """
    templates_dir = _REPO_ROOT / "templates"
    # Collect all files directly in templates/ (not in subdirectories like commands/)
    repo_template_files = sorted(
        f.name for f in templates_dir.iterdir()
        if f.is_file()
    )
    assert repo_template_files, "Expected at least one template file in templates/"

    pyproject_path = _REPO_ROOT / "pyproject.toml"
    with open(pyproject_path, "rb") as f:
        pyproject = tomllib.load(f)
    force_include = pyproject.get("tool", {}).get("hatch", {}).get("build", {}).get("targets", {}).get("wheel", {}).get("force-include", {})

    missing = [
        name for name in repo_template_files
        if f"templates/{name}" not in force_include
    ]
    assert not missing, (
        "Template files not listed in pyproject.toml force-include "
        "(offline scaffolding will miss them):\n  "
        + "\n  ".join(missing)
    )
@@ -26,7 +26,6 @@ from specify_cli.extensions import (
    ExtensionError,
    ValidationError,
    CompatibilityError,
    normalize_priority,
    version_satisfies,
)

@@ -122,57 +121,6 @@ def project_dir(temp_dir):
    return proj_dir


# ===== normalize_priority Tests =====

class TestNormalizePriority:
    """Test normalize_priority helper function."""

    def test_valid_integer(self):
        """Test with valid integer priority."""
        assert normalize_priority(5) == 5
        assert normalize_priority(1) == 1
        assert normalize_priority(100) == 100

    def test_valid_string_number(self):
        """Test with string that can be converted to int."""
        assert normalize_priority("5") == 5
        assert normalize_priority("10") == 10

    def test_zero_returns_default(self):
        """Test that zero priority returns default."""
        assert normalize_priority(0) == 10
        assert normalize_priority(0, default=5) == 5

    def test_negative_returns_default(self):
        """Test that negative priority returns default."""
        assert normalize_priority(-1) == 10
        assert normalize_priority(-100, default=5) == 5

    def test_none_returns_default(self):
        """Test that None returns default."""
        assert normalize_priority(None) == 10
        assert normalize_priority(None, default=5) == 5

    def test_invalid_string_returns_default(self):
        """Test that non-numeric string returns default."""
        assert normalize_priority("invalid") == 10
        assert normalize_priority("abc", default=5) == 5

    def test_float_truncates(self):
        """Test that float is truncated to int."""
        assert normalize_priority(5.9) == 5
        assert normalize_priority(3.1) == 3

    def test_empty_string_returns_default(self):
        """Test that empty string returns default."""
        assert normalize_priority("") == 10

    def test_custom_default(self):
        """Test custom default value."""
        assert normalize_priority(None, default=20) == 20
        assert normalize_priority("invalid", default=1) == 1


# ===== ExtensionManifest Tests =====

class TestExtensionManifest:
@@ -420,48 +368,6 @@ class TestExtensionRegistry:
        assert registry.is_installed("test-ext")
        assert registry.get("test-ext")["version"] == "1.0.0"

    def test_restore_rejects_none_metadata(self, temp_dir):
        """Test restore() raises ValueError for None metadata."""
        extensions_dir = temp_dir / "extensions"
        extensions_dir.mkdir()
        registry = ExtensionRegistry(extensions_dir)

        with pytest.raises(ValueError, match="metadata must be a dict"):
            registry.restore("test-ext", None)

    def test_restore_rejects_non_dict_metadata(self, temp_dir):
        """Test restore() raises ValueError for non-dict metadata."""
        extensions_dir = temp_dir / "extensions"
        extensions_dir.mkdir()
        registry = ExtensionRegistry(extensions_dir)

        with pytest.raises(ValueError, match="metadata must be a dict"):
            registry.restore("test-ext", "not-a-dict")

        with pytest.raises(ValueError, match="metadata must be a dict"):
            registry.restore("test-ext", ["list", "not", "dict"])

    def test_restore_uses_deep_copy(self, temp_dir):
        """Test restore() deep copies metadata to prevent mutation."""
        extensions_dir = temp_dir / "extensions"
        extensions_dir.mkdir()
        registry = ExtensionRegistry(extensions_dir)

        original_metadata = {
            "version": "1.0.0",
            "nested": {"key": "original"},
        }
        registry.restore("test-ext", original_metadata)

        # Mutate the original metadata after restore
        original_metadata["version"] = "MUTATED"
        original_metadata["nested"]["key"] = "MUTATED"

        # Registry should have the original values
        stored = registry.get("test-ext")
        assert stored["version"] == "1.0.0"
        assert stored["nested"]["key"] == "original"

    def test_get_returns_deep_copy(self, temp_dir):
        """Test that get() returns deep copies for nested structures."""
        extensions_dir = temp_dir / "extensions"
@@ -481,26 +387,6 @@ class TestExtensionRegistry:
        internal = registry.data["extensions"]["test-ext"]
        assert internal["registered_commands"] == {"claude": ["cmd1"]}

    def test_get_returns_none_for_corrupted_entry(self, temp_dir):
        """Test that get() returns None for corrupted (non-dict) entries."""
        extensions_dir = temp_dir / "extensions"
        extensions_dir.mkdir()

        registry = ExtensionRegistry(extensions_dir)

        # Directly corrupt the registry with non-dict entries
        registry.data["extensions"]["corrupted-string"] = "not a dict"
        registry.data["extensions"]["corrupted-list"] = ["not", "a", "dict"]
        registry.data["extensions"]["corrupted-int"] = 42
        registry._save()

        # All corrupted entries should return None
        assert registry.get("corrupted-string") is None
        assert registry.get("corrupted-list") is None
        assert registry.get("corrupted-int") is None
        # Non-existent should also return None
        assert registry.get("nonexistent") is None

    def test_list_returns_deep_copy(self, temp_dir):
        """Test that list() returns deep copies for nested structures."""
        extensions_dir = temp_dir / "extensions"
@@ -520,20 +406,6 @@ class TestExtensionRegistry:
        internal = registry.data["extensions"]["test-ext"]
        assert internal["registered_commands"] == {"claude": ["cmd1"]}

    def test_list_returns_empty_dict_for_corrupted_registry(self, temp_dir):
        """Test that list() returns empty dict when extensions is not a dict."""
        extensions_dir = temp_dir / "extensions"
        extensions_dir.mkdir()
        registry = ExtensionRegistry(extensions_dir)

        # Corrupt the registry - extensions is a list instead of dict
        registry.data["extensions"] = ["not", "a", "dict"]
        registry._save()

        # list() should return empty dict, not crash
        result = registry.list()
        assert result == {}


# ===== ExtensionManager Tests =====

@@ -665,19 +537,9 @@ class TestCommandRegistrar:
        assert "q" not in CommandRegistrar.AGENT_CONFIGS

    def test_codex_agent_config_present(self):
        """Codex should be mapped to .agents/skills."""
        """Codex should be mapped to .codex/prompts."""
        assert "codex" in CommandRegistrar.AGENT_CONFIGS
        assert CommandRegistrar.AGENT_CONFIGS["codex"]["dir"] == ".agents/skills"
        assert CommandRegistrar.AGENT_CONFIGS["codex"]["extension"] == "/SKILL.md"

    def test_pi_agent_config_present(self):
        """Pi should be mapped to .pi/prompts."""
        assert "pi" in CommandRegistrar.AGENT_CONFIGS
        cfg = CommandRegistrar.AGENT_CONFIGS["pi"]
        assert cfg["dir"] == ".pi/prompts"
        assert cfg["format"] == "markdown"
        assert cfg["args"] == "$ARGUMENTS"
        assert cfg["extension"] == ".md"
        assert CommandRegistrar.AGENT_CONFIGS["codex"]["dir"] == ".codex/prompts"

    def test_qwen_agent_config_is_markdown(self):
        """Qwen should use Markdown format with $ARGUMENTS (not TOML)."""
@@ -718,21 +580,6 @@ $ARGUMENTS
        assert frontmatter == {}
        assert body == content

    def test_parse_frontmatter_non_mapping_returns_empty_dict(self):
        """Non-mapping YAML frontmatter should not crash downstream renderers."""
        content = """---
- item1
- item2
---

# Command body
"""
        registrar = CommandRegistrar()
        frontmatter, body = registrar.parse_frontmatter(content)

        assert frontmatter == {}
        assert "Command body" in body

    def test_render_frontmatter(self):
        """Test rendering frontmatter to YAML."""
        frontmatter = {
@@ -824,299 +671,6 @@ $ARGUMENTS
        assert (claude_dir / "speckit.alias.cmd.md").exists()
        assert (claude_dir / "speckit.shortcut.md").exists()

    def test_unregister_commands_for_codex_skills_uses_mapped_names(self, project_dir):
        """Codex skill cleanup should use the same mapped names as registration."""
        skills_dir = project_dir / ".agents" / "skills"
        (skills_dir / "speckit-specify").mkdir(parents=True)
        (skills_dir / "speckit-specify" / "SKILL.md").write_text("body")
        (skills_dir / "speckit-shortcut").mkdir(parents=True)
        (skills_dir / "speckit-shortcut" / "SKILL.md").write_text("body")

        registrar = CommandRegistrar()
        registrar.unregister_commands(
            {"codex": ["speckit.specify", "speckit.shortcut"]},
            project_dir,
        )

        assert not (skills_dir / "speckit-specify" / "SKILL.md").exists()
        assert not (skills_dir / "speckit-shortcut" / "SKILL.md").exists()

    def test_register_commands_for_all_agents_distinguishes_codex_from_amp(self, extension_dir, project_dir):
        """A Codex project under .agents/skills should not implicitly activate Amp."""
        skills_dir = project_dir / ".agents" / "skills"
        skills_dir.mkdir(parents=True)

        manifest = ExtensionManifest(extension_dir / "extension.yml")
        registrar = CommandRegistrar()
        registered = registrar.register_commands_for_all_agents(manifest, extension_dir, project_dir)

        assert "codex" in registered
        assert "amp" not in registered
        assert not (project_dir / ".agents" / "commands").exists()

    def test_codex_skill_registration_writes_skill_frontmatter(self, extension_dir, project_dir):
        """Codex SKILL.md output should use skills-oriented frontmatter."""
        skills_dir = project_dir / ".agents" / "skills"
        skills_dir.mkdir(parents=True)

        manifest = ExtensionManifest(extension_dir / "extension.yml")
        registrar = CommandRegistrar()
        registrar.register_commands_for_agent("codex", manifest, extension_dir, project_dir)

        skill_file = skills_dir / "speckit-test.hello" / "SKILL.md"
        assert skill_file.exists()

        content = skill_file.read_text()
        assert "name: speckit-test.hello" in content
        assert "description: Test hello command" in content
        assert "compatibility:" in content
        assert "metadata:" in content
        assert "source: test-ext:commands/hello.md" in content
        assert "<!-- Extension:" not in content

    def test_codex_skill_registration_resolves_script_placeholders(self, project_dir, temp_dir):
        """Codex SKILL.md overrides should resolve script placeholders."""
        import yaml

        ext_dir = temp_dir / "ext-scripted"
        ext_dir.mkdir()
        (ext_dir / "commands").mkdir()

        manifest_data = {
            "schema_version": "1.0",
            "extension": {
                "id": "ext-scripted",
                "name": "Scripted Extension",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.test.plan",
                        "file": "commands/plan.md",
                        "description": "Scripted command",
                    }
                ]
            },
        }
        with open(ext_dir / "extension.yml", "w") as f:
            yaml.dump(manifest_data, f)

        (ext_dir / "commands" / "plan.md").write_text(
            """---
description: "Scripted command"
scripts:
  sh: ../../scripts/bash/setup-plan.sh --json "{ARGS}"
  ps: ../../scripts/powershell/setup-plan.ps1 -Json
agent_scripts:
  sh: ../../scripts/bash/update-agent-context.sh __AGENT__
  ps: ../../scripts/powershell/update-agent-context.ps1 -AgentType __AGENT__
---

Run {SCRIPT}
Then {AGENT_SCRIPT}
Agent __AGENT__
"""
        )

        init_options = project_dir / ".specify" / "init-options.json"
        init_options.parent.mkdir(parents=True, exist_ok=True)
        init_options.write_text('{"ai":"codex","ai_skills":true,"script":"sh"}')

        skills_dir = project_dir / ".agents" / "skills"
        skills_dir.mkdir(parents=True)

        manifest = ExtensionManifest(ext_dir / "extension.yml")
        registrar = CommandRegistrar()
        registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)

        skill_file = skills_dir / "speckit-test.plan" / "SKILL.md"
        assert skill_file.exists()

        content = skill_file.read_text()
        assert "{SCRIPT}" not in content
        assert "{AGENT_SCRIPT}" not in content
        assert "__AGENT__" not in content
        assert "{ARGS}" not in content
        assert '.specify/scripts/bash/setup-plan.sh --json "$ARGUMENTS"' in content
        assert ".specify/scripts/bash/update-agent-context.sh codex" in content

    def test_codex_skill_alias_frontmatter_matches_alias_name(self, project_dir, temp_dir):
        """Codex alias skills should render their own matching `name:` frontmatter."""
        import yaml

        ext_dir = temp_dir / "ext-alias-skill"
        ext_dir.mkdir()
        (ext_dir / "commands").mkdir()

        manifest_data = {
            "schema_version": "1.0",
            "extension": {
                "id": "ext-alias-skill",
                "name": "Alias Skill Extension",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.alias.cmd",
                        "file": "commands/cmd.md",
                        "aliases": ["speckit.shortcut"],
                    }
                ]
            },
        }
        with open(ext_dir / "extension.yml", "w") as f:
            yaml.dump(manifest_data, f)

        (ext_dir / "commands" / "cmd.md").write_text("---\ndescription: Alias skill\n---\n\nBody\n")

        skills_dir = project_dir / ".agents" / "skills"
        skills_dir.mkdir(parents=True)

        manifest = ExtensionManifest(ext_dir / "extension.yml")
        registrar = CommandRegistrar()
        registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)

        primary = skills_dir / "speckit-alias.cmd" / "SKILL.md"
        alias = skills_dir / "speckit-shortcut" / "SKILL.md"

        assert primary.exists()
        assert alias.exists()
        assert "name: speckit-alias.cmd" in primary.read_text()
        assert "name: speckit-shortcut" in alias.read_text()

    def test_codex_skill_registration_uses_fallback_script_variant_without_init_options(
        self, project_dir, temp_dir
    ):
        """Codex placeholder substitution should still work without init-options.json."""
        import yaml

        ext_dir = temp_dir / "ext-script-fallback"
        ext_dir.mkdir()
        (ext_dir / "commands").mkdir()

        manifest_data = {
            "schema_version": "1.0",
            "extension": {
                "id": "ext-script-fallback",
                "name": "Script fallback",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.fallback.plan",
                        "file": "commands/plan.md",
                    }
                ]
            },
        }
        with open(ext_dir / "extension.yml", "w") as f:
            yaml.dump(manifest_data, f)

        (ext_dir / "commands" / "plan.md").write_text(
            """---
description: "Fallback scripted command"
scripts:
  sh: ../../scripts/bash/setup-plan.sh --json "{ARGS}"
  ps: ../../scripts/powershell/setup-plan.ps1 -Json
agent_scripts:
  sh: ../../scripts/bash/update-agent-context.sh __AGENT__
---

Run {SCRIPT}
Then {AGENT_SCRIPT}
"""
        )

        # Intentionally do NOT create .specify/init-options.json
        skills_dir = project_dir / ".agents" / "skills"
        skills_dir.mkdir(parents=True)

        manifest = ExtensionManifest(ext_dir / "extension.yml")
        registrar = CommandRegistrar()
        registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)

        skill_file = skills_dir / "speckit-fallback.plan" / "SKILL.md"
        assert skill_file.exists()

        content = skill_file.read_text()
        assert "{SCRIPT}" not in content
        assert "{AGENT_SCRIPT}" not in content
        assert '.specify/scripts/bash/setup-plan.sh --json "$ARGUMENTS"' in content
        assert ".specify/scripts/bash/update-agent-context.sh codex" in content

    def test_codex_skill_registration_fallback_prefers_powershell_on_windows(
        self, project_dir, temp_dir, monkeypatch
    ):
        """Without init metadata, Windows fallback should prefer ps scripts over sh."""
        import yaml

        monkeypatch.setattr("specify_cli.agents.platform.system", lambda: "Windows")

        ext_dir = temp_dir / "ext-script-windows-fallback"
        ext_dir.mkdir()
        (ext_dir / "commands").mkdir()

        manifest_data = {
            "schema_version": "1.0",
            "extension": {
                "id": "ext-script-windows-fallback",
                "name": "Script fallback windows",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.windows.plan",
                        "file": "commands/plan.md",
                    }
                ]
            },
        }
        with open(ext_dir / "extension.yml", "w") as f:
            yaml.dump(manifest_data, f)

        (ext_dir / "commands" / "plan.md").write_text(
            """---
description: "Windows fallback scripted command"
scripts:
  sh: ../../scripts/bash/setup-plan.sh --json "{ARGS}"
  ps: ../../scripts/powershell/setup-plan.ps1 -Json
agent_scripts:
  sh: ../../scripts/bash/update-agent-context.sh __AGENT__
  ps: ../../scripts/powershell/update-agent-context.ps1 -AgentType __AGENT__
---

Run {SCRIPT}
Then {AGENT_SCRIPT}
"""
        )

        skills_dir = project_dir / ".agents" / "skills"
        skills_dir.mkdir(parents=True)

        manifest = ExtensionManifest(ext_dir / "extension.yml")
        registrar = CommandRegistrar()
        registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)

        skill_file = skills_dir / "speckit-windows.plan" / "SKILL.md"
        assert skill_file.exists()

        content = skill_file.read_text()
        assert ".specify/scripts/powershell/setup-plan.ps1 -Json" in content
        assert ".specify/scripts/powershell/update-agent-context.ps1 -AgentType codex" in content
        assert ".specify/scripts/bash/setup-plan.sh" not in content

    def test_register_commands_for_copilot(self, extension_dir, project_dir):
        """Test registering commands for Copilot agent with .agent.md extension."""
        # Create .github/agents directory (Copilot project)
@@ -2783,439 +2337,3 @@ class TestExtensionUpdateCLI:

        for cmd_file in command_files:
            assert cmd_file.exists(), f"Expected command file to be restored after rollback: {cmd_file}"


class TestExtensionListCLI:
    """Test extension list CLI output format."""

    def test_list_shows_extension_id(self, extension_dir, project_dir):
        """extension list should display the extension ID."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install the extension using the manager
        manager = ExtensionManager(project_dir)
        manager.install_from_directory(extension_dir, "0.1.0", register_commands=False)

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["extension", "list"])

        assert result.exit_code == 0, result.output
        # Verify the extension ID is shown in the output
        assert "test-ext" in result.output
        # Verify name and version are also shown
        assert "Test Extension" in result.output
        assert "1.0.0" in result.output


class TestExtensionPriority:
    """Test extension priority-based resolution."""

    def test_list_by_priority_empty(self, temp_dir):
        """Test list_by_priority on empty registry."""
        extensions_dir = temp_dir / "extensions"
        extensions_dir.mkdir()

        registry = ExtensionRegistry(extensions_dir)
        result = registry.list_by_priority()

        assert result == []

    def test_list_by_priority_single(self, temp_dir):
        """Test list_by_priority with single extension."""
        extensions_dir = temp_dir / "extensions"
        extensions_dir.mkdir()

        registry = ExtensionRegistry(extensions_dir)
        registry.add("test-ext", {"version": "1.0.0", "priority": 5})

        result = registry.list_by_priority()

        assert len(result) == 1
        assert result[0][0] == "test-ext"
        assert result[0][1]["priority"] == 5

    def test_list_by_priority_ordering(self, temp_dir):
        """Test list_by_priority returns extensions sorted by priority."""
        extensions_dir = temp_dir / "extensions"
        extensions_dir.mkdir()

        registry = ExtensionRegistry(extensions_dir)
        # Add in non-priority order
        registry.add("ext-low", {"version": "1.0.0", "priority": 20})
        registry.add("ext-high", {"version": "1.0.0", "priority": 1})
        registry.add("ext-mid", {"version": "1.0.0", "priority": 10})

        result = registry.list_by_priority()

        assert len(result) == 3
        # Lower priority number = higher precedence (first)
        assert result[0][0] == "ext-high"
        assert result[1][0] == "ext-mid"
        assert result[2][0] == "ext-low"

    def test_list_by_priority_default(self, temp_dir):
        """Test list_by_priority uses default priority of 10."""
        extensions_dir = temp_dir / "extensions"
        extensions_dir.mkdir()

        registry = ExtensionRegistry(extensions_dir)
        # Add without explicit priority
        registry.add("ext-default", {"version": "1.0.0"})
        registry.add("ext-high", {"version": "1.0.0", "priority": 1})
        registry.add("ext-low", {"version": "1.0.0", "priority": 20})

        result = registry.list_by_priority()

        assert len(result) == 3
        # ext-high (1), ext-default (10), ext-low (20)
        assert result[0][0] == "ext-high"
        assert result[1][0] == "ext-default"
        assert result[2][0] == "ext-low"

    def test_list_by_priority_invalid_priority_defaults(self, temp_dir):
        """Malformed priority values fall back to the default priority."""
        extensions_dir = temp_dir / "extensions"
        extensions_dir.mkdir()

        registry = ExtensionRegistry(extensions_dir)
        registry.add("ext-high", {"version": "1.0.0", "priority": 1})
        registry.data["extensions"]["ext-invalid"] = {
            "version": "1.0.0",
            "priority": "high",
        }
        registry._save()

        result = registry.list_by_priority()

        assert [item[0] for item in result] == ["ext-high", "ext-invalid"]
        assert result[1][1]["priority"] == 10

    def test_list_by_priority_excludes_disabled(self, temp_dir):
        """Test that list_by_priority excludes disabled extensions by default."""
        extensions_dir = temp_dir / "extensions"
        extensions_dir.mkdir()

        registry = ExtensionRegistry(extensions_dir)
        registry.add("ext-enabled", {"version": "1.0.0", "enabled": True, "priority": 5})
        registry.add("ext-disabled", {"version": "1.0.0", "enabled": False, "priority": 1})
        registry.add("ext-default", {"version": "1.0.0", "priority": 10})  # no enabled field = True

        # Default: exclude disabled
        by_priority = registry.list_by_priority()
        ext_ids = [p[0] for p in by_priority]
        assert "ext-enabled" in ext_ids
        assert "ext-default" in ext_ids
        assert "ext-disabled" not in ext_ids

    def test_list_by_priority_includes_disabled_when_requested(self, temp_dir):
        """Test that list_by_priority includes disabled extensions when requested."""
        extensions_dir = temp_dir / "extensions"
        extensions_dir.mkdir()

        registry = ExtensionRegistry(extensions_dir)
        registry.add("ext-enabled", {"version": "1.0.0", "enabled": True, "priority": 5})
        registry.add("ext-disabled", {"version": "1.0.0", "enabled": False, "priority": 1})

        # Include disabled
        by_priority = registry.list_by_priority(include_disabled=True)
        ext_ids = [p[0] for p in by_priority]
        assert "ext-enabled" in ext_ids
        assert "ext-disabled" in ext_ids
        # Disabled ext has lower priority number, so it comes first when included
        assert ext_ids[0] == "ext-disabled"

    def test_install_with_priority(self, extension_dir, project_dir):
        """Test that install_from_directory stores priority."""
        manager = ExtensionManager(project_dir)
        manager.install_from_directory(extension_dir, "0.1.0", register_commands=False, priority=5)

        metadata = manager.registry.get("test-ext")
        assert metadata["priority"] == 5

    def test_install_default_priority(self, extension_dir, project_dir):
        """Test that install_from_directory uses default priority of 10."""
        manager = ExtensionManager(project_dir)
        manager.install_from_directory(extension_dir, "0.1.0", register_commands=False)

        metadata = manager.registry.get("test-ext")
        assert metadata["priority"] == 10

    def test_list_installed_includes_priority(self, extension_dir, project_dir):
        """Test that list_installed includes priority in returned data."""
        manager = ExtensionManager(project_dir)
        manager.install_from_directory(extension_dir, "0.1.0", register_commands=False, priority=3)

        installed = manager.list_installed()

        assert len(installed) == 1
        assert installed[0]["priority"] == 3

    def test_priority_preserved_on_update(self, temp_dir):
        """Test that registry update preserves priority."""
        extensions_dir = temp_dir / "extensions"
        extensions_dir.mkdir()

        registry = ExtensionRegistry(extensions_dir)
        registry.add("test-ext", {"version": "1.0.0", "priority": 5, "enabled": True})

        # Update with new metadata (no priority specified)
        registry.update("test-ext", {"enabled": False})

        updated = registry.get("test-ext")
        assert updated["priority"] == 5  # Preserved
        assert updated["enabled"] is False  # Updated

    def test_corrupted_extension_entry_not_picked_up_as_unregistered(self, project_dir):
        """Corrupted registry entries are still tracked and NOT picked up as unregistered."""
        extensions_dir = project_dir / ".specify" / "extensions"

        valid_dir = extensions_dir / "valid-ext" / "templates"
        valid_dir.mkdir(parents=True)
        (valid_dir / "other-template.md").write_text("# Valid\n")

        broken_dir = extensions_dir / "broken-ext" / "templates"
        broken_dir.mkdir(parents=True)
        (broken_dir / "target-template.md").write_text("# Broken Target\n")

        registry = ExtensionRegistry(extensions_dir)
        registry.add("valid-ext", {"version": "1.0.0", "priority": 10})
        # Corrupt the entry - should still be tracked, not picked up as unregistered
        registry.data["extensions"]["broken-ext"] = "corrupted"
        registry._save()

        from specify_cli.presets import PresetResolver

        resolver = PresetResolver(project_dir)
        # Corrupted extension templates should NOT be resolved
        resolved = resolver.resolve("target-template")
|
||||
assert resolved is None
|
||||
|
||||
# Valid extension template should still resolve
|
||||
valid_resolved = resolver.resolve("other-template")
|
||||
assert valid_resolved is not None
|
||||
assert "Valid" in valid_resolved.read_text()
|
||||
|
||||
|
||||
class TestExtensionPriorityCLI:
|
||||
"""Test extension priority CLI integration."""
|
||||
|
||||
def test_add_with_priority_option(self, extension_dir, project_dir):
|
||||
"""Test extension add command with --priority option."""
|
||||
from typer.testing import CliRunner
|
||||
from unittest.mock import patch
|
||||
from specify_cli import app
|
||||
|
||||
runner = CliRunner()
|
||||
|
||||
with patch.object(Path, "cwd", return_value=project_dir):
|
||||
result = runner.invoke(app, [
|
||||
"extension", "add", str(extension_dir), "--dev", "--priority", "3"
|
||||
])
|
||||
|
||||
assert result.exit_code == 0, result.output
|
||||
|
||||
manager = ExtensionManager(project_dir)
|
||||
metadata = manager.registry.get("test-ext")
|
||||
assert metadata["priority"] == 3
|
||||
|
||||
def test_list_shows_priority(self, extension_dir, project_dir):
|
||||
"""Test extension list shows priority."""
|
||||
from typer.testing import CliRunner
|
||||
from unittest.mock import patch
|
||||
from specify_cli import app
|
||||
|
||||
runner = CliRunner()
|
||||
|
||||
# Install extension with priority
|
||||
manager = ExtensionManager(project_dir)
|
||||
manager.install_from_directory(extension_dir, "0.1.0", register_commands=False, priority=7)
|
||||
|
||||
with patch.object(Path, "cwd", return_value=project_dir):
|
||||
result = runner.invoke(app, ["extension", "list"])
|
||||
|
||||
assert result.exit_code == 0, result.output
|
||||
assert "Priority: 7" in result.output
|
||||
|
||||
def test_set_priority_changes_priority(self, extension_dir, project_dir):
|
||||
"""Test set-priority command changes extension priority."""
|
||||
from typer.testing import CliRunner
|
||||
from unittest.mock import patch
|
||||
from specify_cli import app
|
||||
|
||||
runner = CliRunner()
|
||||
|
||||
# Install extension with default priority
|
||||
manager = ExtensionManager(project_dir)
|
||||
manager.install_from_directory(extension_dir, "0.1.0", register_commands=False)
|
||||
|
||||
# Verify default priority
|
||||
assert manager.registry.get("test-ext")["priority"] == 10
|
||||
|
||||
with patch.object(Path, "cwd", return_value=project_dir):
|
||||
result = runner.invoke(app, ["extension", "set-priority", "test-ext", "5"])
|
||||
|
||||
assert result.exit_code == 0, result.output
|
||||
assert "priority changed: 10 → 5" in result.output
|
||||
|
||||
# Reload registry to see updated value
|
||||
manager2 = ExtensionManager(project_dir)
|
||||
assert manager2.registry.get("test-ext")["priority"] == 5
|
||||
|
||||
def test_set_priority_same_value_no_change(self, extension_dir, project_dir):
|
||||
"""Test set-priority with same value shows already set message."""
|
||||
from typer.testing import CliRunner
|
||||
from unittest.mock import patch
|
||||
from specify_cli import app
|
||||
|
||||
runner = CliRunner()
|
||||
|
||||
# Install extension with priority 5
|
||||
manager = ExtensionManager(project_dir)
|
||||
manager.install_from_directory(extension_dir, "0.1.0", register_commands=False, priority=5)
|
||||
|
||||
with patch.object(Path, "cwd", return_value=project_dir):
|
||||
result = runner.invoke(app, ["extension", "set-priority", "test-ext", "5"])
|
||||
|
||||
assert result.exit_code == 0, result.output
|
||||
assert "already has priority 5" in result.output
|
||||
|
||||
def test_set_priority_invalid_value(self, extension_dir, project_dir):
|
||||
"""Test set-priority rejects invalid priority values."""
|
||||
from typer.testing import CliRunner
|
||||
from unittest.mock import patch
|
||||
from specify_cli import app
|
||||
|
||||
runner = CliRunner()
|
||||
|
||||
# Install extension
|
||||
manager = ExtensionManager(project_dir)
|
||||
manager.install_from_directory(extension_dir, "0.1.0", register_commands=False)
|
||||
|
||||
with patch.object(Path, "cwd", return_value=project_dir):
|
||||
result = runner.invoke(app, ["extension", "set-priority", "test-ext", "0"])
|
||||
|
||||
assert result.exit_code == 1, result.output
|
||||
assert "Priority must be a positive integer" in result.output
|
||||
|
||||
def test_set_priority_not_installed(self, project_dir):
|
||||
"""Test set-priority fails for non-installed extension."""
|
||||
from typer.testing import CliRunner
|
||||
from unittest.mock import patch
|
||||
from specify_cli import app
|
||||
|
||||
runner = CliRunner()
|
||||
|
||||
# Ensure .specify exists
|
||||
(project_dir / ".specify").mkdir(parents=True, exist_ok=True)
|
||||
|
||||
with patch.object(Path, "cwd", return_value=project_dir):
|
||||
result = runner.invoke(app, ["extension", "set-priority", "nonexistent", "5"])
|
||||
|
||||
assert result.exit_code == 1, result.output
|
||||
assert "not installed" in result.output.lower() or "no extensions installed" in result.output.lower()
|
||||
|
||||
def test_set_priority_by_display_name(self, extension_dir, project_dir):
|
||||
"""Test set-priority works with extension display name."""
|
||||
from typer.testing import CliRunner
|
||||
from unittest.mock import patch
|
||||
from specify_cli import app
|
||||
|
||||
runner = CliRunner()
|
||||
|
||||
# Install extension
|
||||
manager = ExtensionManager(project_dir)
|
||||
manager.install_from_directory(extension_dir, "0.1.0", register_commands=False)
|
||||
|
||||
# Use display name "Test Extension" instead of ID "test-ext"
|
||||
with patch.object(Path, "cwd", return_value=project_dir):
|
||||
result = runner.invoke(app, ["extension", "set-priority", "Test Extension", "3"])
|
||||
|
||||
assert result.exit_code == 0, result.output
|
||||
assert "priority changed" in result.output
|
||||
|
||||
# Reload registry to see updated value
|
||||
manager2 = ExtensionManager(project_dir)
|
||||
assert manager2.registry.get("test-ext")["priority"] == 3
|
||||
|
||||
|
||||
class TestExtensionPriorityBackwardsCompatibility:
|
||||
"""Test backwards compatibility for extensions installed before priority feature."""
|
||||
|
||||
def test_legacy_extension_without_priority_field(self, temp_dir):
|
||||
"""Extensions installed before priority feature should default to 10."""
|
||||
extensions_dir = temp_dir / "extensions"
|
||||
extensions_dir.mkdir()
|
||||
|
||||
# Simulate legacy registry entry without priority field
|
||||
registry = ExtensionRegistry(extensions_dir)
|
||||
registry.data["extensions"]["legacy-ext"] = {
|
||||
"version": "1.0.0",
|
||||
"source": "local",
|
||||
"enabled": True,
|
||||
"installed_at": "2025-01-01T00:00:00Z",
|
||||
# No "priority" field - simulates pre-feature extension
|
||||
}
|
||||
registry._save()
|
||||
|
||||
# Reload registry
|
||||
registry2 = ExtensionRegistry(extensions_dir)
|
||||
|
||||
# list_by_priority should use default of 10
|
||||
result = registry2.list_by_priority()
|
||||
assert len(result) == 1
|
||||
assert result[0][0] == "legacy-ext"
|
||||
# Priority defaults to 10 and is normalized in returned metadata
|
||||
assert result[0][1]["priority"] == 10
|
||||
|
||||
def test_legacy_extension_in_list_installed(self, extension_dir, project_dir):
|
||||
"""list_installed returns priority=10 for legacy extensions without priority field."""
|
||||
manager = ExtensionManager(project_dir)
|
||||
|
||||
# Install extension normally
|
||||
manager.install_from_directory(extension_dir, "0.1.0", register_commands=False)
|
||||
|
||||
# Manually remove priority to simulate legacy extension
|
||||
ext_data = manager.registry.data["extensions"]["test-ext"]
|
||||
del ext_data["priority"]
|
||||
manager.registry._save()
|
||||
|
||||
# list_installed should still return priority=10
|
||||
installed = manager.list_installed()
|
||||
assert len(installed) == 1
|
||||
assert installed[0]["priority"] == 10
|
||||
|
||||
def test_mixed_legacy_and_new_extensions_ordering(self, temp_dir):
|
||||
"""Legacy extensions (no priority) sort with default=10 among prioritized extensions."""
|
||||
extensions_dir = temp_dir / "extensions"
|
||||
extensions_dir.mkdir()
|
||||
|
||||
registry = ExtensionRegistry(extensions_dir)
|
||||
|
||||
# Add extension with explicit priority=5
|
||||
registry.add("ext-with-priority", {"version": "1.0.0", "priority": 5})
|
||||
|
||||
# Add legacy extension without priority (manually)
|
||||
registry.data["extensions"]["legacy-ext"] = {
|
||||
"version": "1.0.0",
|
||||
"source": "local",
|
||||
"enabled": True,
|
||||
# No priority field
|
||||
}
|
||||
registry._save()
|
||||
|
||||
# Add extension with priority=15
|
||||
registry.add("ext-low-priority", {"version": "1.0.0", "priority": 15})
|
||||
|
||||
# Reload and check ordering
|
||||
registry2 = ExtensionRegistry(extensions_dir)
|
||||
result = registry2.list_by_priority()
|
||||
|
||||
assert len(result) == 3
|
||||
# Order: ext-with-priority (5), legacy-ext (defaults to 10), ext-low-priority (15)
|
||||
assert result[0][0] == "ext-with-priority"
|
||||
assert result[1][0] == "legacy-ext"
|
||||
assert result[2][0] == "ext-low-priority"
|
||||
|
||||
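# A minimal sketch of the ordering contract the tests above assert: lower
# priority number wins, missing or malformed values normalize to 10, disabled
# entries are skipped unless requested, and (by assumption here) ties break
# alphabetically by ID. The names DEFAULT_PRIORITY, _normalize_priority, and
# list_by_priority_sketch are hypothetical, not the real implementation.

DEFAULT_PRIORITY = 10

def _normalize_priority(value) -> int:
    """Coerce a stored priority to a positive int, else fall back to 10."""
    return value if isinstance(value, int) and value > 0 else DEFAULT_PRIORITY

def list_by_priority_sketch(extensions: dict, include_disabled: bool = False):
    """Return (ext_id, metadata) pairs sorted by (priority, ext_id)."""
    items = []
    for ext_id, meta in extensions.items():
        if not isinstance(meta, dict):
            continue  # corrupted entries never sort
        if not include_disabled and not meta.get("enabled", True):
            continue  # disabled entries are excluded by default
        items.append((ext_id, {**meta, "priority": _normalize_priority(meta.get("priority"))}))
    return sorted(items, key=lambda pair: (pair[1]["priority"], pair[0]))
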
@@ -1,190 +0,0 @@
import stat

from specify_cli import merge_json_files
from specify_cli import handle_vscode_settings


# --- Dimension 2: Polite Deep Merge Strategy ---

def test_merge_json_files_type_mismatch_preservation(tmp_path):
    """If user has a string but template wants a dict, PRESERVE user's string."""
    existing_file = tmp_path / "settings.json"
    # User might have overridden a setting with a simple string or different type
    existing_file.write_text('{"chat.editor.fontFamily": "CustomFont"}')

    # Template might expect a dict for the same key (hypothetically)
    new_settings = {
        "chat.editor.fontFamily": {"font": "TemplateFont"}
    }

    merged = merge_json_files(existing_file, new_settings)
    # Result is None because user settings were preserved and nothing else changed
    assert merged is None

def test_merge_json_files_deep_nesting(tmp_path):
    """Verify deep recursive merging of new keys."""
    existing_file = tmp_path / "settings.json"
    existing_file.write_text("""
    {
        "a": {
            "b": {
                "c": 1
            }
        }
    }
    """)

    new_settings = {
        "a": {
            "b": {
                "d": 2  # New nested key
            },
            "e": 3  # New mid-level key
        }
    }

    merged = merge_json_files(existing_file, new_settings)
    assert merged["a"]["b"]["c"] == 1
    assert merged["a"]["b"]["d"] == 2
    assert merged["a"]["e"] == 3

def test_merge_json_files_empty_existing(tmp_path):
    """Merging into an empty/new file."""
    existing_file = tmp_path / "empty.json"
    existing_file.write_text("{}")

    new_settings = {"a": 1}
    merged = merge_json_files(existing_file, new_settings)
    assert merged == {"a": 1}


# --- Dimension 3: Real-world Simulation ---

def test_merge_vscode_realistic_scenario(tmp_path):
    """A realistic VSCode settings.json with many existing preferences, comments, and trailing commas."""
    existing_file = tmp_path / "vscode_settings.json"
    existing_file.write_text("""
    {
        "editor.fontSize": 12,
        "editor.formatOnSave": true, /* block comment */
        "files.exclude": {
            "**/.git": true,
            "**/node_modules": true,
        },
        "chat.promptFilesRecommendations": {
            "existing.tool": true,
        } // User comment
    }
    """)

    template_settings = {
        "chat.promptFilesRecommendations": {
            "speckit.specify": True,
            "speckit.plan": True
        },
        "chat.tools.terminal.autoApprove": {
            ".specify/scripts/bash/": True
        }
    }

    merged = merge_json_files(existing_file, template_settings)

    # Check preservation
    assert merged["editor.fontSize"] == 12
    assert merged["files.exclude"]["**/.git"] is True
    assert merged["chat.promptFilesRecommendations"]["existing.tool"] is True

    # Check additions
    assert merged["chat.promptFilesRecommendations"]["speckit.specify"] is True
    assert merged["chat.tools.terminal.autoApprove"][".specify/scripts/bash/"] is True


# --- Dimension 4: Error Handling & Robustness ---

def test_merge_json_files_with_bom(tmp_path):
    """Test files with UTF-8 BOM (sometimes created on Windows)."""
    existing_file = tmp_path / "bom.json"
    content = '{"a": 1}'
    # Prepend UTF-8 BOM
    existing_file.write_bytes(b'\xef\xbb\xbf' + content.encode('utf-8'))

    new_settings = {"b": 2}
    merged = merge_json_files(existing_file, new_settings)
    assert merged == {"a": 1, "b": 2}

def test_merge_json_files_not_a_dictionary_template(tmp_path):
    """If for some reason new_content is not a dict, PRESERVE existing settings by returning None."""
    existing_file = tmp_path / "ok.json"
    existing_file.write_text('{"a": 1}')

    # Secure fallback: return None to skip writing and avoid clobbering
    assert merge_json_files(existing_file, ["not", "a", "dict"]) is None

def test_merge_json_files_unparseable_existing(tmp_path):
    """If the existing file is unparseable JSON, return None to avoid overwriting it."""
    bad_file = tmp_path / "bad.json"
    bad_file.write_text('{"a": 1, missing_value}')  # Invalid JSON

    assert merge_json_files(bad_file, {"b": 2}) is None


def test_merge_json_files_list_preservation(tmp_path):
    """Verify that existing list values are preserved and NOT merged or overwritten."""
    existing_file = tmp_path / "list.json"
    existing_file.write_text('{"my.list": ["user_item"]}')

    template_settings = {
        "my.list": ["template_item"]
    }

    merged = merge_json_files(existing_file, template_settings)
    # The polite merge policy says: keep existing values if they exist and aren't both dicts.
    # Since nothing changed, it returns None.
    assert merged is None

def test_merge_json_files_no_changes(tmp_path):
    """If the merge doesn't introduce any new keys or changes, return None to skip rewrite."""
    existing_file = tmp_path / "no_change.json"
    existing_file.write_text('{"a": 1, "b": {"c": 2}}')

    template_settings = {
        "a": 1,  # Already exists
        "b": {"c": 2}  # Already exists nested
    }

    # Should return None because result == existing
    assert merge_json_files(existing_file, template_settings) is None

def test_merge_json_files_type_mismatch_no_op(tmp_path):
    """If a key exists with a different type and we preserve it, the merge may still be a no-op."""
    existing_file = tmp_path / "mismatch_no_op.json"
    existing_file.write_text('{"a": "user_string"}')

    template_settings = {
        "a": {"key": "template_dict"}  # Mismatch, will be ignored
    }

    # Should return None because we preserved the user's string and nothing else changed
    assert merge_json_files(existing_file, template_settings) is None


def test_handle_vscode_settings_preserves_mode_on_atomic_write(tmp_path):
    """Atomic rewrite should preserve existing file mode bits."""
    vscode_dir = tmp_path / ".vscode"
    vscode_dir.mkdir()
    dest_file = vscode_dir / "settings.json"
    template_file = tmp_path / "template_settings.json"

    dest_file.write_text('{"a": 1}\n', encoding="utf-8")
    dest_file.chmod(0o640)
    before_mode = stat.S_IMODE(dest_file.stat().st_mode)

    template_file.write_text('{"b": 2}\n', encoding="utf-8")

    handle_vscode_settings(
        template_file,
        dest_file,
        "settings.json",
        verbose=False,
        tracker=None,
    )

    after_mode = stat.S_IMODE(dest_file.stat().st_mode)
    assert after_mode == before_mode

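# A minimal sketch of the "polite" deep-merge policy the deleted tests above
# encode, assuming the real merge_json_files follows the same three rules:
# recurse only when both sides are dicts, never overwrite an existing user
# value, and return None when the merge changes nothing. Names and structure
# here are illustrative only; BOM handling and JSONC comment parsing are not
# modeled.

import copy

def polite_merge_sketch(existing: dict, template: dict):
    """Hypothetical reimplementation of the merge contract asserted above."""
    def merge(dst: dict, src: dict) -> dict:
        for key, value in src.items():
            if key not in dst:
                dst[key] = copy.deepcopy(value)  # only genuinely new keys are added
            elif isinstance(dst[key], dict) and isinstance(value, dict):
                merge(dst[key], value)  # recurse only when both sides are dicts
            # any other case (type mismatch, lists, scalars): keep the user's value
        return dst

    merged = merge(copy.deepcopy(existing), template)
    return None if merged == existing else merged  # None signals "skip rewrite"

# Under this policy the list-preservation and type-mismatch tests both come
# out as None: the user's value wins, so nothing needs to change on disk.
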
@@ -32,7 +32,6 @@ from specify_cli.presets import (
    PresetCompatibilityError,
    VALID_PRESET_TEMPLATE_TYPES,
)
from specify_cli.extensions import ExtensionRegistry


# ===== Fixtures =====

@@ -369,172 +368,6 @@ class TestPresetRegistry:
        registry = PresetRegistry(packs_dir)
        assert registry.get("nonexistent") is None

    def test_restore(self, temp_dir):
        """Test restore() preserves timestamps exactly."""
        packs_dir = temp_dir / "packs"
        packs_dir.mkdir()
        registry = PresetRegistry(packs_dir)

        # Create original entry with a specific timestamp
        original_metadata = {
            "version": "1.0.0",
            "source": "local",
            "installed_at": "2025-01-15T10:30:00+00:00",
            "enabled": True,
        }
        registry.restore("test-pack", original_metadata)

        # Verify exact restoration
        restored = registry.get("test-pack")
        assert restored["installed_at"] == "2025-01-15T10:30:00+00:00"
        assert restored["version"] == "1.0.0"
        assert restored["enabled"] is True

    def test_restore_rejects_none_metadata(self, temp_dir):
        """Test restore() raises ValueError for None metadata."""
        packs_dir = temp_dir / "packs"
        packs_dir.mkdir()
        registry = PresetRegistry(packs_dir)

        with pytest.raises(ValueError, match="metadata must be a dict"):
            registry.restore("test-pack", None)

    def test_restore_rejects_non_dict_metadata(self, temp_dir):
        """Test restore() raises ValueError for non-dict metadata."""
        packs_dir = temp_dir / "packs"
        packs_dir.mkdir()
        registry = PresetRegistry(packs_dir)

        with pytest.raises(ValueError, match="metadata must be a dict"):
            registry.restore("test-pack", "not-a-dict")

        with pytest.raises(ValueError, match="metadata must be a dict"):
            registry.restore("test-pack", ["list", "not", "dict"])

    def test_restore_uses_deep_copy(self, temp_dir):
        """Test restore() deep copies metadata to prevent mutation."""
        packs_dir = temp_dir / "packs"
        packs_dir.mkdir()
        registry = PresetRegistry(packs_dir)

        original_metadata = {
            "version": "1.0.0",
            "nested": {"key": "original"},
        }
        registry.restore("test-pack", original_metadata)

        # Mutate the original metadata after restore
        original_metadata["version"] = "MUTATED"
        original_metadata["nested"]["key"] = "MUTATED"

        # Registry should have the original values
        stored = registry.get("test-pack")
        assert stored["version"] == "1.0.0"
        assert stored["nested"]["key"] == "original"

    def test_get_returns_deep_copy(self, temp_dir):
        """Test that get() returns a deep copy to prevent mutation."""
        packs_dir = temp_dir / "packs"
        packs_dir.mkdir()
        registry = PresetRegistry(packs_dir)

        registry.add("test-pack", {"version": "1.0.0", "nested": {"key": "original"}})

        # Get and mutate the returned copy
        metadata = registry.get("test-pack")
        metadata["version"] = "MUTATED"
        metadata["nested"]["key"] = "MUTATED"

        # Original should be unchanged
        fresh = registry.get("test-pack")
        assert fresh["version"] == "1.0.0"
        assert fresh["nested"]["key"] == "original"

    def test_get_returns_none_for_corrupted_entry(self, temp_dir):
        """Test that get() returns None for corrupted (non-dict) entries."""
        packs_dir = temp_dir / "packs"
        packs_dir.mkdir()
        registry = PresetRegistry(packs_dir)

        # Directly corrupt the registry with non-dict entries
        registry.data["presets"]["corrupted-string"] = "not a dict"
        registry.data["presets"]["corrupted-list"] = ["not", "a", "dict"]
        registry.data["presets"]["corrupted-int"] = 42
        registry._save()

        # All corrupted entries should return None
        assert registry.get("corrupted-string") is None
        assert registry.get("corrupted-list") is None
        assert registry.get("corrupted-int") is None
        # Non-existent should also return None
        assert registry.get("nonexistent") is None

    def test_list_returns_deep_copy(self, temp_dir):
        """Test that list() returns deep copies to prevent mutation."""
        packs_dir = temp_dir / "packs"
        packs_dir.mkdir()
        registry = PresetRegistry(packs_dir)

        registry.add("test-pack", {"version": "1.0.0", "nested": {"key": "original"}})

        # Get list and mutate
        all_packs = registry.list()
        all_packs["test-pack"]["version"] = "MUTATED"
        all_packs["test-pack"]["nested"]["key"] = "MUTATED"

        # Original should be unchanged
        fresh = registry.get("test-pack")
        assert fresh["version"] == "1.0.0"
        assert fresh["nested"]["key"] == "original"

    def test_list_returns_empty_dict_for_corrupted_registry(self, temp_dir):
        """Test that list() returns empty dict when presets is not a dict."""
        packs_dir = temp_dir / "packs"
        packs_dir.mkdir()
        registry = PresetRegistry(packs_dir)

        # Corrupt the registry - presets is a list instead of dict
        registry.data["presets"] = ["not", "a", "dict"]
        registry._save()

        # list() should return empty dict, not crash
        result = registry.list()
        assert result == {}

    def test_list_by_priority_excludes_disabled(self, temp_dir):
        """Test that list_by_priority excludes disabled presets by default."""
        packs_dir = temp_dir / "packs"
        packs_dir.mkdir()
        registry = PresetRegistry(packs_dir)

        registry.add("pack-enabled", {"version": "1.0.0", "enabled": True, "priority": 5})
        registry.add("pack-disabled", {"version": "1.0.0", "enabled": False, "priority": 1})
        registry.add("pack-default", {"version": "1.0.0", "priority": 10})  # no enabled field = True

        # Default: exclude disabled
        by_priority = registry.list_by_priority()
        pack_ids = [p[0] for p in by_priority]
        assert "pack-enabled" in pack_ids
        assert "pack-default" in pack_ids
        assert "pack-disabled" not in pack_ids

    def test_list_by_priority_includes_disabled_when_requested(self, temp_dir):
        """Test that list_by_priority includes disabled presets when requested."""
        packs_dir = temp_dir / "packs"
        packs_dir.mkdir()
        registry = PresetRegistry(packs_dir)

        registry.add("pack-enabled", {"version": "1.0.0", "enabled": True, "priority": 5})
        registry.add("pack-disabled", {"version": "1.0.0", "enabled": False, "priority": 1})

        # Include disabled
        by_priority = registry.list_by_priority(include_disabled=True)
        pack_ids = [p[0] for p in by_priority]
        assert "pack-enabled" in pack_ids
        assert "pack-disabled" in pack_ids
        # Disabled pack has lower priority number, so it comes first when included
        assert pack_ids[0] == "pack-disabled"


# ===== PresetManager Tests =====

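# The restore/get/list tests above share one defensive-copy pattern: validate
# on the way in, deep-copy at every boundary crossing, and treat non-dict
# entries as missing. A sketch of that accessor shape, with invented names
# (this is not the actual PresetRegistry code):

import copy

class _RegistrySketch:
    """Illustrative accessor pattern for the copy/corruption guarantees tested above."""

    def __init__(self):
        self.data = {"presets": {}}

    def get(self, pack_id):
        presets = self.data.get("presets")
        entry = presets.get(pack_id) if isinstance(presets, dict) else None
        # Corrupted (non-dict) entries read as missing; copies stop mutation leaks
        return copy.deepcopy(entry) if isinstance(entry, dict) else None

    def restore(self, pack_id, metadata):
        if not isinstance(metadata, dict):
            raise ValueError("metadata must be a dict")
        # Deep copy so later caller-side mutation cannot reach the registry
        self.data["presets"][pack_id] = copy.deepcopy(metadata)
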
@@ -740,24 +573,6 @@ class TestRegistryPriority:
        assert sorted_packs[0][0] == "pack-b"
        assert sorted_packs[1][0] == "pack-a"

    def test_list_by_priority_invalid_priority_defaults(self, temp_dir):
        """Malformed priority values fall back to the default priority."""
        packs_dir = temp_dir / "packs"
        packs_dir.mkdir()
        registry = PresetRegistry(packs_dir)

        registry.add("pack-high", {"version": "1.0.0", "priority": 1})
        registry.data["presets"]["pack-invalid"] = {
            "version": "1.0.0",
            "priority": "high",
        }
        registry._save()

        sorted_packs = registry.list_by_priority()

        assert [item[0] for item in sorted_packs] == ["pack-high", "pack-invalid"]
        assert sorted_packs[1][1]["priority"] == 10


# ===== PresetResolver Tests =====

@@ -863,54 +678,11 @@ class TestPresetResolver:
        ext_template = ext_templates_dir / "custom-template.md"
        ext_template.write_text("# Extension Custom Template\n")

        # Register extension in registry
        extensions_dir = project_dir / ".specify" / "extensions"
        ext_registry = ExtensionRegistry(extensions_dir)
        ext_registry.add("my-ext", {"version": "1.0.0", "priority": 10})

        resolver = PresetResolver(project_dir)
        result = resolver.resolve("custom-template")
        assert result is not None
        assert "Extension Custom Template" in result.read_text()

    def test_resolve_disabled_extension_templates_skipped(self, project_dir):
        """Test that disabled extension templates are not resolved."""
        # Create extension with templates
        ext_dir = project_dir / ".specify" / "extensions" / "disabled-ext"
        ext_templates_dir = ext_dir / "templates"
        ext_templates_dir.mkdir(parents=True)
        ext_template = ext_templates_dir / "disabled-template.md"
        ext_template.write_text("# Disabled Extension Template\n")

        # Register extension as disabled
        extensions_dir = project_dir / ".specify" / "extensions"
        ext_registry = ExtensionRegistry(extensions_dir)
        ext_registry.add("disabled-ext", {"version": "1.0.0", "priority": 1, "enabled": False})

        # Template should NOT be resolved because extension is disabled
        resolver = PresetResolver(project_dir)
        result = resolver.resolve("disabled-template")
        assert result is None, "Disabled extension template should not be resolved"

    def test_resolve_disabled_extension_not_picked_up_as_unregistered(self, project_dir):
        """Test that disabled extensions are not picked up via unregistered dir scan."""
        # Create extension directory with templates
        ext_dir = project_dir / ".specify" / "extensions" / "test-disabled-ext"
        ext_templates_dir = ext_dir / "templates"
        ext_templates_dir.mkdir(parents=True)
        ext_template = ext_templates_dir / "unique-disabled-template.md"
        ext_template.write_text("# Should Not Resolve\n")

        # Register the extension but disable it
        extensions_dir = project_dir / ".specify" / "extensions"
        ext_registry = ExtensionRegistry(extensions_dir)
        ext_registry.add("test-disabled-ext", {"version": "1.0.0", "enabled": False})

        # Verify the template is NOT resolved (even though the directory exists)
        resolver = PresetResolver(project_dir)
        result = resolver.resolve("unique-disabled-template")
        assert result is None, "Disabled extension should not be picked up as unregistered"

    def test_resolve_pack_over_extension(self, project_dir, pack_dir, temp_dir, valid_pack_data):
        """Test that pack templates take priority over extension templates."""
        # Create extension with templates
@@ -969,15 +741,10 @@ class TestPresetResolver:
        ext_template = ext_templates_dir / "unique-template.md"
        ext_template.write_text("# Unique\n")

        # Register extension in registry
        extensions_dir = project_dir / ".specify" / "extensions"
        ext_registry = ExtensionRegistry(extensions_dir)
        ext_registry.add("my-ext", {"version": "1.0.0", "priority": 10})

        resolver = PresetResolver(project_dir)
        result = resolver.resolve_with_source("unique-template")
        assert result is not None
        assert result["source"] == "extension:my-ext v1.0.0"
        assert result["source"] == "extension:my-ext"

    def test_resolve_with_source_not_found(self, project_dir):
        """Test resolve_with_source for nonexistent template."""
@@ -998,104 +765,6 @@ class TestPresetResolver:
        assert result is None


class TestExtensionPriorityResolution:
    """Test extension priority resolution with registered and unregistered extensions."""

    def test_unregistered_beats_registered_with_lower_precedence(self, project_dir):
        """Unregistered extension (implicit priority 10) beats registered with priority 20."""
        extensions_dir = project_dir / ".specify" / "extensions"
        extensions_dir.mkdir(parents=True, exist_ok=True)

        # Create registered extension with priority 20 (lower precedence than 10)
        registered_dir = extensions_dir / "registered-ext"
        (registered_dir / "templates").mkdir(parents=True)
        (registered_dir / "templates" / "test-template.md").write_text("# From Registered\n")

        ext_registry = ExtensionRegistry(extensions_dir)
        ext_registry.add("registered-ext", {"version": "1.0.0", "priority": 20})

        # Create unregistered extension directory (implicit priority 10)
        unregistered_dir = extensions_dir / "unregistered-ext"
        (unregistered_dir / "templates").mkdir(parents=True)
        (unregistered_dir / "templates" / "test-template.md").write_text("# From Unregistered\n")

        # Unregistered (priority 10) should beat registered (priority 20)
        resolver = PresetResolver(project_dir)
        result = resolver.resolve("test-template")
        assert result is not None
        assert "From Unregistered" in result.read_text()

    def test_registered_with_higher_precedence_beats_unregistered(self, project_dir):
        """Registered extension with priority 5 beats unregistered (implicit priority 10)."""
        extensions_dir = project_dir / ".specify" / "extensions"
        extensions_dir.mkdir(parents=True, exist_ok=True)

        # Create registered extension with priority 5 (higher precedence than 10)
        registered_dir = extensions_dir / "registered-ext"
        (registered_dir / "templates").mkdir(parents=True)
        (registered_dir / "templates" / "test-template.md").write_text("# From Registered\n")

        ext_registry = ExtensionRegistry(extensions_dir)
        ext_registry.add("registered-ext", {"version": "1.0.0", "priority": 5})

        # Create unregistered extension directory (implicit priority 10)
        unregistered_dir = extensions_dir / "unregistered-ext"
        (unregistered_dir / "templates").mkdir(parents=True)
        (unregistered_dir / "templates" / "test-template.md").write_text("# From Unregistered\n")

        # Registered (priority 5) should beat unregistered (priority 10)
        resolver = PresetResolver(project_dir)
        result = resolver.resolve("test-template")
        assert result is not None
        assert "From Registered" in result.read_text()

    def test_unregistered_attribution_with_priority_ordering(self, project_dir):
        """Test resolve_with_source correctly attributes unregistered extension."""
        extensions_dir = project_dir / ".specify" / "extensions"
        extensions_dir.mkdir(parents=True, exist_ok=True)

        # Create registered extension with priority 20
        registered_dir = extensions_dir / "registered-ext"
        (registered_dir / "templates").mkdir(parents=True)
        (registered_dir / "templates" / "test-template.md").write_text("# From Registered\n")

        ext_registry = ExtensionRegistry(extensions_dir)
        ext_registry.add("registered-ext", {"version": "1.0.0", "priority": 20})

        # Create unregistered extension (implicit priority 10)
        unregistered_dir = extensions_dir / "unregistered-ext"
        (unregistered_dir / "templates").mkdir(parents=True)
        (unregistered_dir / "templates" / "test-template.md").write_text("# From Unregistered\n")

        # Attribution should show unregistered extension
        resolver = PresetResolver(project_dir)
        result = resolver.resolve_with_source("test-template")
        assert result is not None
        assert "unregistered-ext" in result["source"]
        assert "(unregistered)" in result["source"]

    def test_same_priority_sorted_alphabetically(self, project_dir):
        """Extensions with same priority are sorted alphabetically by ID."""
        extensions_dir = project_dir / ".specify" / "extensions"
        extensions_dir.mkdir(parents=True, exist_ok=True)

        # Create two unregistered extensions (both implicit priority 10)
        # "aaa-ext" should come before "zzz-ext" alphabetically
        zzz_dir = extensions_dir / "zzz-ext"
        (zzz_dir / "templates").mkdir(parents=True)
        (zzz_dir / "templates" / "test-template.md").write_text("# From ZZZ\n")

        aaa_dir = extensions_dir / "aaa-ext"
        (aaa_dir / "templates").mkdir(parents=True)
        (aaa_dir / "templates" / "test-template.md").write_text("# From AAA\n")

        # AAA should win due to alphabetical ordering at same priority
        resolver = PresetResolver(project_dir)
        result = resolver.resolve("test-template")
        assert result is not None
        assert "From AAA" in result.read_text()

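# Taken together, the resolution tests above imply one candidate ordering:
# merge registered extensions (stored priority) with unregistered directories
# (implicit priority 10), drop disabled or corrupted entries, then sort by
# (priority, id). A sketch under those assumptions; the constant and function
# names are invented for illustration:

DEFAULT_PRIORITY = 10  # assumed implicit priority for unregistered extension dirs

def ordered_extension_candidates(registered: dict, unregistered_ids: list):
    """Hypothetical candidate ordering implied by the tests above."""
    candidates = []
    for ext_id, meta in registered.items():
        if isinstance(meta, dict) and meta.get("enabled", True):  # disabled/corrupted never compete
            candidates.append((meta.get("priority", DEFAULT_PRIORITY), ext_id))
    for ext_id in unregistered_ids:
        candidates.append((DEFAULT_PRIORITY, ext_id))
    # Lower priority number wins; alphabetical ID breaks ties
    return [ext_id for _, ext_id in sorted(candidates)]

# ordered_extension_candidates({"registered-ext": {"priority": 20}}, ["unregistered-ext"])
# -> ["unregistered-ext", "registered-ext"], matching the first test above.
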
# ===== PresetCatalog Tests =====

@@ -1310,13 +979,8 @@ class TestIntegration:
        ext_templates_dir.mkdir(parents=True)
        (ext_templates_dir / "spec-template.md").write_text("# Extension\n")

        # Register extension in registry
        extensions_dir = project_dir / ".specify" / "extensions"
        ext_registry = ExtensionRegistry(extensions_dir)
        ext_registry.add("my-ext", {"version": "1.0.0", "priority": 10})

        result = resolver.resolve_with_source("spec-template")
        assert result["source"] == "extension:my-ext v1.0.0"
        assert result["source"] == "extension:my-ext"

        # Install pack — should win over extension
        manager = PresetManager(project_dir)
@@ -2046,348 +1710,3 @@ class TestPresetSkills:

        metadata = manager.registry.get("self-test")
        assert metadata.get("registered_skills", []) == []


class TestPresetSetPriority:
    """Test preset set-priority CLI command."""

    def test_set_priority_changes_priority(self, project_dir, pack_dir):
        """Test set-priority command changes preset priority."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset with default priority
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")

        # Verify default priority
        assert manager.registry.get("test-pack")["priority"] == 10

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "set-priority", "test-pack", "5"])

        assert result.exit_code == 0, result.output
        assert "priority changed: 10 → 5" in result.output

        # Reload registry to see updated value
        manager2 = PresetManager(project_dir)
        assert manager2.registry.get("test-pack")["priority"] == 5

    def test_set_priority_same_value_no_change(self, project_dir, pack_dir):
        """Test set-priority with same value shows already set message."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset with priority 5
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5", priority=5)

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "set-priority", "test-pack", "5"])

        assert result.exit_code == 0, result.output
        assert "already has priority 5" in result.output

    def test_set_priority_invalid_value(self, project_dir, pack_dir):
        """Test set-priority rejects invalid priority values."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "set-priority", "test-pack", "0"])

        assert result.exit_code == 1, result.output
        assert "Priority must be a positive integer" in result.output

    def test_set_priority_not_installed(self, project_dir):
        """Test set-priority fails for non-installed preset."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "set-priority", "nonexistent", "5"])

        assert result.exit_code == 1, result.output
        assert "not installed" in result.output.lower()


class TestPresetPriorityBackwardsCompatibility:
    """Test backwards compatibility for presets installed before priority feature."""

    def test_legacy_preset_without_priority_field(self, temp_dir):
        """Presets installed before priority feature should default to 10."""
        presets_dir = temp_dir / ".specify" / "presets"
        presets_dir.mkdir(parents=True)

        # Simulate legacy registry entry without priority field
        registry = PresetRegistry(presets_dir)
        registry.data["presets"]["legacy-pack"] = {
            "version": "1.0.0",
            "source": "local",
            "enabled": True,
            "installed_at": "2025-01-01T00:00:00Z",
            # No "priority" field - simulates pre-feature preset
        }
        registry._save()

        # Reload registry
        registry2 = PresetRegistry(presets_dir)

        # list_by_priority should use default of 10
        result = registry2.list_by_priority()
        assert len(result) == 1
        assert result[0][0] == "legacy-pack"
        # Priority defaults to 10 and is normalized in returned metadata
        assert result[0][1]["priority"] == 10

    def test_legacy_preset_in_list_installed(self, project_dir, pack_dir):
        """list_installed returns priority=10 for legacy presets without priority field."""
        manager = PresetManager(project_dir)

        # Install preset normally
        manager.install_from_directory(pack_dir, "0.1.5")

        # Manually remove priority to simulate legacy preset
        pack_data = manager.registry.data["presets"]["test-pack"]
        del pack_data["priority"]
        manager.registry._save()

        # list_installed should still return priority=10
        installed = manager.list_installed()
        assert len(installed) == 1
        assert installed[0]["priority"] == 10

    def test_mixed_legacy_and_new_presets_ordering(self, temp_dir):
        """Legacy presets (no priority) sort with default=10 among prioritized presets."""
        presets_dir = temp_dir / ".specify" / "presets"
        presets_dir.mkdir(parents=True)

        registry = PresetRegistry(presets_dir)

        # Add preset with explicit priority=5
        registry.add("pack-with-priority", {"version": "1.0.0", "priority": 5})

        # Add legacy preset without priority (manually)
        registry.data["presets"]["legacy-pack"] = {
            "version": "1.0.0",
            "source": "local",
            "enabled": True,
            # No priority field
        }

        # Add another preset with priority=15
        registry.add("low-priority-pack", {"version": "1.0.0", "priority": 15})
        registry._save()

        # Reload and check ordering
        registry2 = PresetRegistry(presets_dir)
        sorted_presets = registry2.list_by_priority()

        # Should be: pack-with-priority (5), legacy-pack (default 10), low-priority-pack (15)
        assert [p[0] for p in sorted_presets] == [
            "pack-with-priority",
            "legacy-pack",
            "low-priority-pack",
        ]


class TestPresetEnableDisable:
    """Test preset enable/disable CLI commands."""

    def test_disable_preset(self, project_dir, pack_dir):
        """Test disable command sets enabled=False."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")

        # Verify initially enabled
        assert manager.registry.get("test-pack").get("enabled", True) is True

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "disable", "test-pack"])

        assert result.exit_code == 0, result.output
        assert "disabled" in result.output.lower()

        # Reload registry to see updated value
        manager2 = PresetManager(project_dir)
        assert manager2.registry.get("test-pack")["enabled"] is False

    def test_enable_preset(self, project_dir, pack_dir):
        """Test enable command sets enabled=True."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset and disable it
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")
        manager.registry.update("test-pack", {"enabled": False})

        # Verify disabled
        assert manager.registry.get("test-pack")["enabled"] is False

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "enable", "test-pack"])

        assert result.exit_code == 0, result.output
        assert "enabled" in result.output.lower()

        # Reload registry to see updated value
        manager2 = PresetManager(project_dir)
        assert manager2.registry.get("test-pack")["enabled"] is True

    def test_disable_already_disabled(self, project_dir, pack_dir):
        """Test disable on already disabled preset shows warning."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset and disable it
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")
        manager.registry.update("test-pack", {"enabled": False})

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "disable", "test-pack"])

        assert result.exit_code == 0, result.output
        assert "already disabled" in result.output.lower()

    def test_enable_already_enabled(self, project_dir, pack_dir):
        """Test enable on already enabled preset shows warning."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset (enabled by default)
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "enable", "test-pack"])

        assert result.exit_code == 0, result.output
        assert "already enabled" in result.output.lower()

    def test_disable_not_installed(self, project_dir):
        """Test disable fails for non-installed preset."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "disable", "nonexistent"])

        assert result.exit_code == 1, result.output
        assert "not installed" in result.output.lower()

    def test_enable_not_installed(self, project_dir):
        """Test enable fails for non-installed preset."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "enable", "nonexistent"])

        assert result.exit_code == 1, result.output
        assert "not installed" in result.output.lower()

    def test_disabled_preset_excluded_from_resolution(self, project_dir, pack_dir):
        """Test that disabled presets are excluded from template resolution."""
        # Install preset with a template
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")

        # Create a template in the preset directory
        preset_template = project_dir / ".specify" / "presets" / "test-pack" / "templates" / "test-template.md"
        preset_template.parent.mkdir(parents=True, exist_ok=True)
        preset_template.write_text("# Template from test-pack")

        resolver = PresetResolver(project_dir)

        # Template should be found when enabled
        result = resolver.resolve("test-template", "template")
        assert result is not None
        assert "test-pack" in str(result)

        # Disable the preset
        manager.registry.update("test-pack", {"enabled": False})

        # Template should NOT be found when disabled
        resolver2 = PresetResolver(project_dir)
        result2 = resolver2.resolve("test-template", "template")
        assert result2 is None

    def test_enable_corrupted_registry_entry(self, project_dir, pack_dir):
        """Test enable fails gracefully for corrupted registry entry."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset then corrupt the registry entry
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")
        manager.registry.data["presets"]["test-pack"] = "corrupted-string"
        manager.registry._save()

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "enable", "test-pack"])

        assert result.exit_code == 1
        assert "corrupted state" in result.output.lower()

    def test_disable_corrupted_registry_entry(self, project_dir, pack_dir):
        """Test disable fails gracefully for corrupted registry entry."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()

        # Install preset then corrupt the registry entry
        manager = PresetManager(project_dir)
        manager.install_from_directory(pack_dir, "0.1.5")
        manager.registry.data["presets"]["test-pack"] = "corrupted-string"
        manager.registry._save()

        with patch.object(Path, "cwd", return_value=project_dir):
            result = runner.invoke(app, ["preset", "disable", "test-pack"])

        assert result.exit_code == 1
        assert "corrupted state" in result.output.lower()

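# The enable/disable tests above suggest a single guard pattern for both
# commands: fail when the preset is missing, fail fast on corrupted (non-dict)
# state, warn without writing when the flag already matches, otherwise merge
# the new flag via registry.update. A sketch with hypothetical names, not the
# actual CLI code:

def set_enabled_sketch(registry, pack_id: str, enabled: bool) -> str:
    """Illustrative control flow for `preset enable` / `preset disable`."""
    raw = registry.data["presets"].get(pack_id)
    if raw is None:
        raise SystemExit(f"Preset '{pack_id}' is not installed")
    if not isinstance(raw, dict):
        raise SystemExit(f"Preset '{pack_id}' is in a corrupted state")
    if raw.get("enabled", True) == enabled:
        return f"already {'enabled' if enabled else 'disabled'}"
    registry.update(pack_id, {"enabled": enabled})  # other fields are preserved
    return "enabled" if enabled else "disabled"
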
@@ -1,252 +0,0 @@
|
||||
"""
|
||||
Pytest tests for timestamp-based branch naming in create-new-feature.sh and common.sh.
|
||||
|
||||
Converted from tests/test_timestamp_branches.sh so they are discovered by `uv run pytest`.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
PROJECT_ROOT = Path(__file__).resolve().parent.parent
|
||||
CREATE_FEATURE = PROJECT_ROOT / "scripts" / "bash" / "create-new-feature.sh"
|
||||
COMMON_SH = PROJECT_ROOT / "scripts" / "bash" / "common.sh"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def git_repo(tmp_path: Path) -> Path:
|
||||
"""Create a temp git repo with scripts and .specify dir."""
|
||||
subprocess.run(["git", "init", "-q"], cwd=tmp_path, check=True)
|
||||
subprocess.run(
|
||||
["git", "config", "user.email", "test@example.com"], cwd=tmp_path, check=True
|
||||
)
|
||||
subprocess.run(
|
||||
["git", "config", "user.name", "Test User"], cwd=tmp_path, check=True
|
||||
)
|
||||
subprocess.run(
|
||||
["git", "commit", "--allow-empty", "-m", "init", "-q"],
|
||||
cwd=tmp_path,
|
||||
check=True,
|
||||
)
|
||||
scripts_dir = tmp_path / "scripts" / "bash"
|
||||
scripts_dir.mkdir(parents=True)
|
||||
shutil.copy(CREATE_FEATURE, scripts_dir / "create-new-feature.sh")
|
||||
shutil.copy(COMMON_SH, scripts_dir / "common.sh")
|
||||
(tmp_path / ".specify" / "templates").mkdir(parents=True)
|
||||
return tmp_path
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def no_git_dir(tmp_path: Path) -> Path:
|
||||
"""Create a temp directory without git, but with scripts."""
|
||||
scripts_dir = tmp_path / "scripts" / "bash"
|
||||
scripts_dir.mkdir(parents=True)
|
||||
shutil.copy(CREATE_FEATURE, scripts_dir / "create-new-feature.sh")
|
||||
shutil.copy(COMMON_SH, scripts_dir / "common.sh")
|
||||
(tmp_path / ".specify" / "templates").mkdir(parents=True)
|
||||
return tmp_path
|
||||
|
||||
|
||||
def run_script(cwd: Path, *args: str) -> subprocess.CompletedProcess:
|
||||
"""Run create-new-feature.sh with given args."""
|
||||
cmd = ["bash", "scripts/bash/create-new-feature.sh", *args]
|
||||
return subprocess.run(
|
||||
cmd,
|
||||
cwd=cwd,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
|
||||
def source_and_call(func_call: str, env: dict | None = None) -> subprocess.CompletedProcess:
|
||||
"""Source common.sh and call a function."""
|
||||
cmd = f'source "{COMMON_SH}" && {func_call}'
|
||||
return subprocess.run(
|
||||
["bash", "-c", cmd],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
env={**os.environ, **(env or {})},
|
||||
)
|
||||
|
||||
|
||||
# ── Timestamp Branch Tests ───────────────────────────────────────────────────
|
||||
|
||||
|
||||
class TestTimestampBranch:
|
||||
def test_timestamp_creates_branch(self, git_repo: Path):
|
||||
"""Test 1: --timestamp creates branch with YYYYMMDD-HHMMSS prefix."""
|
||||
result = run_script(git_repo, "--timestamp", "--short-name", "user-auth", "Add user auth")
|
||||
assert result.returncode == 0, result.stderr
|
||||
branch = None
|
||||
for line in result.stdout.splitlines():
|
||||
if line.startswith("BRANCH_NAME:"):
|
||||
branch = line.split(":", 1)[1].strip()
|
||||
assert branch is not None
|
||||
assert re.match(r"^\d{8}-\d{6}-user-auth$", branch), f"unexpected branch: {branch}"
|
||||
|
||||
def test_number_and_timestamp_warns(self, git_repo: Path):
|
||||
"""Test 3: --number + --timestamp warns and uses timestamp."""
|
||||
result = run_script(git_repo, "--timestamp", "--number", "42", "--short-name", "feat", "Feature")
|
||||
assert result.returncode == 0, result.stderr
|
||||
assert "Warning" in result.stderr and "--number" in result.stderr
|
||||
|
||||
def test_json_output_keys(self, git_repo: Path):
|
||||
"""Test 4: JSON output contains expected keys."""
|
||||
import json
|
||||
result = run_script(git_repo, "--json", "--timestamp", "--short-name", "api", "API feature")
|
||||
assert result.returncode == 0, result.stderr
|
||||
data = json.loads(result.stdout)
|
||||
for key in ("BRANCH_NAME", "SPEC_FILE", "FEATURE_NUM"):
|
||||
assert key in data, f"missing {key} in JSON: {data}"
|
||||
assert re.match(r"^\d{8}-\d{6}$", data["FEATURE_NUM"])
|
||||
|
||||
def test_long_name_truncation(self, git_repo: Path):
|
||||
"""Test 5: Long branch name is truncated to <= 244 chars."""
|
||||
long_name = "a-" * 150 + "end"
|
||||
result = run_script(git_repo, "--timestamp", "--short-name", long_name, "Long feature")
|
||||
assert result.returncode == 0, result.stderr
|
||||
branch = None
|
||||
for line in result.stdout.splitlines():
|
||||
if line.startswith("BRANCH_NAME:"):
|
||||
branch = line.split(":", 1)[1].strip()
|
||||
assert branch is not None
|
||||
assert len(branch) <= 244
|
||||
assert re.match(r"^\d{8}-\d{6}-", branch)
|
||||
|
||||
|
||||
# ── Sequential Branch Tests ──────────────────────────────────────────────────
|
||||
|
||||
|
||||
class TestSequentialBranch:
|
||||
def test_sequential_default_with_existing_specs(self, git_repo: Path):
|
||||
"""Test 2: Sequential default with existing specs."""
|
||||
(git_repo / "specs" / "001-first-feat").mkdir(parents=True)
|
||||
(git_repo / "specs" / "002-second-feat").mkdir(parents=True)
|
||||
result = run_script(git_repo, "--short-name", "new-feat", "New feature")
|
||||
assert result.returncode == 0, result.stderr
|
||||
branch = None
|
||||
for line in result.stdout.splitlines():
|
||||
if line.startswith("BRANCH_NAME:"):
|
||||
branch = line.split(":", 1)[1].strip()
|
||||
assert branch is not None
|
||||
assert re.match(r"^\d{3}-new-feat$", branch), f"unexpected branch: {branch}"
|
||||
|
||||
def test_sequential_ignores_timestamp_dirs(self, git_repo: Path):
|
||||
"""Sequential numbering skips timestamp dirs when computing next number."""
|
||||
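        # Only 002-first-feat counts toward numbering; the timestamp dir must be
        # skipped, so the next branch should be 003.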
(git_repo / "specs" / "002-first-feat").mkdir(parents=True)
|
||||
(git_repo / "specs" / "20260319-143022-ts-feat").mkdir(parents=True)
|
||||
result = run_script(git_repo, "--short-name", "next-feat", "Next feature")
|
||||
assert result.returncode == 0, result.stderr
|
||||
branch = None
|
||||
for line in result.stdout.splitlines():
|
||||
if line.startswith("BRANCH_NAME:"):
|
||||
branch = line.split(":", 1)[1].strip()
|
||||
assert branch == "003-next-feat", f"expected 003-next-feat, got: {branch}"
|
||||
|
||||
|
||||
# ── check_feature_branch Tests ───────────────────────────────────────────────
|
||||
|
||||
|
||||
class TestCheckFeatureBranch:
|
||||
def test_accepts_timestamp_branch(self):
|
||||
"""Test 6: check_feature_branch accepts timestamp branch."""
|
||||
result = source_and_call('check_feature_branch "20260319-143022-feat" "true"')
|
||||
assert result.returncode == 0
|
||||
|
||||
def test_accepts_sequential_branch(self):
|
||||
"""Test 7: check_feature_branch accepts sequential branch."""
|
||||
result = source_and_call('check_feature_branch "004-feat" "true"')
|
||||
assert result.returncode == 0
|
||||
|
||||
def test_rejects_main(self):
|
||||
"""Test 8: check_feature_branch rejects main."""
|
||||
result = source_and_call('check_feature_branch "main" "true"')
|
||||
assert result.returncode != 0
|
||||
|
||||
def test_rejects_partial_timestamp(self):
|
||||
"""Test 9: check_feature_branch rejects 7-digit date."""
|
||||
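        # "2026031" has only seven digits, so it must not match the YYYYMMDD prefix.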
result = source_and_call('check_feature_branch "2026031-143022-feat" "true"')
|
||||
assert result.returncode != 0
|
||||
|
||||
|
||||
# ── find_feature_dir_by_prefix Tests ─────────────────────────────────────────
|
||||
|
||||
|
||||
class TestFindFeatureDirByPrefix:
|
||||
def test_timestamp_branch(self, tmp_path: Path):
|
||||
"""Test 10: find_feature_dir_by_prefix with timestamp branch."""
|
||||
(tmp_path / "specs" / "20260319-143022-user-auth").mkdir(parents=True)
|
||||
result = source_and_call(
|
||||
f'find_feature_dir_by_prefix "{tmp_path}" "20260319-143022-user-auth"'
|
||||
)
|
||||
assert result.returncode == 0
|
||||
assert result.stdout.strip() == f"{tmp_path}/specs/20260319-143022-user-auth"
|
||||
|
||||
def test_cross_branch_prefix(self, tmp_path: Path):
|
||||
"""Test 11: find_feature_dir_by_prefix cross-branch (different suffix, same timestamp)."""
|
||||
(tmp_path / "specs" / "20260319-143022-original-feat").mkdir(parents=True)
|
||||
result = source_and_call(
|
||||
f'find_feature_dir_by_prefix "{tmp_path}" "20260319-143022-different-name"'
|
||||
)
|
||||
assert result.returncode == 0
|
||||
assert result.stdout.strip() == f"{tmp_path}/specs/20260319-143022-original-feat"
|
||||
|
||||
|
||||
# ── get_current_branch Tests ─────────────────────────────────────────────────
|
||||
|
||||
|
||||
class TestGetCurrentBranch:
|
||||
def test_env_var(self):
|
||||
"""Test 12: get_current_branch returns SPECIFY_FEATURE env var."""
|
||||
result = source_and_call("get_current_branch", env={"SPECIFY_FEATURE": "my-custom-branch"})
|
||||
assert result.stdout.strip() == "my-custom-branch"
|
||||
|
||||
|
||||
# ── No-git Tests ─────────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
class TestNoGitTimestamp:
|
||||
def test_no_git_timestamp(self, no_git_dir: Path):
|
||||
"""Test 13: No-git repo + timestamp creates spec dir with warning."""
|
||||
result = run_script(no_git_dir, "--timestamp", "--short-name", "no-git-feat", "No git feature")
|
||||
assert result.returncode == 0, result.stderr
|
||||
spec_dirs = list((no_git_dir / "specs").iterdir()) if (no_git_dir / "specs").exists() else []
|
||||
assert len(spec_dirs) > 0, "spec dir not created"
|
||||
assert "git" in result.stderr.lower() or "warning" in result.stderr.lower()
|
||||
|
||||
|
||||
# ── E2E Flow Tests ───────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
class TestE2EFlow:
|
||||
def test_e2e_timestamp(self, git_repo: Path):
|
||||
"""Test 14: E2E timestamp flow — branch, dir, validation."""
|
||||
run_script(git_repo, "--timestamp", "--short-name", "e2e-ts", "E2E timestamp test")
|
||||
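        # Confirm the script actually switched HEAD to the newly created branch.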
        branch = subprocess.run(
            ["git", "rev-parse", "--abbrev-ref", "HEAD"],
            cwd=git_repo,
            capture_output=True,
            text=True,
        ).stdout.strip()
        assert re.match(r"^\d{8}-\d{6}-e2e-ts$", branch), f"branch: {branch}"
        assert (git_repo / "specs" / branch).is_dir()
        val = source_and_call(f'check_feature_branch "{branch}" "true"')
        assert val.returncode == 0

    def test_e2e_sequential(self, git_repo: Path):
        """Test 15: E2E sequential flow (regression guard)."""
        run_script(git_repo, "--short-name", "seq-feat", "Sequential feature")
        branch = subprocess.run(
            ["git", "rev-parse", "--abbrev-ref", "HEAD"],
            cwd=git_repo,
            capture_output=True,
            text=True,
        ).stdout.strip()
        assert re.match(r"^\d{3}-seq-feat$", branch), f"branch: {branch}"
        assert (git_repo / "specs" / branch).is_dir()
        val = source_and_call(f'check_feature_branch "{branch}" "true"')
        assert val.returncode == 0