Mirror of https://github.com/github/spec-kit.git, synced 2026-04-02 18:53:09 +00:00

Compare commits (34 commits)
SHA1: 67214f9a8c, 3899dcc0d4, b8335a532c, cb16412f88, 804cd10c71, 4dff63a84e, 40ecd44ada, b19a7eedfa, 9cb3f3d1ad, f8da535d71, edaa5a7ff1, 5be705e414, 796b4f47c4, 6b1f45c50c, 8778c26dcf, 41d1f4b0ac, 9c2481fd67, 8520241dfe, 362868a342, d7206126e0, b22f381c0d, ccc44dd00a, 2c2fea8783, 4b4bd735a3, 36019ebf1b, fb152eb824, 00e5dc1f91, eeda669c19, ebc61067e8, 2c2936022c, 816c1160e9, bc766c3101, f132f748e3, ee65758e2b
@@ -12,7 +12,7 @@ body:
        - Review the [Extension Publishing Guide](https://github.com/github/spec-kit/blob/main/extensions/EXTENSION-PUBLISHING-GUIDE.md)
        - Ensure your extension has a valid `extension.yml` manifest
        - Create a GitHub release with a version tag (e.g., v1.0.0)
-       - Test installation: `specify extension add --from <your-release-url>`
+       - Test installation: `specify extension add <extension-name> --from <your-release-url>`

  - type: input
    id: extension-id
@@ -229,7 +229,7 @@ body:
      placeholder: |
        ```bash
        # Install extension
-       specify extension add --from https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip
+       specify extension add <extension-name> --from https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip

        # Use a command
        /speckit.your-extension.command-name arg1 arg2
.github/workflows/docs.yml (vendored, 2 lines changed)
@@ -64,5 +64,5 @@ jobs:
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
-       uses: actions/deploy-pages@v4
+       uses: actions/deploy-pages@v5
.github/workflows/lint.yml (vendored, 2 lines changed)
@@ -15,7 +15,7 @@ jobs:
        uses: actions/checkout@v6

      - name: Run markdownlint-cli2
-       uses: DavidAnson/markdownlint-cli2-action@v19
+       uses: DavidAnson/markdownlint-cli2-action@ce4853d43830c74c1753b39f3cf40f71c2031eb9 # v23
        with:
          globs: |
            '**/*.md'
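This hunk both upgrades the action (v19 to v23) and pins it to a full commit SHA instead of a mutable tag; the test.yml changes below apply the same pinning to astral-sh/setup-uv. A minimal sketch of how one might look up the SHA behind a tag when preparing such a pin (repository taken from the hunk above):

```bash
# Print the commit that the v23 tag points at, suitable for a pinned `uses:` reference
git ls-remote https://github.com/DavidAnson/markdownlint-cli2-action refs/tags/v23
```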
.github/workflows/release-trigger.yml (vendored, 45 lines changed)
@@ -100,18 +100,16 @@ jobs:
            COMMITS="- Initial release"
          fi

-         # Create new changelog entry
-         {
-           head -n 8 CHANGELOG.md
-           echo ""
-           echo "## [${{ steps.version.outputs.version }}] - $DATE"
-           echo ""
-           echo "### Changes"
-           echo ""
-           echo "$COMMITS"
-           echo ""
-           tail -n +9 CHANGELOG.md
-         } > CHANGELOG.md.tmp
+         # Create new changelog entry — insert after the marker comment
+         NEW_ENTRY=$(printf '%s\n' \
+           "" \
+           "## [${{ steps.version.outputs.version }}] - $DATE" \
+           "" \
+           "### Changed" \
+           "" \
+           "$COMMITS")
+
+         awk -v entry="$NEW_ENTRY" '/<!-- insert new changelog below this comment -->/ { print; print entry; next } {print}' CHANGELOG.md > CHANGELOG.md.tmp
          mv CHANGELOG.md.tmp CHANGELOG.md

          echo "✅ Updated CHANGELOG.md with commits since $PREVIOUS_TAG"
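A runnable sketch of the marker-based insertion on sample data (file contents and version strings here are illustrative, not taken from the repository):

```bash
# Build a CHANGELOG.md containing the marker comment the workflow looks for
printf '%s\n' '# Changelog' '' '<!-- insert new changelog below this comment -->' '' '## [1.0.0] - 2026-01-01' > CHANGELOG.md

# Compose the new entry the same way the workflow does
NEW_ENTRY=$(printf '%s\n' "" "## [1.1.0] - 2026-02-01" "" "### Changed" "" "- example commit")

# Insert the entry immediately after the marker line, leaving the rest untouched
awk -v entry="$NEW_ENTRY" '/<!-- insert new changelog below this comment -->/ { print; print entry; next } {print}' CHANGELOG.md > CHANGELOG.md.tmp
mv CHANGELOG.md.tmp CHANGELOG.md
```

Unlike the old `head -n 8` / `tail -n +9` split, the marker approach keeps working if the changelog preamble ever changes length.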
@@ -141,6 +139,22 @@ jobs:
          git push origin "${{ steps.version.outputs.tag }}"
          echo "Branch ${{ env.branch }} and tag ${{ steps.version.outputs.tag }} pushed"

+     - name: Bump to dev version
+       id: dev_version
+       run: |
+         IFS='.' read -r MAJOR MINOR PATCH <<< "${{ steps.version.outputs.version }}"
+         NEXT_DEV="$MAJOR.$MINOR.$((PATCH + 1)).dev0"
+         echo "dev_version=$NEXT_DEV" >> $GITHUB_OUTPUT
+         sed -i "s/version = \".*\"/version = \"$NEXT_DEV\"/" pyproject.toml
+         git add pyproject.toml
+         if git diff --cached --quiet; then
+           echo "No dev version changes to commit"
+         else
+           git commit -m "chore: begin $NEXT_DEV development"
+           git push origin "${{ env.branch }}"
+           echo "Bumped to dev version $NEXT_DEV"
+         fi
+
      - name: Open pull request
        env:
          GITHUB_TOKEN: ${{ secrets.RELEASE_PAT }}
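A standalone sketch of the version arithmetic this new step introduces (the version string is illustrative):

```bash
# Split MAJOR.MINOR.PATCH and derive the follow-up development version
VERSION="0.4.2"
IFS='.' read -r MAJOR MINOR PATCH <<< "$VERSION"
echo "$MAJOR.$MINOR.$((PATCH + 1)).dev0"   # prints 0.4.3.dev0
```

The `.dev0` suffix follows PEP 440, so builds from `main` between releases are recognizably pre-release.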
@@ -148,16 +162,17 @@ jobs:
          gh pr create \
            --base main \
            --head "${{ env.branch }}" \
-           --title "chore: bump version to ${{ steps.version.outputs.version }}" \
-           --body "Automated version bump to ${{ steps.version.outputs.version }}.
+           --title "chore: release ${{ steps.version.outputs.version }}, begin ${{ steps.dev_version.outputs.dev_version }} development" \
+           --body "Automated release of ${{ steps.version.outputs.version }}.

          This PR was created by the Release Trigger workflow. The git tag \`${{ steps.version.outputs.tag }}\` has already been pushed and the release artifacts are being built.

-         Merge this PR to record the version bump and changelog update on \`main\`."
+         Merging this PR will set \`main\` to \`${{ steps.dev_version.outputs.dev_version }}\` so that development installs are clearly marked as pre-release."

      - name: Summary
        run: |
          echo "✅ Version bumped to ${{ steps.version.outputs.version }}"
          echo "✅ Tag ${{ steps.version.outputs.tag }} created and pushed"
+         echo "✅ Dev version set to ${{ steps.dev_version.outputs.dev_version }}"
          echo "✅ PR opened to merge version bump into main"
          echo "🚀 Release workflow is building artifacts from the tag"
@@ -202,8 +202,7 @@ agent: $basename
  }

  # Create skills in <skills_dir>\<name>\SKILL.md format.
- # Most agents use hyphenated names (e.g. speckit-plan); Kimi is the
- # current dotted-name exception (e.g. speckit.plan).
+ # Skills use hyphenated names (e.g. speckit-plan).
  #
  # Technical debt note:
  # Keep SKILL.md frontmatter aligned with `install_ai_skills()` and extension

@@ -463,7 +462,7 @@ function Build-Variant {
      'kimi' {
          $skillsDir = Join-Path $baseDir ".kimi/skills"
          New-Item -ItemType Directory -Force -Path $skillsDir | Out-Null
-         New-Skills -SkillsDir $skillsDir -ScriptVariant $Script -AgentName 'kimi' -Separator '.'
+         New-Skills -SkillsDir $skillsDir -ScriptVariant $Script -AgentName 'kimi'
      }
      'trae' {
          $rulesDir = Join-Path $baseDir ".trae/rules"
@@ -498,13 +497,13 @@ $AllAgents = @('claude', 'gemini', 'copilot', 'cursor-agent', 'qwen', 'opencode'
  $AllScripts = @('sh', 'ps')

  function Normalize-List {
-     param([string]$Input)
+     param([string]$Value)

-     if ([string]::IsNullOrEmpty($Input)) {
+     if ([string]::IsNullOrEmpty($Value)) {
          return @()
      }

-     $items = $Input -split '[,\s]+' | Where-Object { $_ } | Select-Object -Unique
+     $items = $Value -split '[,\s]+' | Where-Object { $_ } | Select-Object -Unique
      return $items
  }

@@ -527,7 +526,7 @@ function Validate-Subset {

  # Determine agent list
  if (-not [string]::IsNullOrEmpty($Agents)) {
-     $AgentList = Normalize-List -Input $Agents
+     $AgentList = Normalize-List -Value $Agents
      if (-not (Validate-Subset -Type 'agent' -Allowed $AllAgents -Items $AgentList)) {
          exit 1
      }

@@ -537,7 +536,7 @@ if (-not [string]::IsNullOrEmpty($Agents)) {

  # Determine script list
  if (-not [string]::IsNullOrEmpty($Scripts)) {
-     $ScriptList = Normalize-List -Input $Scripts
+     $ScriptList = Normalize-List -Value $Scripts
      if (-not (Validate-Subset -Type 'script' -Allowed $AllScripts -Items $ScriptList)) {
          exit 1
      }
@@ -140,8 +140,7 @@ EOF
  }

  # Create skills in <skills_dir>/<name>/SKILL.md format.
- # Most agents use hyphenated names (e.g. speckit-plan); Kimi is the
- # current dotted-name exception (e.g. speckit.plan).
+ # Skills use hyphenated names (e.g. speckit-plan).
  #
  # Technical debt note:
  # Keep SKILL.md frontmatter aligned with `install_ai_skills()` and extension

@@ -321,7 +320,7 @@ build_variant() {
          generate_commands vibe md "\$ARGUMENTS" "$base_dir/.vibe/prompts" "$script" ;;
      kimi)
          mkdir -p "$base_dir/.kimi/skills"
-         create_skills "$base_dir/.kimi/skills" "$script" "kimi" "." ;;
+         create_skills "$base_dir/.kimi/skills" "$script" "kimi" ;;
      trae)
          mkdir -p "$base_dir/.trae/rules"
          generate_commands trae md "\$ARGUMENTS" "$base_dir/.trae/rules" "$script" ;;
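Both script variants drop Kimi's dotted-name separator, so every agent now gets the same hyphenated skill layout. A sketch of the resulting structure (the path convention comes from the `<skills_dir>/<name>/SKILL.md` comment above; the skill name is illustrative):

```bash
# After this change, Kimi skills land in hyphenated directories like every other agent
mkdir -p .kimi/skills/speckit-plan
touch .kimi/skills/speckit-plan/SKILL.md   # previously speckit.plan
```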
.github/workflows/test.yml (vendored, 4 lines changed)
@@ -16,7 +16,7 @@ jobs:
        uses: actions/checkout@v4

      - name: Install uv
-       uses: astral-sh/setup-uv@v7
+       uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7

      - name: Set up Python
        uses: actions/setup-python@v6

@@ -36,7 +36,7 @@ jobs:
        uses: actions/checkout@v4

      - name: Install uv
-       uses: astral-sh/setup-uv@v7
+       uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v6
AGENTS.md (36 lines changed)
@@ -30,10 +30,10 @@ Specify supports multiple AI agents by generating agent-specific command files a
  | **Claude Code** | `.claude/commands/` | Markdown | `claude` | Anthropic's Claude Code CLI |
  | **Gemini CLI** | `.gemini/commands/` | TOML | `gemini` | Google's Gemini CLI |
  | **GitHub Copilot** | `.github/agents/` | Markdown | N/A (IDE-based) | GitHub Copilot in VS Code |
- | **Cursor** | `.cursor/commands/` | Markdown | `cursor-agent` | Cursor CLI |
+ | **Cursor** | `.cursor/commands/` | Markdown | N/A (IDE-based) | Cursor IDE (`--ai cursor-agent`) |
  | **Qwen Code** | `.qwen/commands/` | Markdown | `qwen` | Alibaba's Qwen Code CLI |
  | **opencode** | `.opencode/command/` | Markdown | `opencode` | opencode CLI |
- | **Codex CLI** | `.agents/skills/` | Markdown | `codex` | Codex CLI (skills) |
+ | **Codex CLI** | `.agents/skills/` | Markdown | `codex` | Codex CLI (`--ai codex --ai-skills`) |
  | **Windsurf** | `.windsurf/workflows/` | Markdown | N/A (IDE-based) | Windsurf IDE workflows |
  | **Junie** | `.junie/commands/` | Markdown | `junie` | Junie by JetBrains |
  | **Kilo Code** | `.kilocode/workflows/` | Markdown | N/A (IDE-based) | Kilo Code IDE |

@@ -50,6 +50,8 @@ Specify supports multiple AI agents by generating agent-specific command files a
  | **iFlow CLI** | `.iflow/commands/` | Markdown | `iflow` | iFlow CLI (iflow-ai) |
  | **IBM Bob** | `.bob/commands/` | Markdown | N/A (IDE-based) | IBM Bob IDE |
  | **Trae** | `.trae/rules/` | Markdown | N/A (IDE-based) | Trae IDE |
+ | **Antigravity** | `.agent/commands/` | Markdown | N/A (IDE-based) | Antigravity IDE (`--ai agy --ai-skills`) |
+ | **Mistral Vibe** | `.vibe/prompts/` | Markdown | `vibe` | Mistral Vibe CLI |
  | **Generic** | User-specified via `--ai-commands-dir` | Markdown | N/A | Bring your own agent |

  ### Step-by-Step Integration Guide
@@ -316,32 +318,40 @@ Require a command-line tool to be installed:

  - **Claude Code**: `claude` CLI
  - **Gemini CLI**: `gemini` CLI
- - **Cursor**: `cursor-agent` CLI
  - **Qwen Code**: `qwen` CLI
  - **opencode**: `opencode` CLI
  - **Codex CLI**: `codex` CLI (requires `--ai-skills`)
  - **Junie**: `junie` CLI
- - **Kiro CLI**: `kiro-cli` CLI
  - **Auggie CLI**: `auggie` CLI
  - **CodeBuddy CLI**: `codebuddy` CLI
  - **Qoder CLI**: `qodercli` CLI
+ - **Kiro CLI**: `kiro-cli` CLI
  - **Amp**: `amp` CLI
  - **SHAI**: `shai` CLI
  - **Tabnine CLI**: `tabnine` CLI
  - **Kimi Code**: `kimi` CLI
  - **Mistral Vibe**: `vibe` CLI
  - **Pi Coding Agent**: `pi` CLI
  - **iFlow CLI**: `iflow` CLI

  ### IDE-Based Agents

  Work within integrated development environments:

  - **GitHub Copilot**: Built into VS Code/compatible editors
+ - **Cursor**: Built into Cursor IDE (`--ai cursor-agent`)
  - **Windsurf**: Built into Windsurf IDE
  - **Kilo Code**: Built into Kilo Code IDE
  - **Roo Code**: Built into Roo Code IDE
  - **IBM Bob**: Built into IBM Bob IDE
  - **Trae**: Built into Trae IDE
+ - **Antigravity**: Built into Antigravity IDE (`--ai agy --ai-skills`)

  ## Command File Formats

  ### Markdown Format

- Used by: Claude, Cursor, opencode, Windsurf, Junie, Kiro CLI, Amp, SHAI, IBM Bob, Kimi Code, Qwen, Pi
+ Used by: Claude, Cursor, GitHub Copilot, opencode, Windsurf, Junie, Kiro CLI, Amp, SHAI, IBM Bob, Kimi Code, Qwen, Pi, Codex, Auggie, CodeBuddy, Qoder, Roo Code, Kilo Code, Trae, Antigravity, Mistral Vibe, iFlow

  **Standard format:**
@@ -379,15 +389,29 @@ Command content with {SCRIPT} and {{args}} placeholders.

  ## Directory Conventions

  - **CLI agents**: Usually `.<agent-name>/commands/`
  - **Singular command exception**:
    - opencode: `.opencode/command/` (singular `command`, not `commands`)
  - **Nested path exception**:
    - Tabnine: `.tabnine/agent/commands/` (extra `agent/` segment)
  - **Shared `.agents/` folder**:
    - Amp: `.agents/commands/` (shared folder, not `.amp/`)
-   - Codex: `.agents/skills/` (shared folder; requires `--ai-skills`; invoked as `$speckit-<command>`)
+ - **Skills-based exceptions**:
+   - Codex: `.agents/skills/` (skills, invoked as `$speckit-<command>`)
+   - Kimi Code: `.kimi/skills/` (skills, invoked as `/skill:speckit-<command>`)
  - **Prompt-based exceptions**:
    - Kiro CLI: `.kiro/prompts/`
    - Pi: `.pi/prompts/`
    - Mistral Vibe: `.vibe/prompts/`
  - **Rules-based exceptions**:
    - Trae: `.trae/rules/`
  - **IDE agents**: Follow IDE-specific patterns:
    - Copilot: `.github/agents/`
    - Cursor: `.cursor/commands/`
    - Windsurf: `.windsurf/workflows/`
    - Kilo Code: `.kilocode/workflows/`
    - Roo Code: `.roo/commands/`
    - IBM Bob: `.bob/commands/`
+   - Antigravity: `.agent/skills/` (`--ai-skills` required; `.agent/commands/` is deprecated)

  ## Argument Patterns
CHANGELOG.md (1105 lines changed; diff suppressed because it is too large)
@@ -36,7 +36,7 @@ On [GitHub Codespaces](https://github.com/features/codespaces) it's even simpler

> If your pull request introduces a large change that materially impacts the work of the CLI or the rest of the repository (e.g., you're introducing new templates, arguments, or otherwise major changes), make sure that it was **discussed and agreed upon** by the project maintainers. Pull requests with large changes that did not have a prior conversation and agreement will be closed.

  1. Fork and clone the repository
- 1. Configure and install the dependencies: `uv sync`
+ 1. Configure and install the dependencies: `uv sync --extra test`
  1. Make sure the CLI works on your machine: `uv run specify --help`
  1. Create a new branch: `git checkout -b my-branch-name`
  1. Make your change, add tests, and make sure everything still works

@@ -44,6 +44,9 @@ On [GitHub Codespaces](https://github.com/features/codespaces) it's even simpler
  1. Push to your fork and submit a pull request
  1. Wait for your pull request to be reviewed and merged.

- For the detailed test workflow, command-selection prompt, and PR reporting template, see [`TESTING.md`](./TESTING.md).
+ Activate the project virtual environment (see the Setup block in [`TESTING.md`](./TESTING.md)), then install the CLI from your working tree (`uv pip install -e .` after `uv sync --extra test`) or otherwise ensure the shell uses the local `specify` binary before running the manual slash-command tests described below.

  Here are a few things you can do that will increase the likelihood of your pull request being accepted:

  - Follow the project's coding conventions.
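A consolidated sketch of the local setup the two hunks above describe (commands are from the diff; the final check is illustrative):

```bash
# Install project and test dependencies, activate the venv, then use the local CLI
uv sync --extra test
source .venv/bin/activate
uv pip install -e .
specify --help   # should now run the CLI from your working tree
```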
@@ -62,6 +65,14 @@ When working on spec-kit:
  3. Test script functionality in the `scripts/` directory
  4. Ensure memory files (`memory/constitution.md`) are updated if major process changes are made

+ ### Recommended validation flow
+
+ For the smoothest review experience, validate changes in this order:
+
+ 1. **Run focused automated checks first** — use the quick verification commands in [`TESTING.md`](./TESTING.md) to catch packaging, scaffolding, and configuration regressions early.
+ 2. **Run manual workflow tests second** — if your change affects slash commands or the developer workflow, follow [`TESTING.md`](./TESTING.md) to choose the right commands, run them in an agent, and capture results for your PR.
+ 3. **Use local release packages when debugging packaged output** — if you need to inspect the exact files CI-style packaging produces, generate local release packages as described below.

  ### Testing template and command changes locally

  Running `uv run specify init` pulls released packages, which won’t include your local changes.

@@ -85,6 +96,8 @@ To test your templates, commands, and other changes locally, follow these steps:

  Navigate to your test project folder and open the agent to verify your implementation.

+ If you only need to validate generated file structure and content before doing manual agent testing, start with the focused automated checks in [`TESTING.md`](./TESTING.md). Keep this section for the cases where you need to inspect the exact packaged output locally.

  ## AI contributions in Spec Kit

  > [!IMPORTANT]
README.md (87 lines changed)
@@ -9,7 +9,7 @@
</p>

<p align="center">
  <a href="https://github.com/github/spec-kit/actions/workflows/release.yml"><img src="https://github.com/github/spec-kit/actions/workflows/release.yml/badge.svg" alt="Release"/></a>
  <a href="https://github.com/github/spec-kit/releases/latest"><img src="https://img.shields.io/github/v/release/github/spec-kit" alt="Latest Release"/></a>
  <a href="https://github.com/github/spec-kit/stargazers"><img src="https://img.shields.io/github/stars/github/spec-kit?style=social" alt="GitHub stars"/></a>
  <a href="https://github.com/github/spec-kit/blob/main/LICENSE"><img src="https://img.shields.io/github/license/github/spec-kit" alt="License"/></a>
  <a href="https://github.github.io/spec-kit/"><img src="https://img.shields.io/badge/docs-GitHub_Pages-blue" alt="Documentation"/></a>

@@ -22,7 +22,10 @@
  - [🤔 What is Spec-Driven Development?](#-what-is-spec-driven-development)
  - [⚡ Get Started](#-get-started)
  - [📽️ Video Overview](#️-video-overview)
+ - [🧩 Community Extensions](#-community-extensions)
+ - [🎨 Community Presets](#-community-presets)
  - [🚶 Community Walkthroughs](#-community-walkthroughs)
+ - [🛠️ Community Friends](#️-community-friends)
  - [🤖 Supported AI Agents](#-supported-ai-agents)
  - [🔧 Specify CLI Reference](#-specify-cli-reference)
  - [🧩 Making Spec Kit Your Own: Extensions & Presets](#-making-spec-kit-your-own-extensions--presets)
@@ -155,6 +158,76 @@ Want to see Spec Kit in action? Watch our [video overview](https://www.youtube.c

  [](https://www.youtube.com/watch?v=a9eR1xsfvHg&pp=0gcJCckJAYcqIYzv)

+ ## 🧩 Community Extensions
+
+ The following community-contributed extensions are available in [`catalog.community.json`](extensions/catalog.community.json):
+
+ **Categories:**
+
+ - `docs` — reads, validates, or generates spec artifacts
+ - `code` — reviews, validates, or modifies source code
+ - `process` — orchestrates workflow across phases
+ - `integration` — syncs with external platforms
+ - `visibility` — reports on project health or progress
+
+ **Effect:**
+
+ - `Read-only` — produces reports without modifying files
+ - `Read+Write` — modifies files, creates artifacts, or updates specs
+
+ | Extension | Purpose | Category | Effect | URL |
+ |-----------|---------|----------|--------|-----|
+ | AI-Driven Engineering (AIDE) | A structured 7-step workflow for building new projects from scratch with AI assistants — from vision through implementation | `process` | Read+Write | [aide](https://github.com/mnriem/spec-kit-extensions/tree/main/aide) |
+ | Archive Extension | Archive merged features into main project memory. | `docs` | Read+Write | [spec-kit-archive](https://github.com/stn1slv/spec-kit-archive) |
+ | Azure DevOps Integration | Sync user stories and tasks to Azure DevOps work items using OAuth authentication | `integration` | Read+Write | [spec-kit-azure-devops](https://github.com/pragya247/spec-kit-azure-devops) |
+ | Checkpoint Extension | Commit the changes made during the middle of the implementation, so you don't end up with just one very large commit at the end | `code` | Read+Write | [spec-kit-checkpoint](https://github.com/aaronrsun/spec-kit-checkpoint) |
+ | Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | `code` | Read+Write | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) |
+ | Cognitive Squad | Multi-agent cognitive system with Triadic Model: understanding, internalization, application — with quality gates, backpropagation verification, and self-healing | `docs` | Read+Write | [cognitive-squad](https://github.com/Testimonial/cognitive-squad) |
+ | Conduct Extension | Orchestrates spec-kit phases via sub-agent delegation to reduce context pollution. | `process` | Read+Write | [spec-kit-conduct-ext](https://github.com/twbrandon7/spec-kit-conduct-ext) |
+ | DocGuard — CDD Enforcement | Canonical-Driven Development enforcement. Validates, scores, and traces project documentation with automated checks, AI-driven workflows, and spec-kit hooks. Zero NPM runtime dependencies. | `docs` | Read+Write | [spec-kit-docguard](https://github.com/raccioly/docguard) |
+ | Extensify | Create and validate extensions and extension catalogs | `process` | Read+Write | [extensify](https://github.com/mnriem/spec-kit-extensions/tree/main/extensify) |
+ | Fleet Orchestrator | Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases | `process` | Read+Write | [spec-kit-fleet](https://github.com/sharathsatish/spec-kit-fleet) |
+ | Iterate | Iterate on spec documents with a two-phase define-and-apply workflow — refine specs mid-implementation and go straight back to building | `docs` | Read+Write | [spec-kit-iterate](https://github.com/imviancagrace/spec-kit-iterate) |
+ | Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | `integration` | Read+Write | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
+ | Learning Extension | Generate educational guides from implementations and enhance clarifications with mentoring context | `docs` | Read+Write | [spec-kit-learn](https://github.com/imviancagrace/spec-kit-learn) |
+ | MAQA — Multi-Agent & Quality Assurance | Coordinator → feature → QA agent workflow with parallel worktree-based implementation. Language-agnostic. Auto-detects installed board plugins. Optional CI gate. | `process` | Read+Write | [spec-kit-maqa-ext](https://github.com/GenieRobot/spec-kit-maqa-ext) |
+ | MAQA Azure DevOps Integration | Azure DevOps Boards integration for MAQA — syncs User Stories and Task children as features progress | `integration` | Read+Write | [spec-kit-maqa-azure-devops](https://github.com/GenieRobot/spec-kit-maqa-azure-devops) |
+ | MAQA CI/CD Gate | Auto-detects GitHub Actions, CircleCI, GitLab CI, and Bitbucket Pipelines. Blocks QA handoff until pipeline is green. | `process` | Read+Write | [spec-kit-maqa-ci](https://github.com/GenieRobot/spec-kit-maqa-ci) |
+ | MAQA GitHub Projects Integration | GitHub Projects v2 integration for MAQA — syncs draft issues and Status columns as features progress | `integration` | Read+Write | [spec-kit-maqa-github-projects](https://github.com/GenieRobot/spec-kit-maqa-github-projects) |
+ | MAQA Jira Integration | Jira integration for MAQA — syncs Stories and Subtasks as features progress through the board | `integration` | Read+Write | [spec-kit-maqa-jira](https://github.com/GenieRobot/spec-kit-maqa-jira) |
+ | MAQA Linear Integration | Linear integration for MAQA — syncs issues and sub-issues across workflow states as features progress | `integration` | Read+Write | [spec-kit-maqa-linear](https://github.com/GenieRobot/spec-kit-maqa-linear) |
+ | MAQA Trello Integration | Trello board integration for MAQA — populates board from specs, moves cards, real-time checklist ticking | `integration` | Read+Write | [spec-kit-maqa-trello](https://github.com/GenieRobot/spec-kit-maqa-trello) |
+ | Onboard | Contextual onboarding and progressive growth for developers new to spec-kit projects. Explains specs, maps dependencies, validates understanding, and guides the next step | `process` | Read+Write | [spec-kit-onboard](https://github.com/dmux/spec-kit-onboard) |
+ | Plan Review Gate | Require spec.md and plan.md to be merged via MR/PR before allowing task generation | `process` | Read-only | [spec-kit-plan-review-gate](https://github.com/luno/spec-kit-plan-review-gate) |
+ | Presetify | Create and validate presets and preset catalogs | `process` | Read+Write | [presetify](https://github.com/mnriem/spec-kit-extensions/tree/main/presetify) |
+ | Product Forge | Full product lifecycle: research → product spec → SpecKit → implement → verify → test | `process` | Read+Write | [speckit-product-forge](https://github.com/VaiYav/speckit-product-forge) |
+ | Project Health Check | Diagnose a Spec Kit project and report health issues across structure, agents, features, scripts, extensions, and git | `visibility` | Read-only | [spec-kit-doctor](https://github.com/KhawarHabibKhan/spec-kit-doctor) |
+ | Project Status | Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary | `visibility` | Read-only | [spec-kit-status](https://github.com/KhawarHabibKhan/spec-kit-status) |
+ | Ralph Loop | Autonomous implementation loop using AI agent CLI | `code` | Read+Write | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |
+ | Reconcile Extension | Reconcile implementation drift by surgically updating feature artifacts. | `docs` | Read+Write | [spec-kit-reconcile](https://github.com/stn1slv/spec-kit-reconcile) |
+ | Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | `docs` | Read+Write | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
+ | Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | `code` | Read-only | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
+ | SDD Utilities | Resume interrupted workflows, validate project health, and verify spec-to-task traceability | `process` | Read+Write | [speckit-utils](https://github.com/mvanhorn/speckit-utils) |
+ | Superpowers Bridge | Orchestrates obra/superpowers skills within the spec-kit SDD workflow across the full lifecycle (clarification, TDD, review, verification, critique, debugging, branch completion) | `process` | Read+Write | [superpowers-bridge](https://github.com/RbBtSn0w/spec-kit-extensions/tree/main/superpowers-bridge) |
+ | Spec Sync | Detect and resolve drift between specs and implementation. AI-assisted resolution with human approval | `docs` | Read+Write | [spec-kit-sync](https://github.com/bgervin/spec-kit-sync) |
+ | Understanding | Automated requirements quality analysis — 31 deterministic metrics against IEEE/ISO standards with experimental energy-based ambiguity detection | `docs` | Read-only | [understanding](https://github.com/Testimonial/understanding) |
+ | V-Model Extension Pack | Enforces V-Model paired generation of development specs and test specs with full traceability | `docs` | Read+Write | [spec-kit-v-model](https://github.com/leocamello/spec-kit-v-model) |
+ | Verify Extension | Post-implementation quality gate that validates implemented code against specification artifacts | `code` | Read-only | [spec-kit-verify](https://github.com/ismaelJimenez/spec-kit-verify) |
+ | Verify Tasks Extension | Detect phantom completions: tasks marked [X] in tasks.md with no real implementation | `code` | Read-only | [spec-kit-verify-tasks](https://github.com/datastone-inc/spec-kit-verify-tasks) |
+
+ To submit your own extension, see the [Extension Publishing Guide](extensions/EXTENSION-PUBLISHING-GUIDE.md).
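With the table above in place, discovering and installing one of these looks like this (a sketch; the `archive` id comes from the catalog diff later in this comparison):

```bash
# Browse your configured catalog, then install an extension by its catalog id
specify extension search
specify extension add archive
```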
+ ## 🎨 Community Presets
+
+ The following community-contributed presets customize how Spec Kit behaves — overriding templates, commands, and terminology without changing any tooling. Presets are available in [`catalog.community.json`](presets/catalog.community.json):
+
+ | Preset | Purpose | Provides | Requires | URL |
+ |--------|---------|----------|----------|-----|
+ | AIDE In-Place Migration | Adapts the AIDE extension workflow for in-place technology migrations (X → Y pattern) — adds migration objectives, verification gates, knowledge documents, and behavioral equivalence criteria | 2 templates, 8 commands | AIDE extension | [spec-kit-presets](https://github.com/mnriem/spec-kit-presets) |
+ | Pirate Speak (Full) | Transforms all Spec Kit output into pirate speak — specs become "Voyage Manifests", plans become "Battle Plans", tasks become "Crew Assignments" | 6 templates, 9 commands | — | [spec-kit-presets](https://github.com/mnriem/spec-kit-presets) |
+
+ To build and publish your own preset, see the [Presets Publishing Guide](presets/PUBLISHING.md).

  ## 🚶 Community Walkthroughs

  See Spec-Driven Development in action across different scenarios with these community-contributed walkthroughs:
@@ -173,6 +246,14 @@ See Spec-Driven Development in action across different scenarios with these comm

+ - **[Greenfield Spring Boot + React with a custom extension](https://github.com/mnriem/spec-kit-aide-extension-demo)** — Walks through the **AIDE extension**, a community extension that adds an alternative spec-driven workflow to spec-kit with high-level specs (vision) and low-level specs (work items) organized in a 7-step iterative lifecycle: vision → roadmap → progress tracking → work queue → work items → execution → feedback loops. Uses a family trading platform (Spring Boot 4, React 19, PostgreSQL, Docker Compose) as the scenario to illustrate how the extension mechanism lets you plug in a different style of spec-driven development without changing any core tooling — truly utilizing the "Kit" in Spec Kit.
+
+ ## 🛠️ Community Friends
+
+ Community projects that extend, visualize, or build on Spec Kit:
+
+ - **[cc-sdd](https://github.com/rhuss/cc-sdd)** - A Claude Code plugin that adds composable traits on top of Spec Kit with [Superpowers](https://github.com/obra/superpowers)-based quality gates, spec/code review, git worktree isolation, and parallel implementation via agent teams.
+
+ - **[Spec Kit Assistant](https://marketplace.visualstudio.com/items?itemName=rfsales.speckit-assistant)** — A VS Code extension that provides a visual orchestrator for the full SDD workflow (constitution → specification → planning → tasks → implementation) with phase status visualization, an interactive task checklist, DAG visualization, and support for Claude, Gemini, GitHub Copilot, and OpenAI backends. Requires the `specify` CLI in your PATH.

  ## 🤖 Supported AI Agents

  | Agent | Support | Notes |
@@ -231,7 +312,7 @@ The `specify` command supports the following options:
  | `--skip-tls` | Flag | Skip SSL/TLS verification (not recommended) |
  | `--debug` | Flag | Enable detailed debug output for troubleshooting |
  | `--github-token` | Option | GitHub token for API requests (or set GH_TOKEN/GITHUB_TOKEN env variable) |
- | `--ai-skills` | Flag | Install Prompt.MD templates as agent skills in agent-specific `skills/` directory (requires `--ai`) |
+ | `--ai-skills` | Flag | Install Prompt.MD templates as agent skills in agent-specific `skills/` directory (requires `--ai`). Extension commands are also auto-registered as skills when extensions are added later. |
  | `--branch-numbering` | Option | Branch numbering strategy: `sequential` (default — `001`, `002`, `003`) or `timestamp` (`YYYYMMDD-HHMMSS`). Timestamp mode is useful for distributed teams to avoid numbering conflicts |

  ### Examples
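A quick illustration of the two `--branch-numbering` forms described in that row (outputs are examples):

```bash
# sequential mode: zero-padded counters
printf '%03d\n' 1          # -> 001

# timestamp mode: current time as YYYYMMDD-HHMMSS
date +%Y%m%d-%H%M%S        # -> e.g. 20260402-185309
```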
@@ -382,7 +463,7 @@ specify extension add <extension-name>

For example, extensions could add Jira integration, post-implementation code review, V-Model test traceability, or project health diagnostics.

- See the [Extensions README](./extensions/README.md) for the full guide, the complete community catalog, and how to build and publish your own.
+ See the [Extensions README](./extensions/README.md) for the full guide and how to build and publish your own. Browse the [community extensions](#-community-extensions) above for what's available.

### Presets — Customize Existing Workflows
TESTING.md (new file, 133 lines)
@@ -0,0 +1,133 @@
# Testing Guide

This document is the detailed testing companion to [`CONTRIBUTING.md`](./CONTRIBUTING.md).

Use it for three things:

1. running quick automated checks before manual testing,
2. manually testing affected slash commands through an AI agent, and
3. capturing the results in a PR-friendly format.

Any change that affects a slash command's behavior requires manually testing that command through an AI agent and submitting results with the PR.

## Recommended order

1. **Sync your environment** — install the project and test dependencies.
2. **Run focused automated checks** — especially for packaging, scaffolding, agent config, and generated-file changes.
3. **Run manual agent tests** — for any affected slash commands.
4. **Paste results into your PR** — include both command-selection reasoning and manual test results.

## Quick automated checks

Run these before manual testing when your change affects packaging, scaffolding, templates, release artifacts, or agent wiring.

### Environment setup

```bash
cd <spec-kit-repo>
uv sync --extra test
source .venv/bin/activate  # On Windows (CMD): .venv\Scripts\activate | (PowerShell): .venv\Scripts\Activate.ps1
```

### Generated package structure and content

```bash
uv run python -m pytest tests/test_core_pack_scaffold.py -q
```

This validates the generated files that CI-style packaging depends on, including directory layout, file names, frontmatter/TOML validity, placeholder replacement, `.specify/` path rewrites, and parity with `create-release-packages.sh`.

### Agent configuration and release wiring consistency

```bash
uv run python -m pytest tests/test_agent_config_consistency.py -q
```

Run this when you change agent metadata, release scripts, context update scripts, or artifact naming.

### Optional single-agent packaging spot check

```bash
AGENTS=copilot SCRIPTS=sh ./.github/workflows/scripts/create-release-packages.sh v1.0.0
```

Inspect `.genreleases/sdd-copilot-package-sh/` and the matching ZIP in `.genreleases/` when you want to review the exact packaged output for one agent/script combination.
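A couple of illustrative follow-up commands for that spot check (paths follow the `.genreleases/` layout named above):

```bash
# List the generated archives, then peek at the copilot package contents
ls .genreleases/*.zip
find .genreleases/sdd-copilot-package-sh -maxdepth 2 -type f | head
```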
## Manual testing process

1. **Identify affected commands** — use the [prompt below](#determining-which-tests-to-run) to have your agent analyze your changed files and determine which commands need testing.
2. **Set up a test project** — scaffold from your local branch (see [Setup](#setup)).
3. **Run each affected command** — invoke it in your agent, verify it completes successfully, and confirm it produces the expected output (files created, scripts executed, artifacts populated).
4. **Run prerequisites first** — commands that depend on earlier commands (e.g., `/speckit.tasks` requires `/speckit.plan` which requires `/speckit.specify`) must be run in order.
5. **Report results** — paste the [reporting template](#reporting-results) into your PR with pass/fail for each command tested.

## Setup

```bash
# Install the project and test dependencies from your local branch
cd <spec-kit-repo>
uv sync --extra test
source .venv/bin/activate  # On Windows (CMD): .venv\Scripts\activate | (PowerShell): .venv\Scripts\Activate.ps1
uv pip install -e .
# Ensure the `specify` binary in this environment points at your working tree so the agent runs the branch you're testing.

# Initialize a test project using your local changes
uv run specify init /tmp/speckit-test --ai <agent> --offline
cd /tmp/speckit-test

# Open in your agent
```

If you are testing the packaged output rather than the live source tree, create a local release package first as described in [`CONTRIBUTING.md`](./CONTRIBUTING.md).

## Reporting results

Paste this into your PR:

~~~markdown
## Manual test results

**Agent**: [e.g., GitHub Copilot in VS Code] | **OS/Shell**: [e.g., macOS/zsh]

| Command tested | Notes |
|----------------|-------|
| `/speckit.command` | |
~~~

## Determining which tests to run

Copy this prompt into your agent. Include the agent's response (selected tests plus a brief explanation of the mapping) in your PR.

~~~text
Read TESTING.md, then run `git diff --name-only main` to get my changed files.
For each changed file, determine which slash commands it affects by reading
the command templates in templates/commands/ to understand what each command
invokes. Use these mapping rules:

- templates/commands/X.md → the command it defines
- scripts/bash/Y.sh or scripts/powershell/Y.ps1 → every command that invokes that script (grep templates/commands/ for the script name). Also check transitive dependencies: if the changed script is sourced by other scripts (e.g., common.sh is sourced by create-new-feature.sh, check-prerequisites.sh, setup-plan.sh, update-agent-context.sh), then every command invoking those downstream scripts is also affected
- templates/Z-template.md → every command that consumes that template during execution
- src/specify_cli/*.py → CLI commands (`specify init`, `specify check`, `specify extension *`, `specify preset *`); test the affected CLI command and, for init/scaffolding changes, at minimum test /speckit.specify
- extensions/X/commands/* → the extension command it defines
- extensions/X/scripts/* → every extension command that invokes that script
- extensions/X/extension.yml or config-template.yml → every command in that extension. Also check if the manifest defines hooks (look for `hooks:` entries like `before_specify`, `after_implement`, etc.) — if so, the core commands those hooks attach to are also affected
- presets/*/* → test preset scaffolding via `specify init` with the preset
- pyproject.toml → packaging/bundling; test `specify init` and verify bundled assets

Include prerequisite tests (e.g., T5 requires T3 requires T1).

Output in this format:

### Test selection reasoning

| Changed file | Affects | Test | Why |
|---|---|---|---|
| (path) | (command) | T# | (reason) |

### Required tests

Number each test sequentially (T1, T2, ...). List prerequisite tests first.

- T1: /speckit.command — (reason)
- T2: /speckit.command — (reason)
~~~
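For the script-to-command mapping rules in that prompt, a hedged sketch of doing the same lookup by hand (script names are the examples the rules themselves use):

```bash
# Commands that invoke a changed script directly
grep -rl "create-new-feature.sh" templates/commands/

# The transitive case: scripts that source a changed helper
grep -rl "common.sh" scripts/bash/
```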
@@ -44,7 +44,7 @@ provides:
  - name: string          # Required, pattern: ^speckit\.[a-z0-9-]+\.[a-z0-9-]+$
    file: string          # Required, relative path to command file
    description: string   # Required
-   aliases: [string]     # Optional, array of alternate names
+   aliases: [string]     # Optional, same pattern as name; namespace must match extension.id and must not shadow core or installed extension commands

  config:                 # Optional, array of config files
  - name: string          # Config file name
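A sketch of checking a command or alias name against the pattern quoted above (sample names are illustrative):

```bash
pattern='^speckit\.[a-z0-9-]+\.[a-z0-9-]+$'
echo "speckit.my-ext.hello" | grep -Eq "$pattern" && echo "valid"
echo "speckit.hello"        | grep -Eq "$pattern" || echo "rejected: no extension namespace"
```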
@@ -41,7 +41,7 @@ provides:
  - name: "speckit.my-ext.hello"     # Must follow pattern: speckit.{ext-id}.{cmd}
    file: "commands/hello.md"
    description: "Say hello"
-   aliases: ["speckit.hello"]       # Optional aliases
+   aliases: ["speckit.my-ext.hi"]   # Optional aliases, same pattern

  config:                            # Optional: Config files
  - name: "my-ext-config.yml"

@@ -186,7 +186,7 @@ What the extension provides.
  - `name`: Command name (must match `speckit.{ext-id}.{command}`)
  - `file`: Path to command file (relative to extension root)
  - `description`: Command description (optional)
- - `aliases`: Alternative command names (optional, array)
+ - `aliases`: Alternative command names (optional, array; each must match `speckit.{ext-id}.{command}`)

  ### Optional Fields

@@ -514,7 +514,7 @@ zip -r spec-kit-my-ext-1.0.0.zip extension.yml commands/ scripts/ docs/

Users install with:

```bash
- specify extension add --from https://github.com/.../spec-kit-my-ext-1.0.0.zip
+ specify extension add <extension-name> --from https://github.com/.../spec-kit-my-ext-1.0.0.zip
```

### Option 3: Community Reference Catalog
@@ -523,7 +523,7 @@ Submit to the community catalog for public discovery:

  1. **Fork** spec-kit repository
  2. **Add entry** to `extensions/catalog.community.json`
- 3. **Update** `extensions/README.md` with your extension
+ 3. **Update** the Community Extensions table in `README.md` with your extension
  4. **Create PR** following the [Extension Publishing Guide](EXTENSION-PUBLISHING-GUIDE.md)
  5. **After merge**, your extension becomes available:
     - Users can browse `catalog.community.json` to discover your extension

@@ -122,7 +122,7 @@ Test that users can install from your release:
  specify extension add --dev /path/to/your-extension

  # Test from GitHub archive
- specify extension add --from https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip
+ specify extension add <extension-name> --from https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip
  ```

---
@@ -204,9 +204,9 @@ Edit `extensions/catalog.community.json` and add your extension:
  - Use current timestamp for `created_at` and `updated_at`
  - Update the top-level `updated_at` to current time

- ### 3. Update Extensions README
+ ### 3. Update Community Extensions Table

- Add your extension to the Available Extensions table in `extensions/README.md`:
+ Add your extension to the Community Extensions table in the project root `README.md`:

```markdown
| Your Extension Name | Brief description of what it does | `<category>` | <effect> | [repo-name](https://github.com/your-org/spec-kit-your-extension) |

@@ -234,7 +234,7 @@ Insert your extension in alphabetical order in the table.
  git checkout -b add-your-extension

  # Commit your changes
- git add extensions/catalog.community.json extensions/README.md
+ git add extensions/catalog.community.json README.md
  git commit -m "Add your-extension to community catalog

  - Extension ID: your-extension

@@ -273,7 +273,7 @@ Brief description of what your extension does.
  - [x] All commands working
  - [x] No security vulnerabilities
  - [x] Added to extensions/catalog.community.json
- - [x] Added to extensions/README.md Available Extensions table
+ - [x] Added to Community Extensions table in README.md

  ### Testing
  Tested on:
@@ -160,7 +160,7 @@ This will:

```bash
# From GitHub release
- specify extension add --from https://github.com/org/spec-kit-ext/archive/refs/tags/v1.0.0.zip
+ specify extension add <extension-name> --from https://github.com/org/spec-kit-ext/archive/refs/tags/v1.0.0.zip
```

### Install from Local Directory (Development)

@@ -187,6 +187,21 @@ Provided commands:
  Check: .specify/extensions/jira/
  ```

+ ### Automatic Agent Skill Registration
+
+ If your project was initialized with `--ai-skills`, extension commands are **automatically registered as agent skills** during installation. This ensures that extensions are discoverable by agents that use the [agentskills.io](https://agentskills.io) skill specification.
+
+ ```text
+ ✓ Extension installed successfully!
+
+ Jira Integration (v1.0.0)
+ ...
+
+ ✓ 3 agent skill(s) auto-registered
+ ```
+
+ When an extension is removed, its corresponding skills are also cleaned up automatically. Pre-existing skills that were manually customized are never overwritten.

---

## Using Extensions
@@ -199,8 +214,8 @@ Extensions add commands that appear in your AI agent (Claude Code):
  # In Claude Code
  > /speckit.jira.specstoissues

- # Or use short alias (if provided)
- > /speckit.specstoissues
+ # Or use a namespaced alias (if provided)
+ > /speckit.jira.sync
  ```

### Extension Configuration
@@ -722,7 +737,7 @@ You can still install extensions not in your catalog using `--from`:
  specify extension add jira

  # Direct URL (bypasses catalog)
- specify extension add --from https://github.com/someone/spec-kit-ext/archive/v1.0.0.zip
+ specify extension add <extension-name> --from https://github.com/someone/spec-kit-ext/archive/v1.0.0.zip

  # Local development
  specify extension add --dev /path/to/extension

@@ -792,7 +807,7 @@ specify extension add --dev /path/to/extension
  2. Install older version of extension:

     ```bash
-    specify extension add --from https://github.com/org/ext/archive/v1.0.0.zip
+    specify extension add <extension-name> --from https://github.com/org/ext/archive/v1.0.0.zip
     ```

### MCP Tool Not Available
@@ -59,7 +59,7 @@ Populate your `catalog.json` with approved extensions:

Skip catalog curation - team members install directly using URLs:

```bash
- specify extension add --from https://github.com/org/spec-kit-ext/archive/refs/tags/v1.0.0.zip
+ specify extension add <extension-name> --from https://github.com/org/spec-kit-ext/archive/refs/tags/v1.0.0.zip
```

**Benefits**: Quick for one-off testing or private extensions
@@ -68,37 +68,9 @@ specify extension add --from https://github.com/org/spec-kit-ext/archive/refs/ta

## Available Community Extensions

- The following community-contributed extensions are available in [`catalog.community.json`](catalog.community.json):
+ See the [Community Extensions](../README.md#-community-extensions) section in the main README for the full list of available community-contributed extensions.

- **Categories:** `docs` — reads, validates, or generates spec artifacts · `code` — reviews, validates, or modifies source code · `process` — orchestrates workflow across phases · `integration` — syncs with external platforms · `visibility` — reports on project health or progress
-
- **Effect:** `Read-only` — produces reports without modifying files · `Read+Write` — modifies files, creates artifacts, or updates specs
-
- | Extension | Purpose | Category | Effect | URL |
- |-----------|---------|----------|--------|-----|
- | Archive Extension | Archive merged features into main project memory. | `docs` | Read+Write | [spec-kit-archive](https://github.com/stn1slv/spec-kit-archive) |
- | Azure DevOps Integration | Sync user stories and tasks to Azure DevOps work items using OAuth authentication | `integration` | Read+Write | [spec-kit-azure-devops](https://github.com/pragya247/spec-kit-azure-devops) |
- | Checkpoint Extension | Commit the changes made during the middle of the implementation, so you don't end up with just one very large commit at the end | `code` | Read+Write | [spec-kit-checkpoint](https://github.com/aaronrsun/spec-kit-checkpoint) |
- | Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | `code` | Read+Write | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) |
- | Cognitive Squad | Multi-agent cognitive system with Triadic Model: understanding, internalization, application — with quality gates, backpropagation verification, and self-healing | `docs` | Read+Write | [cognitive-squad](https://github.com/Testimonial/cognitive-squad) |
- | Conduct Extension | Orchestrates spec-kit phases via sub-agent delegation to reduce context pollution. | `process` | Read+Write | [spec-kit-conduct-ext](https://github.com/twbrandon7/spec-kit-conduct-ext) |
- | DocGuard — CDD Enforcement | Canonical-Driven Development enforcement. Validates, scores, and traces project documentation with automated checks, AI-driven workflows, and spec-kit hooks. Zero NPM runtime dependencies. | `docs` | Read+Write | [spec-kit-docguard](https://github.com/raccioly/docguard) |
- | Fleet Orchestrator | Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases | `process` | Read+Write | [spec-kit-fleet](https://github.com/sharathsatish/spec-kit-fleet) |
- | Iterate | Iterate on spec documents with a two-phase define-and-apply workflow — refine specs mid-implementation and go straight back to building | `docs` | Read+Write | [spec-kit-iterate](https://github.com/imviancagrace/spec-kit-iterate) |
- | Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | `integration` | Read+Write | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
- | Learning Extension | Generate educational guides from implementations and enhance clarifications with mentoring context | `docs` | Read+Write | [spec-kit-learn](https://github.com/imviancagrace/spec-kit-learn) |
- | Project Health Check | Diagnose a Spec Kit project and report health issues across structure, agents, features, scripts, extensions, and git | `visibility` | Read-only | [spec-kit-doctor](https://github.com/KhawarHabibKhan/spec-kit-doctor) |
- | Project Status | Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary | `visibility` | Read-only | [spec-kit-status](https://github.com/KhawarHabibKhan/spec-kit-status) |
- | Ralph Loop | Autonomous implementation loop using AI agent CLI | `code` | Read+Write | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |
- | Reconcile Extension | Reconcile implementation drift by surgically updating feature artifacts. | `docs` | Read+Write | [spec-kit-reconcile](https://github.com/stn1slv/spec-kit-reconcile) |
- | Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | `docs` | Read+Write | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
- | Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | `code` | Read-only | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
- | SDD Utilities | Resume interrupted workflows, validate project health, and verify spec-to-task traceability | `process` | Read+Write | [speckit-utils](https://github.com/mvanhorn/speckit-utils) |
- | Spec Sync | Detect and resolve drift between specs and implementation. AI-assisted resolution with human approval | `docs` | Read+Write | [spec-kit-sync](https://github.com/bgervin/spec-kit-sync) |
- | Understanding | Automated requirements quality analysis — 31 deterministic metrics against IEEE/ISO standards with experimental energy-based ambiguity detection | `docs` | Read-only | [understanding](https://github.com/Testimonial/understanding) |
- | V-Model Extension Pack | Enforces V-Model paired generation of development specs and test specs with full traceability | `docs` | Read+Write | [spec-kit-v-model](https://github.com/leocamello/spec-kit-v-model) |
- | Verify Extension | Post-implementation quality gate that validates implemented code against specification artifacts | `code` | Read-only | [spec-kit-verify](https://github.com/ismaelJimenez/spec-kit-verify) |
- | Verify Tasks Extension | Detect phantom completions: tasks marked [X] in tasks.md with no real implementation | `code` | Read-only | [spec-kit-verify-tasks](https://github.com/datastone-inc/spec-kit-verify-tasks) |
+ For the raw catalog data, see [`catalog.community.json`](catalog.community.json).

## Adding Your Extension
@@ -136,7 +108,7 @@ specify extension search # See what's in your catalog
  specify extension add <extension-name>   # Install by name

  # Direct from URL (bypasses catalog)
- specify extension add --from https://github.com/<org>/<repo>/archive/refs/tags/<version>.zip
+ specify extension add <extension-name> --from https://github.com/<org>/<repo>/archive/refs/tags/<version>.zip

  # List installed extensions
  specify extension list
@@ -223,7 +223,7 @@ provides:
  - name: "speckit.jira.specstoissues"
    file: "commands/specstoissues.md"
    description: "Create Jira hierarchy from spec and tasks"
-   aliases: ["speckit.specstoissues"]   # Alternate names
+   aliases: ["speckit.jira.sync"]       # Alternate names

  - name: "speckit.jira.discover-fields"
    file: "commands/discover-fields.md"
@@ -1517,7 +1517,7 @@ specify extension add github-projects
|
||||
/speckit.github.taskstoissues
|
||||
```
|
||||
|
||||
**Compatibility shim** (if needed):
|
||||
**Migration alias** (if needed):
|
||||
|
||||
```yaml
|
||||
# extension.yml
|
||||
@@ -1525,10 +1525,10 @@ provides:
|
||||
commands:
|
||||
- name: "speckit.github.taskstoissues"
|
||||
file: "commands/taskstoissues.md"
|
||||
aliases: ["speckit.taskstoissues"] # Backward compatibility
|
||||
aliases: ["speckit.github.sync-taskstoissues"] # Alternate namespaced entry point
|
||||
```
|
||||
|
||||
AI agent registers both names, so old scripts work.
|
||||
AI agents register both names, so callers can migrate to the alternate alias without relying on deprecated global shortcuts like `/speckit.taskstoissues`.

---

@@ -1,8 +1,41 @@
{
  "schema_version": "1.0",
  "updated_at": "2026-03-19T12:08:20Z",
  "updated_at": "2026-03-30T00:00:00Z",
  "catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json",
  "extensions": {
    "aide": {
      "name": "AI-Driven Engineering (AIDE)",
      "id": "aide",
      "description": "A structured 7-step workflow for building new projects from scratch with AI assistants — from vision through implementation.",
      "author": "mnriem",
      "version": "1.0.0",
      "download_url": "https://github.com/mnriem/spec-kit-extensions/releases/download/aide-v1.0.0/aide.zip",
      "repository": "https://github.com/mnriem/spec-kit-extensions",
      "homepage": "https://github.com/mnriem/spec-kit-extensions",
      "documentation": "https://github.com/mnriem/spec-kit-extensions/blob/main/aide/README.md",
      "changelog": "https://github.com/mnriem/spec-kit-extensions/blob/main/aide/CHANGELOG.md",
      "license": "MIT",
      "requires": {
        "speckit_version": ">=0.2.0"
      },
      "provides": {
        "commands": 7,
        "hooks": 0
      },
      "tags": [
        "workflow",
        "project-management",
        "ai-driven",
        "new-project",
        "planning",
        "experimental"
      ],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-18T00:00:00Z",
      "updated_at": "2026-03-18T00:00:00Z"
    },
    "archive": {
      "name": "Archive Extension",
      "id": "archive",
@@ -209,7 +242,7 @@
      "updated_at": "2026-03-19T12:08:20Z"
    },
    "docguard": {
      "name": "DocGuard \u2014 CDD Enforcement",
      "name": "DocGuard — CDD Enforcement",
      "id": "docguard",
      "description": "Canonical-Driven Development enforcement. Validates, scores, and traces project documentation with automated checks, AI-driven workflows, and spec-kit hooks. Zero NPM runtime dependencies.",
      "author": "raccioly",
@@ -281,6 +314,37 @@
      "created_at": "2026-03-13T00:00:00Z",
      "updated_at": "2026-03-13T00:00:00Z"
    },
    "extensify": {
      "name": "Extensify",
      "id": "extensify",
      "description": "Create and validate extensions and extension catalogs.",
      "author": "mnriem",
      "version": "1.0.0",
      "download_url": "https://github.com/mnriem/spec-kit-extensions/releases/download/extensify-v1.0.0/extensify.zip",
      "repository": "https://github.com/mnriem/spec-kit-extensions",
      "homepage": "https://github.com/mnriem/spec-kit-extensions",
      "documentation": "https://github.com/mnriem/spec-kit-extensions/blob/main/extensify/README.md",
      "changelog": "https://github.com/mnriem/spec-kit-extensions/blob/main/extensify/CHANGELOG.md",
      "license": "MIT",
      "requires": {
        "speckit_version": ">=0.2.0"
      },
      "provides": {
        "commands": 4,
        "hooks": 0
      },
      "tags": [
        "extensions",
        "workflow",
        "validation",
        "experimental"
      ],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-18T00:00:00Z",
      "updated_at": "2026-03-18T00:00:00Z"
    },
    "fleet": {
      "name": "Fleet Orchestrator",
      "id": "fleet",
@@ -373,6 +437,390 @@
      "created_at": "2026-03-05T00:00:00Z",
      "updated_at": "2026-03-05T00:00:00Z"
    },
    "learn": {
      "name": "Learning Extension",
      "id": "learn",
      "description": "Generate educational guides from implementations and enhance clarifications with mentoring context.",
      "author": "Vianca Martinez",
      "version": "1.0.0",
      "download_url": "https://github.com/imviancagrace/spec-kit-learn/archive/refs/tags/v1.0.0.zip",
      "repository": "https://github.com/imviancagrace/spec-kit-learn",
      "homepage": "https://github.com/imviancagrace/spec-kit-learn",
      "documentation": "https://github.com/imviancagrace/spec-kit-learn/blob/main/README.md",
      "changelog": "https://github.com/imviancagrace/spec-kit-learn/blob/main/CHANGELOG.md",
      "license": "MIT",
      "requires": {
        "speckit_version": ">=0.1.0"
      },
      "provides": {
        "commands": 2,
        "hooks": 1
      },
      "tags": [
        "learning",
        "education",
        "mentoring",
        "knowledge-transfer"
      ],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-17T00:00:00Z",
      "updated_at": "2026-03-17T00:00:00Z"
    },
    "maqa": {
      "name": "MAQA — Multi-Agent & Quality Assurance",
      "id": "maqa",
      "description": "Coordinator → feature → QA agent workflow with parallel worktree-based implementation. Language-agnostic. Auto-detects installed board plugins (Trello, Linear, GitHub Projects, Jira, Azure DevOps). Optional CI gate.",
      "author": "GenieRobot",
      "version": "0.1.3",
      "download_url": "https://github.com/GenieRobot/spec-kit-maqa-ext/releases/download/maqa-v0.1.3/maqa.zip",
      "repository": "https://github.com/GenieRobot/spec-kit-maqa-ext",
      "homepage": "https://github.com/GenieRobot/spec-kit-maqa-ext",
      "documentation": "https://github.com/GenieRobot/spec-kit-maqa-ext/blob/main/README.md",
      "changelog": "https://github.com/GenieRobot/spec-kit-maqa-ext/blob/main/CHANGELOG.md",
      "license": "MIT",
      "requires": {
        "speckit_version": ">=0.3.0"
      },
      "provides": {
        "commands": 4,
        "hooks": 1
      },
      "tags": [
        "multi-agent",
        "orchestration",
        "quality-assurance",
        "workflow",
        "parallel",
        "tdd"
      ],
      "verified": false,
      "downloads": 0,
      "stars": 0,
      "created_at": "2026-03-26T00:00:00Z",
      "updated_at": "2026-03-27T00:00:00Z"
    },
"maqa-azure-devops": {
|
||||
"name": "MAQA Azure DevOps Integration",
|
||||
"id": "maqa-azure-devops",
|
||||
"description": "Azure DevOps Boards integration for the MAQA extension. Populates work items from specs, moves User Stories across columns as features progress, real-time Task child ticking.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.0",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-azure-devops/releases/download/maqa-azure-devops-v0.1.0/maqa-azure-devops.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-azure-devops",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-azure-devops",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-azure-devops/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-azure-devops/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"azure-devops",
|
||||
"project-management",
|
||||
"multi-agent",
|
||||
"maqa",
|
||||
"kanban"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-ci": {
|
||||
"name": "MAQA CI/CD Gate",
|
||||
"id": "maqa-ci",
|
||||
"description": "CI/CD pipeline gate for the MAQA extension. Auto-detects GitHub Actions, CircleCI, GitLab CI, and Bitbucket Pipelines. Blocks QA handoff until pipeline is green.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.0",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-ci/releases/download/maqa-ci-v0.1.0/maqa-ci.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-ci",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-ci",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-ci/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-ci/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"ci-cd",
|
||||
"github-actions",
|
||||
"circleci",
|
||||
"gitlab-ci",
|
||||
"quality-gate",
|
||||
"maqa"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-github-projects": {
|
||||
"name": "MAQA GitHub Projects Integration",
|
||||
"id": "maqa-github-projects",
|
||||
"description": "GitHub Projects v2 integration for the MAQA extension. Populates draft issues from specs, moves items across Status columns as features progress, real-time task list ticking.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.0",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-github-projects/releases/download/maqa-github-projects-v0.1.0/maqa-github-projects.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-github-projects",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-github-projects",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-github-projects/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-github-projects/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"github-projects",
|
||||
"project-management",
|
||||
"multi-agent",
|
||||
"maqa",
|
||||
"kanban"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-jira": {
|
||||
"name": "MAQA Jira Integration",
|
||||
"id": "maqa-jira",
|
||||
"description": "Jira integration for the MAQA extension. Populates Stories from specs, moves issues across board columns as features progress, real-time Subtask ticking.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.0",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-jira/releases/download/maqa-jira-v0.1.0/maqa-jira.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-jira",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-jira",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-jira/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-jira/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"jira",
|
||||
"project-management",
|
||||
"multi-agent",
|
||||
"maqa",
|
||||
"kanban"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-linear": {
|
||||
"name": "MAQA Linear Integration",
|
||||
"id": "maqa-linear",
|
||||
"description": "Linear integration for the MAQA extension. Populates issues from specs, moves items across workflow states as features progress, real-time sub-issue ticking.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.0",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-linear/releases/download/maqa-linear-v0.1.0/maqa-linear.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-linear",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-linear",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-linear/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-linear/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"linear",
|
||||
"project-management",
|
||||
"multi-agent",
|
||||
"maqa",
|
||||
"kanban"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-trello": {
|
||||
"name": "MAQA Trello Integration",
|
||||
"id": "maqa-trello",
|
||||
"description": "Trello board integration for the MAQA extension. Populates board from specs, moves cards between lists as features progress, real-time checklist ticking.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.1",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-trello/releases/download/maqa-trello-v0.1.1/maqa-trello.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-trello",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-trello",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-trello/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-trello/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"trello",
|
||||
"project-management",
|
||||
"multi-agent",
|
||||
"maqa",
|
||||
"kanban"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-26T00:00:00Z",
|
||||
"updated_at": "2026-03-26T00:00:00Z"
|
||||
},
|
||||
"onboard": {
|
||||
"name": "Onboard",
|
||||
"id": "onboard",
|
||||
"description": "Contextual onboarding and progressive growth for developers new to spec-kit projects. Explains specs, maps dependencies, validates understanding, and guides the next step.",
|
||||
"author": "Rafael Sales",
|
||||
"version": "2.1.0",
|
||||
"download_url": "https://github.com/dmux/spec-kit-onboard/archive/refs/tags/v2.1.0.zip",
|
||||
"repository": "https://github.com/dmux/spec-kit-onboard",
|
||||
"homepage": "https://github.com/dmux/spec-kit-onboard",
|
||||
"documentation": "https://github.com/dmux/spec-kit-onboard/blob/main/README.md",
|
||||
"changelog": "https://github.com/dmux/spec-kit-onboard/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 7,
|
||||
"hooks": 3
|
||||
},
|
||||
"tags": [
|
||||
"onboarding",
|
||||
"learning",
|
||||
"mentoring",
|
||||
"developer-experience",
|
||||
"gamification",
|
||||
"knowledge-transfer"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-26T00:00:00Z",
|
||||
"updated_at": "2026-03-26T00:00:00Z"
|
||||
},
|
||||
"plan-review-gate": {
|
||||
"name": "Plan Review Gate",
|
||||
"id": "plan-review-gate",
|
||||
"description": "Require spec.md and plan.md to be merged via MR/PR before allowing task generation",
|
||||
"author": "luno",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/luno/spec-kit-plan-review-gate/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/luno/spec-kit-plan-review-gate",
|
||||
"homepage": "https://github.com/luno/spec-kit-plan-review-gate",
|
||||
"documentation": "https://github.com/luno/spec-kit-plan-review-gate/blob/main/README.md",
|
||||
"changelog": "https://github.com/luno/spec-kit-plan-review-gate/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 1,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": [
|
||||
"review",
|
||||
"quality",
|
||||
"workflow",
|
||||
"gate"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T08:22:30Z",
|
||||
"updated_at": "2026-03-27T08:22:30Z"
|
||||
},
|
||||
"presetify": {
|
||||
"name": "Presetify",
|
||||
"id": "presetify",
|
||||
"description": "Create and validate presets and preset catalogs.",
|
||||
"author": "mnriem",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/mnriem/spec-kit-extensions/releases/download/presetify-v1.0.0/presetify.zip",
|
||||
"repository": "https://github.com/mnriem/spec-kit-extensions",
|
||||
"homepage": "https://github.com/mnriem/spec-kit-extensions",
|
||||
"documentation": "https://github.com/mnriem/spec-kit-extensions/blob/main/presetify/README.md",
|
||||
"changelog": "https://github.com/mnriem/spec-kit-extensions/blob/main/presetify/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.2.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 4,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"presets",
|
||||
"workflow",
|
||||
"templates",
|
||||
"experimental"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-18T00:00:00Z",
|
||||
"updated_at": "2026-03-18T00:00:00Z"
|
||||
},
|
||||
"product-forge": {
|
||||
"name": "Product Forge",
|
||||
"id": "product-forge",
|
||||
"description": "Full product lifecycle: research \u2192 product spec \u2192 SpecKit \u2192 implement \u2192 verify \u2192 test",
|
||||
"author": "VaiYav",
|
||||
"version": "1.1.1",
|
||||
"download_url": "https://github.com/VaiYav/speckit-product-forge/archive/refs/tags/v1.1.1.zip",
|
||||
"repository": "https://github.com/VaiYav/speckit-product-forge",
|
||||
"homepage": "https://github.com/VaiYav/speckit-product-forge",
|
||||
"documentation": "https://github.com/VaiYav/speckit-product-forge/blob/main/README.md",
|
||||
"changelog": "https://github.com/VaiYav/speckit-product-forge/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 10,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"process",
|
||||
"research",
|
||||
"product-spec",
|
||||
"lifecycle",
|
||||
"testing"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-28T00:00:00Z",
|
||||
"updated_at": "2026-03-28T00:00:00Z"
|
||||
},
|
||||
"ralph": {
|
||||
"name": "Ralph Loop",
|
||||
"id": "ralph",
|
||||
@@ -543,6 +991,81 @@
|
||||
"created_at": "2026-03-18T00:00:00Z",
|
||||
"updated_at": "2026-03-18T00:00:00Z"
|
||||
},
|
||||
"status": {
|
||||
"name": "Project Status",
|
||||
"id": "status",
|
||||
"description": "Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary.",
|
||||
"author": "KhawarHabibKhan",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/KhawarHabibKhan/spec-kit-status/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/KhawarHabibKhan/spec-kit-status",
|
||||
"homepage": "https://github.com/KhawarHabibKhan/spec-kit-status",
|
||||
"documentation": "https://github.com/KhawarHabibKhan/spec-kit-status/blob/main/README.md",
|
||||
"changelog": "https://github.com/KhawarHabibKhan/spec-kit-status/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 1,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"status",
|
||||
"workflow",
|
||||
"progress",
|
||||
"feature-tracking",
|
||||
"task-progress"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-16T00:00:00Z",
|
||||
"updated_at": "2026-03-16T00:00:00Z"
|
||||
},
|
||||
"superb": {
|
||||
"name": "Superpowers Bridge",
|
||||
"id": "superb",
|
||||
"description": "Orchestrates obra/superpowers skills within the spec-kit SDD workflow. Thin bridge commands delegate to superpowers' authoritative SKILL.md files at runtime (with graceful fallback), while bridge-original commands provide spec-kit-native value. Eight commands cover the full lifecycle: intent clarification, TDD enforcement, task review, verification, critique, systematic debugging, branch completion, and review response. Hook-bound commands fire automatically; standalone commands are invoked when needed.",
|
||||
"author": "rbbtsn0w",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/RbBtSn0w/spec-kit-extensions/releases/download/superpowers-bridge-v1.0.0/superpowers-bridge.zip",
|
||||
"repository": "https://github.com/RbBtSn0w/spec-kit-extensions",
|
||||
"homepage": "https://github.com/RbBtSn0w/spec-kit-extensions",
|
||||
"documentation": "https://github.com/RbBtSn0w/spec-kit-extensions/blob/main/superpowers-bridge/README.md",
|
||||
"changelog": "https://github.com/RbBtSn0w/spec-kit-extensions/blob/main/superpowers-bridge/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.4.3",
|
||||
"tools": [
|
||||
{
|
||||
"name": "superpowers",
|
||||
"version": ">=5.0.0",
|
||||
"required": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"provides": {
|
||||
"commands": 8,
|
||||
"hooks": 4
|
||||
},
|
||||
"tags": [
|
||||
"methodology",
|
||||
"tdd",
|
||||
"code-review",
|
||||
"workflow",
|
||||
"superpowers",
|
||||
"brainstorming",
|
||||
"verification",
|
||||
"debugging",
|
||||
"branch-management"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-30T00:00:00Z",
|
||||
"updated_at": "2026-03-30T00:00:00Z"
|
||||
},
|
||||
"sync": {
|
||||
"name": "Spec Sync",
|
||||
"id": "sync",
|
||||
@@ -578,7 +1101,7 @@
|
||||
"understanding": {
|
||||
"name": "Understanding",
|
||||
"id": "understanding",
|
||||
"description": "Automated requirements quality analysis \u2014 validates specs against IEEE/ISO standards using 31 deterministic metrics. Catches ambiguity, missing testability, and structural issues before they reach implementation. Includes experimental energy-based ambiguity detection using local LM token perplexity.",
|
||||
"description": "Automated requirements quality analysis — validates specs against IEEE/ISO standards using 31 deterministic metrics. Catches ambiguity, missing testability, and structural issues before they reach implementation. Includes experimental energy-based ambiguity detection using local LM token perplexity.",
|
||||
"author": "Ladislav Bihari",
|
||||
"version": "3.4.0",
|
||||
"download_url": "https://github.com/Testimonial/understanding/archive/refs/tags/v3.4.0.zip",
|
||||
@@ -616,38 +1139,6 @@
|
||||
"created_at": "2026-03-07T00:00:00Z",
|
||||
"updated_at": "2026-03-07T00:00:00Z"
|
||||
},
|
||||
"status": {
|
||||
"name": "Project Status",
|
||||
"id": "status",
|
||||
"description": "Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary.",
|
||||
"author": "KhawarHabibKhan",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/KhawarHabibKhan/spec-kit-status/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/KhawarHabibKhan/spec-kit-status",
|
||||
"homepage": "https://github.com/KhawarHabibKhan/spec-kit-status",
|
||||
"documentation": "https://github.com/KhawarHabibKhan/spec-kit-status/blob/main/README.md",
|
||||
"changelog": "https://github.com/KhawarHabibKhan/spec-kit-status/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 1,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"status",
|
||||
"workflow",
|
||||
"progress",
|
||||
"feature-tracking",
|
||||
"task-progress"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-16T00:00:00Z",
|
||||
"updated_at": "2026-03-16T00:00:00Z"
|
||||
},
|
||||
"v-model": {
|
||||
"name": "V-Model Extension Pack",
|
||||
"id": "v-model",
|
||||
@@ -680,37 +1171,6 @@
|
||||
"created_at": "2026-02-20T00:00:00Z",
|
||||
"updated_at": "2026-02-22T00:00:00Z"
|
||||
},
|
||||
"learn": {
|
||||
"name": "Learning Extension",
|
||||
"id": "learn",
|
||||
"description": "Generate educational guides from implementations and enhance clarifications with mentoring context.",
|
||||
"author": "Vianca Martinez",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/imviancagrace/spec-kit-learn/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/imviancagrace/spec-kit-learn",
|
||||
"homepage": "https://github.com/imviancagrace/spec-kit-learn",
|
||||
"documentation": "https://github.com/imviancagrace/spec-kit-learn/blob/main/README.md",
|
||||
"changelog": "https://github.com/imviancagrace/spec-kit-learn/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": [
|
||||
"learning",
|
||||
"education",
|
||||
"mentoring",
|
||||
"knowledge-transfer"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-17T00:00:00Z",
|
||||
"updated_at": "2026-03-17T00:00:00Z"
|
||||
},
|
||||
"verify": {
|
||||
"name": "Verify Extension",
|
||||
"id": "verify",
|
||||
|
||||
@@ -47,8 +47,8 @@ provides:
  - name: "speckit.my-extension.example"
    file: "commands/example.md"
    description: "Example command that demonstrates functionality"
    # Optional: Add aliases for shorter command names
    aliases: ["speckit.example"]
    # Optional: Add aliases in the same namespaced format
    aliases: ["speckit.my-extension.example-short"]

  # ADD MORE COMMANDS: Copy this block for each command
  # - name: "speckit.my-extension.another-command"

@@ -1,6 +1,58 @@
{
  "schema_version": "1.0",
  "updated_at": "2026-03-09T00:00:00Z",
  "updated_at": "2026-03-24T00:00:00Z",
  "catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/presets/catalog.community.json",
  "presets": {}
  "presets": {
    "aide-in-place": {
      "name": "AIDE In-Place Migration",
      "id": "aide-in-place",
      "version": "1.0.0",
      "description": "Adapts the AIDE workflow for in-place technology migrations (X → Y pattern). Overrides vision, roadmap, progress, and work item commands with migration-specific guidance.",
      "author": "mnriem",
      "repository": "https://github.com/mnriem/spec-kit-presets",
      "download_url": "https://github.com/mnriem/spec-kit-presets/releases/download/aide-in-place-v1.0.0/aide-in-place.zip",
      "homepage": "https://github.com/mnriem/spec-kit-presets",
      "documentation": "https://github.com/mnriem/spec-kit-presets/blob/main/aide-in-place/README.md",
      "license": "MIT",
      "requires": {
        "speckit_version": ">=0.2.0",
        "extensions": ["aide"]
      },
      "provides": {
        "templates": 2,
        "commands": 8
      },
      "tags": [
        "migration",
        "in-place",
        "brownfield",
        "aide"
      ]
    },
    "pirate": {
      "name": "Pirate Speak (Full)",
      "id": "pirate",
      "version": "1.0.0",
      "description": "Arrr! Transforms all Spec Kit output into pirate speak. Specs, plans, and tasks be written fer scallywags.",
      "author": "mnriem",
      "repository": "https://github.com/mnriem/spec-kit-presets",
      "download_url": "https://github.com/mnriem/spec-kit-presets/releases/download/pirate-v1.0.0/pirate.zip",
      "homepage": "https://github.com/mnriem/spec-kit-presets",
      "documentation": "https://github.com/mnriem/spec-kit-presets/blob/main/pirate/README.md",
      "license": "MIT",
      "requires": {
        "speckit_version": ">=0.1.0"
      },
      "provides": {
        "templates": 6,
        "commands": 9
      },
      "tags": [
        "pirate",
        "theme",
        "fun",
        "experimental"
      ]
    }
  }
}
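To try one of these presets, the `--preset` flag added to `specify init` (shown later in this diff) installs it at initialization time. A hedged example with an illustrative project name and agent:

```bash
# Install the Pirate Speak preset while bootstrapping a new project
specify init my-app --ai claude --preset pirate
```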
@@ -1,6 +1,6 @@
[project]
name = "specify-cli"
version = "0.4.1"
version = "0.4.4"
description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)."
requires-python = ">=3.11"
dependencies = [

@@ -3,6 +3,7 @@
set -e

JSON_MODE=false
ALLOW_EXISTING=false
SHORT_NAME=""
BRANCH_NUMBER=""
USE_TIMESTAMP=false
@@ -14,6 +15,9 @@ while [ $i -le $# ]; do
        --json)
            JSON_MODE=true
            ;;
        --allow-existing-branch)
            ALLOW_EXISTING=true
            ;;
        --short-name)
            if [ $((i + 1)) -gt $# ]; then
                echo 'Error: --short-name requires a value' >&2
@@ -45,10 +49,11 @@ while [ $i -le $# ]; do
            USE_TIMESTAMP=true
            ;;
        --help|-h)
            echo "Usage: $0 [--json] [--short-name <name>] [--number N] [--timestamp] <feature_description>"
            echo "Usage: $0 [--json] [--allow-existing-branch] [--short-name <name>] [--number N] [--timestamp] <feature_description>"
            echo ""
            echo "Options:"
            echo "  --json                    Output in JSON format"
            echo "  --allow-existing-branch   Switch to branch if it already exists instead of failing"
            echo "  --short-name <name>       Provide a custom short name (2-4 words) for the branch"
            echo "  --number N                Specify branch number manually (overrides auto-detection)"
            echo "  --timestamp               Use timestamp prefix (YYYYMMDD-HHMMSS) instead of sequential numbering"
@@ -69,7 +74,7 @@ done

FEATURE_DESCRIPTION="${ARGS[*]}"
if [ -z "$FEATURE_DESCRIPTION" ]; then
    echo "Usage: $0 [--json] [--short-name <name>] [--number N] [--timestamp] <feature_description>" >&2
    echo "Usage: $0 [--json] [--allow-existing-branch] [--short-name <name>] [--number N] [--timestamp] <feature_description>" >&2
    exit 1
fi

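A sketch of what the new flag enables in practice (illustrative script location and feature description):

```bash
# First run creates branch 001-add-login and specs/001-add-login/spec.md
.specify/scripts/bash/create-new-feature.sh --json "add login flow"

# Re-running the same feature now switches to the existing branch instead of failing
.specify/scripts/bash/create-new-feature.sh --json --allow-existing-branch "add login flow"
```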
@@ -89,9 +94,9 @@ get_highest_from_specs() {
    for dir in "$specs_dir"/*; do
        [ -d "$dir" ] || continue
        dirname=$(basename "$dir")
        # Only match sequential prefixes (###-*), skip timestamp dirs
        if echo "$dirname" | grep -q '^[0-9]\{3\}-'; then
            number=$(echo "$dirname" | grep -o '^[0-9]\{3\}')
        # Match sequential prefixes (>=3 digits), but skip timestamp dirs.
        if echo "$dirname" | grep -Eq '^[0-9]{3,}-' && ! echo "$dirname" | grep -Eq '^[0-9]{8}-[0-9]{6}-'; then
            number=$(echo "$dirname" | grep -Eo '^[0-9]+')
            number=$((10#$number))
            if [ "$number" -gt "$highest" ]; then
                highest=$number
@@ -115,9 +120,9 @@ get_highest_from_branches() {
        # Clean branch name: remove leading markers and remote prefixes
        clean_branch=$(echo "$branch" | sed 's/^[* ]*//; s|^remotes/[^/]*/||')

        # Extract feature number if branch matches pattern ###-*
        if echo "$clean_branch" | grep -q '^[0-9]\{3\}-'; then
            number=$(echo "$clean_branch" | grep -o '^[0-9]\{3\}' || echo "0")
        # Extract sequential feature number (>=3 digits), skip timestamp branches.
        if echo "$clean_branch" | grep -Eq '^[0-9]{3,}-' && ! echo "$clean_branch" | grep -Eq '^[0-9]{8}-[0-9]{6}-'; then
            number=$(echo "$clean_branch" | grep -Eo '^[0-9]+' || echo "0")
            number=$((10#$number))
            if [ "$number" -gt "$highest" ]; then
                highest=$number
@@ -287,12 +292,19 @@ if [ "$HAS_GIT" = true ]; then
    if ! git checkout -b "$BRANCH_NAME" 2>/dev/null; then
        # Check if branch already exists
        if git branch --list "$BRANCH_NAME" | grep -q .; then
            if [ "$USE_TIMESTAMP" = true ]; then
            if [ "$ALLOW_EXISTING" = true ]; then
                # Switch to the existing branch instead of failing
                if ! git checkout "$BRANCH_NAME" 2>/dev/null; then
                    >&2 echo "Error: Failed to switch to existing branch '$BRANCH_NAME'. Please resolve any local changes or conflicts and try again."
                    exit 1
                fi
            elif [ "$USE_TIMESTAMP" = true ]; then
                >&2 echo "Error: Branch '$BRANCH_NAME' already exists. Rerun to get a new timestamp or use a different --short-name."
                exit 1
            else
                >&2 echo "Error: Branch '$BRANCH_NAME' already exists. Please use a different feature name or specify a different number with --number."
                exit 1
            fi
            exit 1
        else
            >&2 echo "Error: Failed to create git branch '$BRANCH_NAME'. Please check your git configuration and try again."
            exit 1
@@ -305,13 +317,15 @@ fi
FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
mkdir -p "$FEATURE_DIR"

TEMPLATE=$(resolve_template "spec-template" "$REPO_ROOT") || true
SPEC_FILE="$FEATURE_DIR/spec.md"
if [ -n "$TEMPLATE" ] && [ -f "$TEMPLATE" ]; then
    cp "$TEMPLATE" "$SPEC_FILE"
else
    echo "Warning: Spec template not found; created empty spec file" >&2
    touch "$SPEC_FILE"
if [ ! -f "$SPEC_FILE" ]; then
    TEMPLATE=$(resolve_template "spec-template" "$REPO_ROOT") || true
    if [ -n "$TEMPLATE" ] && [ -f "$TEMPLATE" ]; then
        cp "$TEMPLATE" "$SPEC_FILE"
    else
        echo "Warning: Spec template not found; created empty spec file" >&2
        touch "$SPEC_FILE"
    fi
fi

# Inform the user how to persist the feature variable in their own shell

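As a sanity check on the new matching rules, a minimal sketch (hypothetical directory names) of which prefixes count as sequential:

```bash
# Sequential prefixes need >=3 leading digits; timestamp names are excluded.
for name in 001-login 0042-search 20260330-120000-login login; do
  if echo "$name" | grep -Eq '^[0-9]{3,}-' && ! echo "$name" | grep -Eq '^[0-9]{8}-[0-9]{6}-'; then
    printf 'sequential: %s\n' "$name"
  else
    printf 'skipped:    %s\n' "$name"
  fi
done
# sequential: 001-login
# sequential: 0042-search
# skipped:    20260330-120000-login
# skipped:    login
```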
@@ -63,7 +63,7 @@ AGENT_TYPE="${1:-}"
# Agent-specific file paths
CLAUDE_FILE="$REPO_ROOT/CLAUDE.md"
GEMINI_FILE="$REPO_ROOT/GEMINI.md"
COPILOT_FILE="$REPO_ROOT/.github/agents/copilot-instructions.md"
COPILOT_FILE="$REPO_ROOT/.github/copilot-instructions.md"
CURSOR_FILE="$REPO_ROOT/.cursor/rules/specify-rules.mdc"
QWEN_FILE="$REPO_ROOT/QWEN.md"
AGENTS_FILE="$REPO_ROOT/AGENTS.md"

@@ -8,7 +8,8 @@ function Find-SpecifyRoot {

    # Normalize to absolute path to prevent issues with relative paths
    # Use -LiteralPath to handle paths with wildcard characters ([, ], *, ?)
    $current = (Resolve-Path -LiteralPath $StartDir -ErrorAction SilentlyContinue)?.Path
    $resolved = Resolve-Path -LiteralPath $StartDir -ErrorAction SilentlyContinue
    $current = if ($resolved) { $resolved.Path } else { $null }
    if (-not $current) { return $null }

    while ($true) {

@@ -3,9 +3,10 @@
[CmdletBinding()]
param(
    [switch]$Json,
    [switch]$AllowExistingBranch,
    [string]$ShortName,
    [Parameter()]
    [int]$Number = 0,
    [long]$Number = 0,
    [switch]$Timestamp,
    [switch]$Help,
    [Parameter(Position = 0, ValueFromRemainingArguments = $true)]
@@ -15,10 +16,11 @@ $ErrorActionPreference = 'Stop'

# Show help if requested
if ($Help) {
    Write-Host "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
    Write-Host "Usage: ./create-new-feature.ps1 [-Json] [-AllowExistingBranch] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
    Write-Host ""
    Write-Host "Options:"
    Write-Host "  -Json                  Output in JSON format"
    Write-Host "  -AllowExistingBranch   Switch to branch if it already exists instead of failing"
    Write-Host "  -ShortName <name>      Provide a custom short name (2-4 words) for the branch"
    Write-Host "  -Number N              Specify branch number manually (overrides auto-detection)"
    Write-Host "  -Timestamp             Use timestamp prefix (YYYYMMDD-HHMMSS) instead of sequential numbering"
@@ -33,7 +35,7 @@ if ($Help) {

# Check if feature description provided
if (-not $FeatureDescription -or $FeatureDescription.Count -eq 0) {
    Write-Error "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
    Write-Error "Usage: ./create-new-feature.ps1 [-Json] [-AllowExistingBranch] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
    exit 1
}

@@ -48,12 +50,15 @@ if ([string]::IsNullOrWhiteSpace($featureDesc)) {
function Get-HighestNumberFromSpecs {
    param([string]$SpecsDir)

    $highest = 0
    [long]$highest = 0
    if (Test-Path $SpecsDir) {
        Get-ChildItem -Path $SpecsDir -Directory | ForEach-Object {
            if ($_.Name -match '^(\d{3})-') {
                $num = [int]$matches[1]
                if ($num -gt $highest) { $highest = $num }
            # Match sequential prefixes (>=3 digits), but skip timestamp dirs.
            if ($_.Name -match '^(\d{3,})-' -and $_.Name -notmatch '^\d{8}-\d{6}-') {
                [long]$num = 0
                if ([long]::TryParse($matches[1], [ref]$num) -and $num -gt $highest) {
                    $highest = $num
                }
            }
        }
    }
@@ -63,7 +68,7 @@ function Get-HighestNumberFromSpecs {
function Get-HighestNumberFromBranches {
    param()

    $highest = 0
    [long]$highest = 0
    try {
        $branches = git branch -a 2>$null
        if ($LASTEXITCODE -eq 0) {
@@ -71,10 +76,12 @@ function Get-HighestNumberFromBranches {
            # Clean branch name: remove leading markers and remote prefixes
            $cleanBranch = $branch.Trim() -replace '^\*?\s+', '' -replace '^remotes/[^/]+/', ''

            # Extract feature number if branch matches pattern ###-*
            if ($cleanBranch -match '^(\d{3})-') {
                $num = [int]$matches[1]
                if ($num -gt $highest) { $highest = $num }
            # Extract sequential feature number (>=3 digits), skip timestamp branches.
            if ($cleanBranch -match '^(\d{3,})-' -and $cleanBranch -notmatch '^\d{8}-\d{6}-') {
                [long]$num = 0
                if ([long]::TryParse($matches[1], [ref]$num) -and $num -gt $highest) {
                    $highest = $num
                }
            }
        }
    }
@@ -246,12 +253,20 @@ if ($hasGit) {
        # Check if branch already exists
        $existingBranch = git branch --list $branchName 2>$null
        if ($existingBranch) {
            if ($Timestamp) {
            if ($AllowExistingBranch) {
                # Switch to the existing branch instead of failing
                git checkout -q $branchName 2>$null | Out-Null
                if ($LASTEXITCODE -ne 0) {
                    Write-Error "Error: Branch '$branchName' exists but could not be checked out. Resolve any uncommitted changes or conflicts and try again."
                    exit 1
                }
            } elseif ($Timestamp) {
                Write-Error "Error: Branch '$branchName' already exists. Rerun to get a new timestamp or use a different -ShortName."
                exit 1
            } else {
                Write-Error "Error: Branch '$branchName' already exists. Please use a different feature name or specify a different number with -Number."
                exit 1
            }
            exit 1
        } else {
            Write-Error "Error: Failed to create git branch '$branchName'. Please check your git configuration and try again."
            exit 1
@@ -264,12 +279,14 @@ if ($hasGit) {
$featureDir = Join-Path $specsDir $branchName
New-Item -ItemType Directory -Path $featureDir -Force | Out-Null

$template = Resolve-Template -TemplateName 'spec-template' -RepoRoot $repoRoot
$specFile = Join-Path $featureDir 'spec.md'
if ($template -and (Test-Path $template)) {
    Copy-Item $template $specFile -Force
} else {
    New-Item -ItemType File -Path $specFile | Out-Null
if (-not (Test-Path -PathType Leaf $specFile)) {
    $template = Resolve-Template -TemplateName 'spec-template' -RepoRoot $repoRoot
    if ($template -and (Test-Path $template)) {
        Copy-Item $template $specFile -Force
    } else {
        New-Item -ItemType File -Path $specFile | Out-Null
    }
}

# Set the SPECIFY_FEATURE environment variable for the current session
@@ -290,4 +307,3 @@ if ($Json) {
    Write-Output "HAS_GIT: $hasGit"
    Write-Output "SPECIFY_FEATURE environment variable set to: $branchName"
}

@@ -46,7 +46,7 @@ $NEW_PLAN = $IMPL_PLAN
# Agent file paths
$CLAUDE_FILE = Join-Path $REPO_ROOT 'CLAUDE.md'
$GEMINI_FILE = Join-Path $REPO_ROOT 'GEMINI.md'
$COPILOT_FILE = Join-Path $REPO_ROOT '.github/agents/copilot-instructions.md'
$COPILOT_FILE = Join-Path $REPO_ROOT '.github/copilot-instructions.md'
$CURSOR_FILE = Join-Path $REPO_ROOT '.cursor/rules/specify-rules.mdc'
$QWEN_FILE = Join-Path $REPO_ROOT 'QWEN.md'
$AGENTS_FILE = Join-Path $REPO_ROOT 'AGENTS.md'

@@ -345,6 +345,7 @@ AI_ASSISTANT_HELP = _build_ai_assistant_help()
SCRIPT_TYPE_CHOICES = {"sh": "POSIX Shell (bash/zsh)", "ps": "PowerShell"}

CLAUDE_LOCAL_PATH = Path.home() / ".claude" / "local" / "claude"
CLAUDE_NPM_LOCAL_PATH = Path.home() / ".claude" / "local" / "node_modules" / ".bin" / "claude"

BANNER = """
███████╗██████╗ ███████╗ ██████╗██╗███████╗██╗ ██╗
@@ -605,13 +606,15 @@ def check_tool(tool: str, tracker: StepTracker = None) -> bool:
    Returns:
        True if tool is found, False otherwise
    """
    # Special handling for Claude CLI after `claude migrate-installer`
    # Special handling for Claude CLI local installs
    # See: https://github.com/github/spec-kit/issues/123
    # The migrate-installer command REMOVES the original executable from PATH
    # and creates an alias at ~/.claude/local/claude instead
    # This path should be prioritized over other claude executables in PATH
    # See: https://github.com/github/spec-kit/issues/550
    # Claude Code can be installed in two local paths:
    # 1. ~/.claude/local/claude (after `claude migrate-installer`)
    # 2. ~/.claude/local/node_modules/.bin/claude (npm-local install, e.g. via nvm)
    # Neither path may be on the system PATH, so we check them explicitly.
    if tool == "claude":
        if CLAUDE_LOCAL_PATH.exists() and CLAUDE_LOCAL_PATH.is_file():
        if CLAUDE_LOCAL_PATH.is_file() or CLAUDE_NPM_LOCAL_PATH.is_file():
            if tracker:
                tracker.complete(tool, "available")
            return True
@@ -1194,6 +1197,84 @@ def _locate_release_script() -> tuple[Path, str]:
    raise FileNotFoundError(f"Release script '{name}' not found in core_pack or source checkout")


def _install_shared_infra(
    project_path: Path,
    script_type: str,
    tracker: StepTracker | None = None,
) -> bool:
    """Install shared infrastructure files into *project_path*.

    Copies ``.specify/scripts/`` and ``.specify/templates/`` from the
    bundled core_pack or source checkout. Tracks all installed files
    in ``speckit.manifest.json``.
    Returns ``True`` on success.
    """
    from .integrations.manifest import IntegrationManifest

    core = _locate_core_pack()
    manifest = IntegrationManifest("speckit", project_path, version=get_speckit_version())

    # Scripts
    if core and (core / "scripts").is_dir():
        scripts_src = core / "scripts"
    else:
        repo_root = Path(__file__).parent.parent.parent
        scripts_src = repo_root / "scripts"

    skipped_files: list[str] = []

    if scripts_src.is_dir():
        dest_scripts = project_path / ".specify" / "scripts"
        dest_scripts.mkdir(parents=True, exist_ok=True)
        variant_dir = "bash" if script_type == "sh" else "powershell"
        variant_src = scripts_src / variant_dir
        if variant_src.is_dir():
            dest_variant = dest_scripts / variant_dir
            dest_variant.mkdir(parents=True, exist_ok=True)
            # Merge without overwriting — only add files that don't exist yet
            for src_path in variant_src.rglob("*"):
                if src_path.is_file():
                    rel_path = src_path.relative_to(variant_src)
                    dst_path = dest_variant / rel_path
                    if dst_path.exists():
                        skipped_files.append(str(dst_path.relative_to(project_path)))
                    else:
                        dst_path.parent.mkdir(parents=True, exist_ok=True)
                        shutil.copy2(src_path, dst_path)
                        rel = dst_path.relative_to(project_path).as_posix()
                        manifest.record_existing(rel)

    # Page templates (not command templates, not vscode-settings.json)
    if core and (core / "templates").is_dir():
        templates_src = core / "templates"
    else:
        repo_root = Path(__file__).parent.parent.parent
        templates_src = repo_root / "templates"

    if templates_src.is_dir():
        dest_templates = project_path / ".specify" / "templates"
        dest_templates.mkdir(parents=True, exist_ok=True)
        for f in templates_src.iterdir():
            if f.is_file() and f.name != "vscode-settings.json" and not f.name.startswith("."):
                dst = dest_templates / f.name
                if dst.exists():
                    skipped_files.append(str(dst.relative_to(project_path)))
                else:
                    shutil.copy2(f, dst)
                    rel = dst.relative_to(project_path).as_posix()
                    manifest.record_existing(rel)

    if skipped_files:
        import logging
        logging.getLogger(__name__).warning(
            "The following shared files already exist and were not overwritten:\n%s",
            "\n".join(f"  {f}" for f in skipped_files),
        )

    manifest.save()
    return True

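The copy loops above boil down to a single merge-without-overwrite rule; a standalone sketch of just that rule (hypothetical helper, not part of the CLI):

```python
from pathlib import Path
import shutil

def merge_tree(src: Path, dst: Path) -> list[str]:
    """Copy files from src into dst without overwriting; return skipped paths."""
    skipped: list[str] = []
    for path in src.rglob("*"):
        if not path.is_file():
            continue
        target = dst / path.relative_to(src)
        if target.exists():
            skipped.append(str(target))   # existing user file wins
        else:
            target.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy2(path, target)    # preserves mtimes/permissions
    return skipped
```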
def scaffold_from_core_pack(
    project_path: Path,
    ai_assistant: str,
@@ -1490,12 +1571,6 @@ def load_init_options(project_path: Path) -> dict[str, Any]:
    return {}


# Agent-specific skill directory overrides for agents whose skills directory
# doesn't follow the standard <agent_folder>/skills/ pattern
AGENT_SKILLS_DIR_OVERRIDES = {
    "codex": ".agents/skills",  # Codex agent layout override
}

# Default skills directory for agents not in AGENT_CONFIG
DEFAULT_SKILLS_DIR = ".agents/skills"

@@ -1528,13 +1603,9 @@ SKILL_DESCRIPTIONS = {
def _get_skills_dir(project_path: Path, selected_ai: str) -> Path:
    """Resolve the agent-specific skills directory for the given AI assistant.

    Uses ``AGENT_SKILLS_DIR_OVERRIDES`` first, then falls back to
    ``AGENT_CONFIG[agent]["folder"] + "skills"``, and finally to
    ``DEFAULT_SKILLS_DIR``.
    Uses ``AGENT_CONFIG[agent]["folder"] + "skills"`` and falls back to
    ``DEFAULT_SKILLS_DIR`` for unknown agents.
    """
    if selected_ai in AGENT_SKILLS_DIR_OVERRIDES:
        return project_path / AGENT_SKILLS_DIR_OVERRIDES[selected_ai]

    agent_config = AGENT_CONFIG.get(selected_ai, {})
    agent_folder = agent_config.get("folder", "")
    if agent_folder:
@@ -1648,10 +1719,7 @@ def install_ai_skills(
    command_name = command_name[len("speckit."):]
    if command_name.endswith(".agent"):
        command_name = command_name[:-len(".agent")]
    if selected_ai == "kimi":
        skill_name = f"speckit.{command_name}"
    else:
        skill_name = f"speckit-{command_name}"
    skill_name = f"speckit-{command_name.replace('.', '-')}"

    # Create skill directory (additive — never removes existing content)
    skill_dir = skills_dir / skill_name
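A small worked example of the unified naming rule (hypothetical command name):

```python
# After the change, every agent gets hyphenated skill names; dots collapse to hyphens.
command_name = "my-extension.example"   # the "speckit." prefix is already stripped
skill_name = f"speckit-{command_name.replace('.', '-')}"
assert skill_name == "speckit-my-extension-example"
```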
@@ -1730,8 +1798,64 @@ def _has_bundled_skills(project_path: Path, selected_ai: str) -> bool:
    if not skills_dir.is_dir():
        return False

    pattern = "speckit.*/SKILL.md" if selected_ai == "kimi" else "speckit-*/SKILL.md"
    return any(skills_dir.glob(pattern))
    return any(skills_dir.glob("speckit-*/SKILL.md"))


def _migrate_legacy_kimi_dotted_skills(skills_dir: Path) -> tuple[int, int]:
    """Migrate legacy Kimi dotted skill dirs (speckit.xxx) to hyphenated format.

    Temporary migration helper:
    - Intended removal window: after 2026-06-25.
    - Purpose: one-time cleanup for projects initialized before Kimi moved to
      hyphenated skills (speckit-xxx).

    Returns:
        Tuple[migrated_count, removed_count]
        - migrated_count: old dotted dir renamed to hyphenated dir
        - removed_count: old dotted dir deleted when equivalent hyphenated dir existed
    """
    if not skills_dir.is_dir():
        return (0, 0)

    migrated_count = 0
    removed_count = 0

    for legacy_dir in sorted(skills_dir.glob("speckit.*")):
        if not legacy_dir.is_dir():
            continue
        if not (legacy_dir / "SKILL.md").exists():
            continue

        suffix = legacy_dir.name[len("speckit."):]
        if not suffix:
            continue

        target_dir = skills_dir / f"speckit-{suffix.replace('.', '-')}"

        if not target_dir.exists():
            shutil.move(str(legacy_dir), str(target_dir))
            migrated_count += 1
            continue

        # If the new target already exists, avoid destructive cleanup unless
        # both SKILL.md files are byte-identical.
        target_skill = target_dir / "SKILL.md"
        legacy_skill = legacy_dir / "SKILL.md"
        if target_skill.is_file():
            try:
                if target_skill.read_bytes() == legacy_skill.read_bytes():
                    # Preserve legacy directory when it contains extra user files.
                    has_extra_entries = any(
                        child.name != "SKILL.md" for child in legacy_dir.iterdir()
                    )
                    if not has_extra_entries:
                        shutil.rmtree(legacy_dir)
                        removed_count += 1
            except OSError:
                # Best-effort migration: preserve legacy dir on read failures.
                pass

    return (migrated_count, removed_count)


AGENT_SKILLS_MIGRATIONS = {
@@ -1782,6 +1906,7 @@ def init(
    offline: bool = typer.Option(False, "--offline", help="Use assets bundled in the specify-cli package instead of downloading from GitHub (no network access required). Bundled assets will become the default in v0.6.0 and this flag will be removed."),
    preset: str = typer.Option(None, "--preset", help="Install a preset during initialization (by preset ID)"),
    branch_numbering: str = typer.Option(None, "--branch-numbering", help="Branch numbering strategy: 'sequential' (001, 002, ...) or 'timestamp' (YYYYMMDD-HHMMSS)"),
    integration: str = typer.Option(None, "--integration", help="Use the new integration system (e.g. --integration copilot). Mutually exclusive with --ai."),
):
    """
    Initialize a new Specify project.
@@ -1843,6 +1968,35 @@ def init(
    if ai_assistant:
        ai_assistant = AI_ASSISTANT_ALIASES.get(ai_assistant, ai_assistant)

    # --integration and --ai are mutually exclusive
    if integration and ai_assistant:
        console.print("[red]Error:[/red] --integration and --ai are mutually exclusive")
        console.print("[yellow]Use:[/yellow] --integration for the new integration system, or --ai for the legacy path")
        raise typer.Exit(1)

    # Auto-promote: --ai copilot → integration path with a nudge
    use_integration = False
    if integration:
        from .integrations import INTEGRATION_REGISTRY, get_integration
        resolved_integration = get_integration(integration)
        if not resolved_integration:
            console.print(f"[red]Error:[/red] Unknown integration: '{integration}'")
            available = ", ".join(sorted(INTEGRATION_REGISTRY))
            console.print(f"[yellow]Available integrations:[/yellow] {available}")
            raise typer.Exit(1)
        use_integration = True
        # Map integration key to the ai_assistant variable for downstream compatibility
        ai_assistant = integration
    elif ai_assistant == "copilot":
        from .integrations import get_integration
        resolved_integration = get_integration("copilot")
        if resolved_integration:
            use_integration = True
            console.print(
                "[dim]Tip: Use [bold]--integration copilot[/bold] instead of "
                "--ai copilot. The --ai flag will be deprecated in a future release.[/dim]"
            )

    if project_name == ".":
        here = True
        project_name = None  # Clear project_name to use existing validation logic
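For orientation, the two init paths this introduces look like this on the command line (illustrative project name):

```bash
# New integration path
specify init my-app --integration copilot

# Legacy flag still works, but prints the deprecation tip shown above
specify init my-app --ai copilot
```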
@@ -2011,7 +2165,10 @@ def init(
        "This will become the default in v0.6.0."
    )

    if use_github:
    if use_integration:
        tracker.add("integration", "Install integration")
        tracker.add("shared-infra", "Install shared infrastructure")
    elif use_github:
        for key, label in [
            ("fetch", "Fetch latest release"),
            ("download", "Download template"),
@@ -2046,7 +2203,39 @@ def init(
    verify = not skip_tls
    local_ssl_context = ssl_context if verify else False

    if use_github:
    if use_integration:
        # Integration-based scaffolding (new path)
        from .integrations.manifest import IntegrationManifest
        tracker.start("integration")
        manifest = IntegrationManifest(
            resolved_integration.key, project_path, version=get_speckit_version()
        )
        resolved_integration.setup(
            project_path, manifest,
            script_type=selected_script,
        )
        manifest.save()

        # Write .specify/integration.json
        script_ext = "sh" if selected_script == "sh" else "ps1"
        integration_json = project_path / ".specify" / "integration.json"
        integration_json.parent.mkdir(parents=True, exist_ok=True)
        integration_json.write_text(json.dumps({
            "integration": resolved_integration.key,
            "version": get_speckit_version(),
            "scripts": {
                "update-context": f".specify/integrations/{resolved_integration.key}/scripts/update-context.{script_ext}",
            },
        }, indent=2) + "\n", encoding="utf-8")

        tracker.complete("integration", resolved_integration.config.get("name", resolved_integration.key))

        # Install shared infrastructure (scripts, templates)
        tracker.start("shared-infra")
        _install_shared_infra(project_path, selected_script, tracker=tracker)
        tracker.complete("shared-infra", f"scripts ({selected_script}) + templates")

    elif use_github:
        with httpx.Client(verify=local_ssl_context) as local_client:
            download_and_extract_template(
                project_path,
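Under those assumptions, the generated `.specify/integration.json` would look roughly like this (copilot integration with sh scripts; the version string is illustrative):

```json
{
  "integration": "copilot",
  "version": "0.4.4",
  "scripts": {
    "update-context": ".specify/integrations/copilot/scripts/update-context.sh"
  }
}
```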
@@ -2094,16 +2283,33 @@ def init(
|
||||
|
||||
ensure_constitution_from_template(project_path, tracker=tracker)
|
||||
|
||||
# Determine skills directory and migrate any legacy Kimi dotted skills.
|
||||
migrated_legacy_kimi_skills = 0
|
||||
removed_legacy_kimi_skills = 0
|
||||
skills_dir: Optional[Path] = None
|
||||
if selected_ai in NATIVE_SKILLS_AGENTS:
|
||||
skills_dir = _get_skills_dir(project_path, selected_ai)
|
||||
if selected_ai == "kimi" and skills_dir.is_dir():
|
||||
(
|
||||
migrated_legacy_kimi_skills,
|
||||
removed_legacy_kimi_skills,
|
||||
) = _migrate_legacy_kimi_dotted_skills(skills_dir)
|
||||
|
||||
if ai_skills:
|
||||
if selected_ai in NATIVE_SKILLS_AGENTS:
|
||||
skills_dir = _get_skills_dir(project_path, selected_ai)
|
||||
bundled_found = _has_bundled_skills(project_path, selected_ai)
|
||||
if bundled_found:
|
||||
detail = f"bundled skills → {skills_dir.relative_to(project_path)}"
|
||||
if migrated_legacy_kimi_skills or removed_legacy_kimi_skills:
|
||||
detail += (
|
||||
f" (migrated {migrated_legacy_kimi_skills}, "
|
||||
f"removed {removed_legacy_kimi_skills} legacy Kimi dotted skills)"
|
||||
)
|
||||
if tracker:
|
||||
tracker.start("ai-skills")
|
||||
tracker.complete("ai-skills", f"bundled skills → {skills_dir.relative_to(project_path)}")
|
||||
tracker.complete("ai-skills", detail)
|
||||
else:
|
||||
console.print(f"[green]✓[/green] Using bundled agent skills in {skills_dir.relative_to(project_path)}/")
|
||||
console.print(f"[green]✓[/green] Using {detail}")
|
||||
else:
|
||||
# Compatibility fallback: convert command templates to skills
|
||||
# when an older template archive does not include native skills.
|
||||
@@ -2164,7 +2370,7 @@ def init(
    # Persist the CLI options so later operations (e.g. preset add)
    # can adapt their behaviour without re-scanning the filesystem.
    # Must be saved BEFORE preset install so _get_skills_dir() works.
    save_init_options(project_path, {
    init_opts = {
        "ai": selected_ai,
        "ai_skills": ai_skills,
        "ai_commands_dir": ai_commands_dir,
@@ -2174,7 +2380,10 @@ def init(
        "offline": offline,
        "script": selected_script,
        "speckit_version": get_speckit_version(),
    })
    }
    if use_integration:
        init_opts["integration"] = resolved_integration.key
    save_init_options(project_path, init_opts)

    # Install preset if specified
    if preset:
@@ -2288,7 +2497,7 @@ def init(
        if codex_skill_mode:
            return f"$speckit-{name}"
        if kimi_skill_mode:
            return f"/skill:speckit.{name}"
            return f"/skill:speckit-{name}"
        return f"/speckit.{name}"

    steps_lines.append(f"{step_num}. Start using {usage_label} with your AI agent:")
@@ -3594,6 +3803,15 @@ def extension_add(
        for cmd in manifest.commands:
            console.print(f"  • {cmd['name']} - {cmd.get('description', '')}")

        # Report agent skills registration
        reg_meta = manager.registry.get(manifest.id)
        reg_skills = reg_meta.get("registered_skills", []) if reg_meta else []
        # Normalize to guard against corrupted registry entries
        if not isinstance(reg_skills, list):
            reg_skills = []
        if reg_skills:
            console.print(f"\n[green]✓[/green] {len(reg_skills)} agent skill(s) auto-registered")

        console.print("\n[yellow]⚠[/yellow] Configuration may be required")
        console.print(f"  Check: .specify/extensions/{manifest.id}/")

@@ -3632,14 +3850,19 @@ def extension_remove(
    installed = manager.list_installed()
    extension_id, display_name = _resolve_installed_extension(extension, installed, "remove")

    # Get extension info for command count
    # Get extension info for command and skill counts
    ext_manifest = manager.get_extension(extension_id)
    cmd_count = len(ext_manifest.commands) if ext_manifest else 0
    reg_meta = manager.registry.get(extension_id)
    raw_skills = reg_meta.get("registered_skills") if reg_meta else None
    skill_count = len(raw_skills) if isinstance(raw_skills, list) else 0

    # Confirm removal
    if not force:
        console.print("\n[yellow]⚠ This will remove:[/yellow]")
        console.print(f"  • {cmd_count} commands from AI agent")
        if skill_count:
            console.print(f"  • {skill_count} agent skill(s)")
        console.print(f"  • Extension directory: .specify/extensions/{extension_id}/")
        if not keep_config:
            console.print("  • Config files (will be backed up)")

@@ -10,6 +10,8 @@ from pathlib import Path
from typing import Dict, List, Any

import platform
import re
from copy import deepcopy
import yaml


@@ -211,24 +213,52 @@ class CommandRegistrar:
        return f"---\n{yaml_str}---\n"

    def _adjust_script_paths(self, frontmatter: dict) -> dict:
        """Adjust script paths from extension-relative to repo-relative.
        """Normalize script paths in frontmatter to generated project locations.

        Rewrites known repo-relative and top-level script paths under the
        `scripts` and `agent_scripts` keys (for example `../../scripts/`,
        `../../templates/`, `../../memory/`, `scripts/`, `templates/`, and
        `memory/`) to the `.specify/...` paths used in generated projects.

        Args:
            frontmatter: Frontmatter dictionary

        Returns:
            Modified frontmatter with adjusted paths
            Modified frontmatter with normalized project paths
        """
        frontmatter = deepcopy(frontmatter)

        for script_key in ("scripts", "agent_scripts"):
            scripts = frontmatter.get(script_key)
            if not isinstance(scripts, dict):
                continue

            for key, script_path in scripts.items():
                if isinstance(script_path, str) and script_path.startswith("../../scripts/"):
                    scripts[key] = f".specify/scripts/{script_path[14:]}"
                if isinstance(script_path, str):
                    scripts[key] = self._rewrite_project_relative_paths(script_path)
        return frontmatter

    @staticmethod
    def _rewrite_project_relative_paths(text: str) -> str:
        """Rewrite repo-relative paths to their generated project locations."""
        if not isinstance(text, str) or not text:
            return text

        for old, new in (
            ("../../memory/", ".specify/memory/"),
            ("../../scripts/", ".specify/scripts/"),
            ("../../templates/", ".specify/templates/"),
        ):
            text = text.replace(old, new)

        # Only rewrite top-level style references so extension-local paths like
        # ".specify/extensions/<ext>/scripts/..." remain intact.
        text = re.sub(r'(^|[\s`"\'(])(?:\.?/)?memory/', r"\1.specify/memory/", text)
        text = re.sub(r'(^|[\s`"\'(])(?:\.?/)?scripts/', r"\1.specify/scripts/", text)
        text = re.sub(r'(^|[\s`"\'(])(?:\.?/)?templates/', r"\1.specify/templates/", text)

        return text.replace(".specify/.specify/", ".specify/").replace(".specify.specify/", ".specify/")

    def render_markdown_command(
        self,
        frontmatter: dict,
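To make the rewrite rules concrete, here is a minimal sketch of the behaviour (the `specify_cli.agents` import path is an assumption; `_rewrite_project_relative_paths` is the static helper above):

```python
from specify_cli.agents import CommandRegistrar  # assumed import path

rewrite = CommandRegistrar._rewrite_project_relative_paths

# Repo-relative prefixes are replaced outright:
assert rewrite("run ../../scripts/bash/setup-plan.sh") == "run .specify/scripts/bash/setup-plan.sh"

# Top-level references are rewritten only at a token boundary:
assert rewrite("see templates/plan-template.md") == "see .specify/templates/plan-template.md"

# Extension-local paths keep their prefix untouched:
assert rewrite("run .specify/extensions/foo/scripts/run.sh") == "run .specify/extensions/foo/scripts/run.sh"
```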
@@ -277,9 +307,25 @@ class CommandRegistrar:
        toml_lines.append(f"# Source: {source_id}")
        toml_lines.append("")

        toml_lines.append('prompt = """')
        toml_lines.append(body)
        toml_lines.append('"""')
        # Keep TOML output valid even when body contains triple-quote delimiters.
        # Prefer multiline forms, then fall back to escaped basic string.
        if '"""' not in body:
            toml_lines.append('prompt = """')
            toml_lines.append(body)
            toml_lines.append('"""')
        elif "'''" not in body:
            toml_lines.append("prompt = '''")
            toml_lines.append(body)
            toml_lines.append("'''")
        else:
            escaped_body = (
                body.replace("\\", "\\\\")
                .replace('"', '\\"')
                .replace("\n", "\\n")
                .replace("\r", "\\r")
                .replace("\t", "\\t")
            )
            toml_lines.append(f'prompt = "{escaped_body}"')

        return "\n".join(toml_lines)

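A quick sanity check of the final fallback branch (a sketch with a hypothetical body; `tomllib`, available in Python 3.11+, is used only to show the output stays valid TOML):

```python
import tomllib

body = 'mixes """ and \'\'\' delimiters'  # contains both multiline delimiters
escaped = (
    body.replace("\\", "\\\\")
    .replace('"', '\\"')
    .replace("\n", "\\n")
    .replace("\r", "\\r")
    .replace("\t", "\\t")
)
doc = tomllib.loads(f'prompt = "{escaped}"')
assert doc["prompt"] == body  # the escaped basic string round-trips
```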
@@ -308,8 +354,8 @@ class CommandRegistrar:
        if not isinstance(frontmatter, dict):
            frontmatter = {}

        if agent_name == "codex":
            body = self._resolve_codex_skill_placeholders(frontmatter, body, project_root)
        if agent_name in {"codex", "kimi"}:
            body = self.resolve_skill_placeholders(agent_name, frontmatter, body, project_root)

        description = frontmatter.get("description", f"Spec-kit workflow command: {skill_name}")
        skill_frontmatter = {
@@ -324,13 +370,8 @@ class CommandRegistrar:
        return self.render_frontmatter(skill_frontmatter) + "\n" + body

    @staticmethod
    def _resolve_codex_skill_placeholders(frontmatter: dict, body: str, project_root: Path) -> str:
        """Resolve script placeholders for Codex skill overrides.

        This intentionally scopes the fix to Codex, which is the newly
        migrated runtime path in this PR. Existing Kimi behavior is left
        unchanged for now.
        """
    def resolve_skill_placeholders(agent_name: str, frontmatter: dict, body: str, project_root: Path) -> str:
        """Resolve script placeholders for skills-backed agents."""
        try:
            from . import load_init_options
        except ImportError:
@@ -346,7 +387,11 @@ class CommandRegistrar:
        if not isinstance(agent_scripts, dict):
            agent_scripts = {}

        script_variant = load_init_options(project_root).get("script")
        init_opts = load_init_options(project_root)
        if not isinstance(init_opts, dict):
            init_opts = {}

        script_variant = init_opts.get("script")
        if script_variant not in {"sh", "ps"}:
            fallback_order = []
            default_variant = "ps" if platform.system().lower().startswith("win") else "sh"
@@ -376,7 +421,8 @@ class CommandRegistrar:
            agent_script_command = agent_script_command.replace("{ARGS}", "$ARGUMENTS")
            body = body.replace("{AGENT_SCRIPT}", agent_script_command)

        return body.replace("{ARGS}", "$ARGUMENTS").replace("__AGENT__", "codex")
        body = body.replace("{ARGS}", "$ARGUMENTS").replace("__AGENT__", agent_name)
        return CommandRegistrar._rewrite_project_relative_paths(body)

    def _convert_argument_placeholder(self, content: str, from_placeholder: str, to_placeholder: str) -> str:
        """Convert argument placeholder format.
@@ -400,8 +446,9 @@ class CommandRegistrar:
        short_name = cmd_name
        if short_name.startswith("speckit."):
            short_name = short_name[len("speckit."):]
        short_name = short_name.replace(".", "-")

        return f"speckit.{short_name}" if agent_name == "kimi" else f"speckit-{short_name}"
        return f"speckit-{short_name}"

    def register_commands(
        self,

@@ -25,6 +25,49 @@ import yaml
from packaging import version as pkg_version
from packaging.specifiers import SpecifierSet, InvalidSpecifier

_FALLBACK_CORE_COMMAND_NAMES = frozenset({
    "analyze",
    "checklist",
    "clarify",
    "constitution",
    "implement",
    "plan",
    "specify",
    "tasks",
    "taskstoissues",
})
EXTENSION_COMMAND_NAME_PATTERN = re.compile(r"^speckit\.([a-z0-9-]+)\.([a-z0-9-]+)$")


def _load_core_command_names() -> frozenset[str]:
    """Discover bundled core command names from the packaged templates.

    Prefer the wheel-time ``core_pack`` bundle when present, and fall back to
    the source checkout when running from the repository. If neither is
    available, use the baked-in fallback set so validation still works.
    """
    candidate_dirs = [
        Path(__file__).parent / "core_pack" / "commands",
        Path(__file__).resolve().parent.parent.parent / "templates" / "commands",
    ]

    for commands_dir in candidate_dirs:
        if not commands_dir.is_dir():
            continue

        command_names = {
            command_file.stem
            for command_file in commands_dir.iterdir()
            if command_file.is_file() and command_file.suffix == ".md"
        }
        if command_names:
            return frozenset(command_names)

    return _FALLBACK_CORE_COMMAND_NAMES


CORE_COMMAND_NAMES = _load_core_command_names()


class ExtensionError(Exception):
    """Base exception for extension-related errors."""
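Note that the discovered names are bare template stems, never prefixed; in the fallback case that means, for example:

```python
# Command names are bare template stems (illustrative checks):
assert "plan" in _FALLBACK_CORE_COMMAND_NAMES
assert "speckit.plan" not in _FALLBACK_CORE_COMMAND_NAMES
```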
@@ -149,7 +192,7 @@ class ExtensionManifest:
                raise ValidationError("Command missing 'name' or 'file'")

            # Validate command name format
            if not re.match(r'^speckit\.[a-z0-9-]+\.[a-z0-9-]+$', cmd["name"]):
            if EXTENSION_COMMAND_NAME_PATTERN.match(cmd["name"]) is None:
                raise ValidationError(
                    f"Invalid command name '{cmd['name']}': "
                    "must follow pattern 'speckit.{extension}.{command}'"
@@ -446,6 +489,126 @@ class ExtensionManager:
        self.extensions_dir = project_root / ".specify" / "extensions"
        self.registry = ExtensionRegistry(self.extensions_dir)

    @staticmethod
    def _collect_manifest_command_names(manifest: ExtensionManifest) -> Dict[str, str]:
        """Collect command and alias names declared by a manifest.

        Performs install-time validation for extension-specific constraints:
        - commands and aliases must use the canonical `speckit.{extension}.{command}` shape
        - commands and aliases must use this extension's namespace
        - command namespaces must not shadow core commands
        - duplicate command/alias names inside one manifest are rejected

        Args:
            manifest: Parsed extension manifest

        Returns:
            Mapping of declared command/alias name -> kind ("command"/"alias")

        Raises:
            ValidationError: If any declared name is invalid
        """
        if manifest.id in CORE_COMMAND_NAMES:
            raise ValidationError(
                f"Extension ID '{manifest.id}' conflicts with core command namespace '{manifest.id}'"
            )

        declared_names: Dict[str, str] = {}

        for cmd in manifest.commands:
            primary_name = cmd["name"]
            aliases = cmd.get("aliases", [])

            if aliases is None:
                aliases = []
            if not isinstance(aliases, list):
                raise ValidationError(
                    f"Aliases for command '{primary_name}' must be a list"
                )

            for kind, name in [("command", primary_name)] + [
                ("alias", alias) for alias in aliases
            ]:
                if not isinstance(name, str):
                    raise ValidationError(
                        f"{kind.capitalize()} for command '{primary_name}' must be a string"
                    )

                match = EXTENSION_COMMAND_NAME_PATTERN.match(name)
                if match is None:
                    raise ValidationError(
                        f"Invalid {kind} '{name}': "
                        "must follow pattern 'speckit.{extension}.{command}'"
                    )

                namespace = match.group(1)
                if namespace != manifest.id:
                    raise ValidationError(
                        f"{kind.capitalize()} '{name}' must use extension namespace '{manifest.id}'"
                    )

                if namespace in CORE_COMMAND_NAMES:
                    raise ValidationError(
                        f"{kind.capitalize()} '{name}' conflicts with core command namespace '{namespace}'"
                    )

                if name in declared_names:
                    raise ValidationError(
                        f"Duplicate command or alias '{name}' in extension manifest"
                    )

                declared_names[name] = kind

        return declared_names

    def _get_installed_command_name_map(
        self,
        exclude_extension_id: Optional[str] = None,
    ) -> Dict[str, str]:
        """Return registered command and alias names for installed extensions."""
        installed_names: Dict[str, str] = {}

        for ext_id in self.registry.keys():
            if ext_id == exclude_extension_id:
                continue

            manifest = self.get_extension(ext_id)
            if manifest is None:
                continue

            for cmd in manifest.commands:
                cmd_name = cmd.get("name")
                if isinstance(cmd_name, str):
                    installed_names.setdefault(cmd_name, ext_id)

                aliases = cmd.get("aliases", [])
                if not isinstance(aliases, list):
                    continue

                for alias in aliases:
                    if isinstance(alias, str):
                        installed_names.setdefault(alias, ext_id)

        return installed_names

    def _validate_install_conflicts(self, manifest: ExtensionManifest) -> None:
        """Reject installs that would shadow core or installed extension commands."""
        declared_names = self._collect_manifest_command_names(manifest)
        installed_names = self._get_installed_command_name_map(
            exclude_extension_id=manifest.id
        )

        collisions = [
            f"{name} (already provided by extension '{installed_names[name]}')"
            for name in sorted(declared_names)
            if name in installed_names
        ]
        if collisions:
            raise ValidationError(
                "Extension commands conflict with installed extensions:\n- "
                + "\n- ".join(collisions)
            )

    @staticmethod
    def _load_extensionignore(source_dir: Path) -> Optional[Callable[[str, List[str]], Set[str]]]:
        """Load .extensionignore and return an ignore function for shutil.copytree.
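As an illustration, for an extension with id `review`, the checks above accept and reject declared names as follows (all names hypothetical):

```python
# Hypothetical names an extension with id "review" might declare:
#   "speckit.review.analyze-pr"  -> accepted: canonical shape, own namespace
#   "speckit.other.analyze-pr"   -> ValidationError: wrong namespace
#   "review.analyze-pr"          -> ValidationError: missing "speckit." prefix
#   "speckit.plan.check"         -> ValidationError: namespace differs from id
#                                   (and "plan" is a core command namespace)
# Declaring the same name twice, or as both a command and an alias,
# raises the duplicate-name ValidationError.
```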
@@ -510,6 +673,283 @@ class ExtensionManager:

        return _ignore

    def _get_skills_dir(self) -> Optional[Path]:
        """Return the active skills directory for extension skill registration.

        Reads ``.specify/init-options.json`` to determine whether skills
        are enabled and which agent was selected, then delegates to
        the module-level ``_get_skills_dir()`` helper for the concrete path.

        Kimi is treated as a native-skills agent: if ``ai == "kimi"`` and
        ``.kimi/skills`` exists, extension installs should still propagate
        command skills even when ``ai_skills`` is false.

        Returns:
            The skills directory ``Path``, or ``None`` if skills were not
            enabled and no native-skills fallback applies.
        """
        from . import load_init_options, _get_skills_dir as resolve_skills_dir

        opts = load_init_options(self.project_root)
        if not isinstance(opts, dict):
            opts = {}

        agent = opts.get("ai")
        if not isinstance(agent, str) or not agent:
            return None

        ai_skills_enabled = bool(opts.get("ai_skills"))
        if not ai_skills_enabled and agent != "kimi":
            return None

        skills_dir = resolve_skills_dir(self.project_root, agent)
        if not skills_dir.is_dir():
            return None

        return skills_dir

    def _register_extension_skills(
        self,
        manifest: ExtensionManifest,
        extension_dir: Path,
    ) -> List[str]:
        """Generate SKILL.md files for extension commands as agent skills.

        For every command in the extension manifest, creates a SKILL.md
        file in the agent's skills directory following the agentskills.io
        specification. This is only done when ``--ai-skills`` was used
        during project initialisation.

        Args:
            manifest: Extension manifest.
            extension_dir: Installed extension directory.

        Returns:
            List of skill names that were created (for registry storage).
        """
        skills_dir = self._get_skills_dir()
        if not skills_dir:
            return []

        from . import load_init_options
        from .agents import CommandRegistrar
        import yaml

        written: List[str] = []
        opts = load_init_options(self.project_root)
        if not isinstance(opts, dict):
            opts = {}
        selected_ai = opts.get("ai")
        if not isinstance(selected_ai, str) or not selected_ai:
            return []
        registrar = CommandRegistrar()

        for cmd_info in manifest.commands:
            cmd_name = cmd_info["name"]
            cmd_file_rel = cmd_info["file"]

            # Guard against path traversal: reject absolute paths and ensure
            # the resolved file stays within the extension directory.
            cmd_path = Path(cmd_file_rel)
            if cmd_path.is_absolute():
                continue
            try:
                ext_root = extension_dir.resolve()
                source_file = (ext_root / cmd_path).resolve()
                source_file.relative_to(ext_root)  # raises ValueError if outside
            except (OSError, ValueError):
                continue

            if not source_file.is_file():
                continue

            # Derive skill name from command name using the same hyphenated
            # convention as hook rendering and preset skill registration.
            short_name_raw = cmd_name
            if short_name_raw.startswith("speckit."):
                short_name_raw = short_name_raw[len("speckit."):]
            skill_name = f"speckit-{short_name_raw.replace('.', '-')}"

            # Check if skill already exists before creating the directory
            skill_subdir = skills_dir / skill_name
            skill_file = skill_subdir / "SKILL.md"
            if skill_file.exists():
                # Do not overwrite user-customized skills
                continue

            # Create skill directory; track whether we created it so we can clean
            # up safely if reading the source file subsequently fails.
            created_now = not skill_subdir.exists()
            skill_subdir.mkdir(parents=True, exist_ok=True)

            # Parse the command file — guard against IsADirectoryError / decode errors
            try:
                content = source_file.read_text(encoding="utf-8")
            except (OSError, UnicodeDecodeError):
                if created_now:
                    try:
                        skill_subdir.rmdir()  # undo the mkdir; dir is empty at this point
                    except OSError:
                        pass  # best-effort cleanup
                continue
            frontmatter, body = registrar.parse_frontmatter(content)
            frontmatter = registrar._adjust_script_paths(frontmatter)
            body = registrar.resolve_skill_placeholders(
                selected_ai, frontmatter, body, self.project_root
            )

            original_desc = frontmatter.get("description", "")
            description = original_desc or f"Extension command: {cmd_name}"

            frontmatter_data = {
                "name": skill_name,
                "description": description,
                "compatibility": "Requires spec-kit project structure with .specify/ directory",
                "metadata": {
                    "author": "github-spec-kit",
                    "source": f"extension:{manifest.id}",
                },
            }
            frontmatter_text = yaml.safe_dump(frontmatter_data, sort_keys=False).strip()

            # Derive a human-friendly title from the command name
            short_name = cmd_name
            if short_name.startswith("speckit."):
                short_name = short_name[len("speckit."):]
            title_name = short_name.replace(".", " ").replace("-", " ").title()

            skill_content = (
                f"---\n"
                f"{frontmatter_text}\n"
                f"---\n\n"
                f"# {title_name} Skill\n\n"
                f"{body}\n"
            )

            skill_file.write_text(skill_content, encoding="utf-8")
            written.append(skill_name)

        return written

    def _unregister_extension_skills(self, skill_names: List[str], extension_id: str) -> None:
        """Remove SKILL.md directories for extension skills.

        Called during extension removal to clean up skill files that
        were created by ``_register_extension_skills()``.

        If ``_get_skills_dir()`` returns ``None`` (e.g. the user removed
        init-options.json or toggled ai_skills after installation), we
        fall back to scanning all known agent skills directories so that
        orphaned skill directories are still cleaned up. In that case
        each candidate directory is verified against the SKILL.md
        ``metadata.source`` field before removal to avoid accidentally
        deleting user-created skills with the same name.

        Args:
            skill_names: List of skill names to remove.
            extension_id: Extension ID used to verify ownership during
                fallback candidate scanning.
        """
        if not skill_names:
            return

        skills_dir = self._get_skills_dir()

        if skills_dir:
            # Fast path: we know the exact skills directory
            for skill_name in skill_names:
                # Guard against path traversal from a corrupted registry entry:
                # reject names that are absolute, contain path separators, or
                # resolve to a path outside the skills directory.
                sn_path = Path(skill_name)
                if sn_path.is_absolute() or len(sn_path.parts) != 1:
                    continue
                try:
                    skill_subdir = (skills_dir / skill_name).resolve()
                    skill_subdir.relative_to(skills_dir.resolve())  # raises if outside
                except (OSError, ValueError):
                    continue
                if not skill_subdir.is_dir():
                    continue
                # Safety check: only delete if SKILL.md exists and its
                # metadata.source matches exactly this extension — mirroring
                # the fallback branch — so a corrupted registry entry cannot
                # delete an unrelated user skill.
                skill_md = skill_subdir / "SKILL.md"
                if not skill_md.is_file():
                    continue
                try:
                    import yaml as _yaml
                    raw = skill_md.read_text(encoding="utf-8")
                    source = ""
                    if raw.startswith("---"):
                        parts = raw.split("---", 2)
                        if len(parts) >= 3:
                            fm = _yaml.safe_load(parts[1]) or {}
                            source = (
                                fm.get("metadata", {}).get("source", "")
                                if isinstance(fm, dict)
                                else ""
                            )
                    if source != f"extension:{extension_id}":
                        continue
                except (OSError, UnicodeDecodeError, Exception):
                    continue
                shutil.rmtree(skill_subdir)
        else:
            # Fallback: scan all possible agent skills directories
            from . import AGENT_CONFIG, DEFAULT_SKILLS_DIR

            candidate_dirs: set[Path] = set()
            for cfg in AGENT_CONFIG.values():
                folder = cfg.get("folder", "")
                if folder:
                    candidate_dirs.add(self.project_root / folder.rstrip("/") / "skills")
            candidate_dirs.add(self.project_root / DEFAULT_SKILLS_DIR)

            for skills_candidate in candidate_dirs:
                if not skills_candidate.is_dir():
                    continue
                for skill_name in skill_names:
                    # Same path-traversal guard as the fast path above
                    sn_path = Path(skill_name)
                    if sn_path.is_absolute() or len(sn_path.parts) != 1:
                        continue
                    try:
                        skill_subdir = (skills_candidate / skill_name).resolve()
                        skill_subdir.relative_to(skills_candidate.resolve())  # raises if outside
                    except (OSError, ValueError):
                        continue
                    if not skill_subdir.is_dir():
                        continue
                    # Safety check: only delete if SKILL.md exists and its
                    # metadata.source matches exactly this extension. If the
                    # file is missing or unreadable we skip to avoid deleting
                    # unrelated user-created directories.
                    skill_md = skill_subdir / "SKILL.md"
                    if not skill_md.is_file():
                        continue
                    try:
                        import yaml as _yaml
                        raw = skill_md.read_text(encoding="utf-8")
                        source = ""
                        if raw.startswith("---"):
                            parts = raw.split("---", 2)
                            if len(parts) >= 3:
                                fm = _yaml.safe_load(parts[1]) or {}
                                source = (
                                    fm.get("metadata", {}).get("source", "")
                                    if isinstance(fm, dict)
                                    else ""
                                )
                        # Only remove skills explicitly created by this extension
                        if source != f"extension:{extension_id}":
                            continue
                    except (OSError, UnicodeDecodeError, Exception):
                        # If we can't verify, skip to avoid accidental deletion
                        continue
                    shutil.rmtree(skill_subdir)

    def check_compatibility(
        self,
        manifest: ExtensionManifest,
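The hyphenated naming used by `_register_extension_skills()` can be sketched as follows (the command name is illustrative):

```python
cmd_name = "speckit.review.analyze-pr"        # hypothetical extension command
short = cmd_name[len("speckit."):]            # "review.analyze-pr"
skill_name = f"speckit-{short.replace('.', '-')}"
assert skill_name == "speckit-review-analyze-pr"
# The generated file lands at <skills_dir>/speckit-review-analyze-pr/SKILL.md
```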
@@ -584,6 +1024,9 @@ class ExtensionManager:
                f"Use 'specify extension remove {manifest.id}' first."
            )

        # Reject manifests that would shadow core commands or installed extensions.
        self._validate_install_conflicts(manifest)

        # Install extension
        dest_dir = self.extensions_dir / manifest.id
        if dest_dir.exists():
@@ -601,6 +1044,10 @@ class ExtensionManager:
            manifest, dest_dir, self.project_root
        )

        # Auto-register extension commands as agent skills when --ai-skills
        # was used during project initialisation (feature parity).
        registered_skills = self._register_extension_skills(manifest, dest_dir)

        # Register hooks
        hook_executor = HookExecutor(self.project_root)
        hook_executor.register_hooks(manifest)
@@ -612,7 +1059,8 @@ class ExtensionManager:
            "manifest_hash": manifest.get_hash(),
            "enabled": True,
            "priority": priority,
            "registered_commands": registered_commands
            "registered_commands": registered_commands,
            "registered_skills": registered_skills,
        })

        return manifest
@@ -690,9 +1138,15 @@ class ExtensionManager:
        if not self.registry.is_installed(extension_id):
            return False

        # Get registered commands before removal
        # Get registered commands and skills before removal
        metadata = self.registry.get(extension_id)
        registered_commands = metadata.get("registered_commands", {}) if metadata else {}
        raw_skills = metadata.get("registered_skills", []) if metadata else []
        # Normalize: must be a list of plain strings to avoid corrupted-registry errors
        if isinstance(raw_skills, list):
            registered_skills = [s for s in raw_skills if isinstance(s, str)]
        else:
            registered_skills = []

        extension_dir = self.extensions_dir / extension_id

@@ -701,6 +1155,9 @@ class ExtensionManager:
        registrar = CommandRegistrar()
        registrar.unregister_commands(registered_commands, self.project_root)

        # Unregister agent skills
        self._unregister_extension_skills(registered_skills, extension_id)

        if keep_config:
            # Preserve config files, only remove non-config files
            if extension_dir.exists():
@@ -1644,6 +2101,52 @@ class HookExecutor:
        self.project_root = project_root
        self.extensions_dir = project_root / ".specify" / "extensions"
        self.config_file = project_root / ".specify" / "extensions.yml"
        self._init_options_cache: Optional[Dict[str, Any]] = None

    def _load_init_options(self) -> Dict[str, Any]:
        """Load persisted init options used to determine invocation style.

        Uses the shared helper from specify_cli and caches values per executor
        instance to avoid repeated filesystem reads during hook rendering.
        """
        if self._init_options_cache is None:
            from . import load_init_options

            payload = load_init_options(self.project_root)
            self._init_options_cache = payload if isinstance(payload, dict) else {}
        return self._init_options_cache

    @staticmethod
    def _skill_name_from_command(command: Any) -> str:
        """Map a command id like speckit.plan to speckit-plan skill name."""
        if not isinstance(command, str):
            return ""
        command_id = command.strip()
        if not command_id.startswith("speckit."):
            return ""
        return f"speckit-{command_id[len('speckit.'):].replace('.', '-')}"

    def _render_hook_invocation(self, command: Any) -> str:
        """Render an agent-specific invocation string for a hook command."""
        if not isinstance(command, str):
            return ""

        command_id = command.strip()
        if not command_id:
            return ""

        init_options = self._load_init_options()
        selected_ai = init_options.get("ai")
        codex_skill_mode = selected_ai == "codex" and bool(init_options.get("ai_skills"))
        kimi_skill_mode = selected_ai == "kimi"

        skill_name = self._skill_name_from_command(command_id)
        if codex_skill_mode and skill_name:
            return f"${skill_name}"
        if kimi_skill_mode and skill_name:
            return f"/skill:{skill_name}"

        return f"/{command_id}"

    def get_project_config(self) -> Dict[str, Any]:
        """Load project-level extension configuration.
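Concretely, `_render_hook_invocation` maps a hook command to an agent-specific string; derived from the branches above:

```python
# For command "speckit.plan" the rendered invocation is:
#   codex with ai_skills enabled -> "$speckit-plan"
#   kimi                         -> "/skill:speckit-plan"
#   every other agent            -> "/speckit.plan"
```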
@@ -1887,21 +2390,27 @@ class HookExecutor:
        for hook in hooks:
            extension = hook.get("extension")
            command = hook.get("command")
            invocation = self._render_hook_invocation(command)
            command_text = command if isinstance(command, str) and command.strip() else "<missing command>"
            display_invocation = invocation or (
                f"/{command_text}" if command_text != "<missing command>" else "/<missing command>"
            )
            optional = hook.get("optional", True)
            prompt = hook.get("prompt", "")
            description = hook.get("description", "")

            if optional:
                lines.append(f"\n**Optional Hook**: {extension}")
                lines.append(f"Command: `/{command}`")
                lines.append(f"Command: `{display_invocation}`")
                if description:
                    lines.append(f"Description: {description}")
                lines.append(f"\nPrompt: {prompt}")
                lines.append(f"To execute: `/{command}`")
                lines.append(f"To execute: `{display_invocation}`")
            else:
                lines.append(f"\n**Automatic Hook**: {extension}")
                lines.append(f"Executing: `/{command}`")
                lines.append(f"EXECUTE_COMMAND: {command}")
                lines.append(f"Executing: `{display_invocation}`")
                lines.append(f"EXECUTE_COMMAND: {command_text}")
                lines.append(f"EXECUTE_COMMAND_INVOCATION: {display_invocation}")

        return "\n".join(lines)

@@ -1965,6 +2474,7 @@ class HookExecutor:
        """
        return {
            "command": hook.get("command"),
            "invocation": self._render_hook_invocation(hook.get("command")),
            "extension": hook.get("extension"),
            "optional": hook.get("optional", True),
            "description": hook.get("description", ""),
@@ -2008,4 +2518,3 @@ class HookExecutor:
                hook["enabled"] = False

        self.save_project_config(config)
src/specify_cli/integrations/__init__.py (new file)
@@ -0,0 +1,46 @@
"""Integration registry for AI coding assistants.

Each integration is a self-contained subpackage that handles setup/teardown
for a specific AI assistant (Copilot, Claude, Gemini, etc.).
"""

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from .base import IntegrationBase

# Maps integration key → IntegrationBase instance.
# Populated by later stages as integrations are migrated.
INTEGRATION_REGISTRY: dict[str, IntegrationBase] = {}


def _register(integration: IntegrationBase) -> None:
    """Register an integration instance in the global registry.

    Raises ``ValueError`` for falsy keys and ``KeyError`` for duplicates.
    """
    key = integration.key
    if not key:
        raise ValueError("Cannot register integration with an empty key.")
    if key in INTEGRATION_REGISTRY:
        raise KeyError(f"Integration with key {key!r} is already registered.")
    INTEGRATION_REGISTRY[key] = integration


def get_integration(key: str) -> IntegrationBase | None:
    """Return the integration for *key*, or ``None`` if not registered."""
    return INTEGRATION_REGISTRY.get(key)


# -- Register built-in integrations --------------------------------------

def _register_builtins() -> None:
    """Register all built-in integrations."""
    from .copilot import CopilotIntegration

    _register(CopilotIntegration())


_register_builtins()
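A minimal usage sketch of the registry (the `specify_cli.integrations` import path is assumed):

```python
from specify_cli.integrations import get_integration  # assumed import path

copilot = get_integration("copilot")
if copilot is not None:
    print(copilot.config["name"])  # -> "GitHub Copilot"
assert get_integration("unknown") is None
```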
src/specify_cli/integrations/base.py (new file)
@@ -0,0 +1,415 @@
"""Base classes for AI-assistant integrations.

Provides:
- ``IntegrationOption`` — declares a CLI option an integration accepts.
- ``IntegrationBase`` — abstract base every integration must implement.
- ``MarkdownIntegration`` — concrete base for standard Markdown-format
  integrations (the common case — subclass, set three class attrs, done).
"""

from __future__ import annotations

import re
import shutil
from abc import ABC
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    from .manifest import IntegrationManifest


# ---------------------------------------------------------------------------
# IntegrationOption
# ---------------------------------------------------------------------------

@dataclass(frozen=True)
class IntegrationOption:
    """Declares an option that an integration accepts via ``--integration-options``.

    Attributes:
        name: The flag name (e.g. ``"--commands-dir"``).
        is_flag: ``True`` for boolean flags (``--skills``).
        required: ``True`` if the option must be supplied.
        default: Default value when not supplied (``None`` → no default).
        help: One-line description shown in ``specify integrate info``.
    """

    name: str
    is_flag: bool = False
    required: bool = False
    default: Any = None
    help: str = ""


# ---------------------------------------------------------------------------
# IntegrationBase — abstract base class
# ---------------------------------------------------------------------------

class IntegrationBase(ABC):
    """Abstract base class every integration must implement.

    Subclasses must set the following class-level attributes:

    * ``key`` — unique identifier, matches actual CLI tool name
    * ``config`` — dict compatible with ``AGENT_CONFIG`` entries
    * ``registrar_config`` — dict compatible with ``CommandRegistrar.AGENT_CONFIGS``

    And may optionally set:

    * ``context_file`` — path (relative to project root) of the agent
      context/instructions file (e.g. ``"CLAUDE.md"``)
    """

    # -- Must be set by every subclass ------------------------------------

    key: str = ""
    """Unique integration key — should match the actual CLI tool name."""

    config: dict[str, Any] | None = None
    """Metadata dict matching the ``AGENT_CONFIG`` shape."""

    registrar_config: dict[str, Any] | None = None
    """Registration dict matching ``CommandRegistrar.AGENT_CONFIGS`` shape."""

    # -- Optional ---------------------------------------------------------

    context_file: str | None = None
    """Relative path to the agent context file (e.g. ``CLAUDE.md``)."""

    # -- Public API -------------------------------------------------------

    @classmethod
    def options(cls) -> list[IntegrationOption]:
        """Return options this integration accepts. Default: none."""
        return []

    # -- Primitives — building blocks for setup() -------------------------

    def shared_commands_dir(self) -> Path | None:
        """Return path to the shared command templates directory.

        Checks ``core_pack/commands/`` (wheel install) first, then
        ``templates/commands/`` (source checkout). Returns ``None``
        if neither exists.
        """
        import inspect

        pkg_dir = Path(inspect.getfile(IntegrationBase)).resolve().parent.parent
        for candidate in [
            pkg_dir / "core_pack" / "commands",
            pkg_dir.parent.parent / "templates" / "commands",
        ]:
            if candidate.is_dir():
                return candidate
        return None

    def shared_templates_dir(self) -> Path | None:
        """Return path to the shared page templates directory.

        Contains ``vscode-settings.json``, ``spec-template.md``, etc.
        Checks ``core_pack/templates/`` then ``templates/``.
        """
        import inspect

        pkg_dir = Path(inspect.getfile(IntegrationBase)).resolve().parent.parent
        for candidate in [
            pkg_dir / "core_pack" / "templates",
            pkg_dir.parent.parent / "templates",
        ]:
            if candidate.is_dir():
                return candidate
        return None

    def list_command_templates(self) -> list[Path]:
        """Return sorted list of command template files from the shared directory."""
        cmd_dir = self.shared_commands_dir()
        if not cmd_dir or not cmd_dir.is_dir():
            return []
        return sorted(f for f in cmd_dir.iterdir() if f.is_file() and f.suffix == ".md")

    def command_filename(self, template_name: str) -> str:
        """Return the destination filename for a command template.

        *template_name* is the stem of the source file (e.g. ``"plan"``).
        Default: ``speckit.{template_name}.md``. Subclasses override
        to change the extension or naming convention.
        """
        return f"speckit.{template_name}.md"

    def commands_dest(self, project_root: Path) -> Path:
        """Return the absolute path to the commands output directory.

        Derived from ``config["folder"]`` and ``config["commands_subdir"]``.
        Raises ``ValueError`` if ``config`` or ``folder`` is missing.
        """
        if not self.config:
            raise ValueError(
                f"{type(self).__name__}.config is not set; integration "
                "subclasses must define a non-empty 'config' mapping."
            )
        folder = self.config.get("folder")
        if not folder:
            raise ValueError(
                f"{type(self).__name__}.config is missing required 'folder' entry."
            )
        subdir = self.config.get("commands_subdir", "commands")
        return project_root / folder / subdir

    # -- File operations — granular primitives for setup() ----------------

    @staticmethod
    def copy_command_to_directory(
        src: Path,
        dest_dir: Path,
        filename: str,
    ) -> Path:
        """Copy a command template to *dest_dir* with the given *filename*.

        Creates *dest_dir* if needed. Returns the absolute path of the
        written file. The caller can post-process the file before
        recording it in the manifest.
        """
        dest_dir.mkdir(parents=True, exist_ok=True)
        dst = dest_dir / filename
        shutil.copy2(src, dst)
        return dst

    @staticmethod
    def record_file_in_manifest(
        file_path: Path,
        project_root: Path,
        manifest: IntegrationManifest,
    ) -> None:
        """Hash *file_path* and record it in *manifest*.

        *file_path* must be inside *project_root*.
        """
        rel = file_path.resolve().relative_to(project_root.resolve())
        manifest.record_existing(rel)

    @staticmethod
    def write_file_and_record(
        content: str,
        dest: Path,
        project_root: Path,
        manifest: IntegrationManifest,
    ) -> Path:
        """Write *content* to *dest*, hash it, and record in *manifest*.

        Creates parent directories as needed. Returns *dest*.
        """
        dest.parent.mkdir(parents=True, exist_ok=True)
        dest.write_text(content, encoding="utf-8")
        rel = dest.resolve().relative_to(project_root.resolve())
        manifest.record_existing(rel)
        return dest

    @staticmethod
    def process_template(
        content: str,
        agent_name: str,
        script_type: str,
        arg_placeholder: str = "$ARGUMENTS",
    ) -> str:
        """Process a raw command template into agent-ready content.

        Performs the same transformations as the release script:
        1. Extract ``scripts.<script_type>`` value from YAML frontmatter
        2. Replace ``{SCRIPT}`` with the extracted script command
        3. Extract ``agent_scripts.<script_type>`` and replace ``{AGENT_SCRIPT}``
        4. Strip ``scripts:`` and ``agent_scripts:`` sections from frontmatter
        5. Replace ``{ARGS}`` with *arg_placeholder*
        6. Replace ``__AGENT__`` with *agent_name*
        7. Rewrite paths: ``scripts/`` → ``.specify/scripts/`` etc.
        """
        # 1. Extract script command from frontmatter
        script_command = ""
        script_pattern = re.compile(
            rf"^\s*{re.escape(script_type)}:\s*(.+)$", re.MULTILINE
        )
        # Find the scripts: block
        in_scripts = False
        for line in content.splitlines():
            if line.strip() == "scripts:":
                in_scripts = True
                continue
            if in_scripts and line and not line[0].isspace():
                in_scripts = False
            if in_scripts:
                m = script_pattern.match(line)
                if m:
                    script_command = m.group(1).strip()
                    break

        # 2. Replace {SCRIPT}
        if script_command:
            content = content.replace("{SCRIPT}", script_command)

        # 3. Extract agent_script command
        agent_script_command = ""
        in_agent_scripts = False
        for line in content.splitlines():
            if line.strip() == "agent_scripts:":
                in_agent_scripts = True
                continue
            if in_agent_scripts and line and not line[0].isspace():
                in_agent_scripts = False
            if in_agent_scripts:
                m = script_pattern.match(line)
                if m:
                    agent_script_command = m.group(1).strip()
                    break

        if agent_script_command:
            content = content.replace("{AGENT_SCRIPT}", agent_script_command)

        # 4. Strip scripts: and agent_scripts: sections from frontmatter
        lines = content.splitlines(keepends=True)
        output_lines: list[str] = []
        in_frontmatter = False
        skip_section = False
        dash_count = 0
        for line in lines:
            stripped = line.rstrip("\n\r")
            if stripped == "---":
                dash_count += 1
                if dash_count == 1:
                    in_frontmatter = True
                else:
                    in_frontmatter = False
                    skip_section = False
                output_lines.append(line)
                continue
            if in_frontmatter:
                if stripped in ("scripts:", "agent_scripts:"):
                    skip_section = True
                    continue
                if skip_section:
                    if line[0:1].isspace():
                        continue  # skip indented content under scripts/agent_scripts
                    skip_section = False
            output_lines.append(line)
        content = "".join(output_lines)

        # 5. Replace {ARGS}
        content = content.replace("{ARGS}", arg_placeholder)

        # 6. Replace __AGENT__
        content = content.replace("__AGENT__", agent_name)

        # 7. Rewrite paths (matches release script's rewrite_paths())
        content = re.sub(r"(/?)memory/", r".specify/memory/", content)
        content = re.sub(r"(/?)scripts/", r".specify/scripts/", content)
        content = re.sub(r"(/?)templates/", r".specify/templates/", content)
        # Fix double-prefix (same as release script's .specify.specify/ fix)
        content = content.replace(".specify.specify/", ".specify/")
        content = content.replace(".specify/.specify/", ".specify/")

        return content

    def setup(
        self,
        project_root: Path,
        manifest: IntegrationManifest,
        parsed_options: dict[str, Any] | None = None,
        **opts: Any,
    ) -> list[Path]:
        """Install integration command files into *project_root*.

        Returns the list of files created. Copies raw templates without
        processing. Integrations that need placeholder replacement
        (e.g. ``{SCRIPT}``, ``__AGENT__``) should override ``setup()``
        and call ``process_template()`` in their own loop — see
        ``CopilotIntegration`` for an example.
        """
        templates = self.list_command_templates()
        if not templates:
            return []

        project_root_resolved = project_root.resolve()
        if manifest.project_root != project_root_resolved:
            raise ValueError(
                f"manifest.project_root ({manifest.project_root}) does not match "
                f"project_root ({project_root_resolved})"
            )

        dest = self.commands_dest(project_root).resolve()
        try:
            dest.relative_to(project_root_resolved)
        except ValueError as exc:
            raise ValueError(
                f"Integration destination {dest} escapes "
                f"project root {project_root_resolved}"
            ) from exc

        created: list[Path] = []

        for src_file in templates:
            dst_name = self.command_filename(src_file.stem)
            dst_file = self.copy_command_to_directory(src_file, dest, dst_name)
            self.record_file_in_manifest(dst_file, project_root, manifest)
            created.append(dst_file)

        return created

    def teardown(
        self,
        project_root: Path,
        manifest: IntegrationManifest,
        *,
        force: bool = False,
    ) -> tuple[list[Path], list[Path]]:
        """Uninstall integration files from *project_root*.

        Delegates to ``manifest.uninstall()`` which only removes files
        whose hash still matches the recorded value (unless *force*).

        Returns ``(removed, skipped)`` file lists.
        """
        return manifest.uninstall(project_root, force=force)

    # -- Convenience helpers for subclasses -------------------------------

    def install(
        self,
        project_root: Path,
        manifest: IntegrationManifest,
        parsed_options: dict[str, Any] | None = None,
        **opts: Any,
    ) -> list[Path]:
        """High-level install — calls ``setup()`` and returns created files."""
        return self.setup(
            project_root, manifest, parsed_options=parsed_options, **opts
        )

    def uninstall(
        self,
        project_root: Path,
        manifest: IntegrationManifest,
        *,
        force: bool = False,
    ) -> tuple[list[Path], list[Path]]:
        """High-level uninstall — calls ``teardown()``."""
        return self.teardown(project_root, manifest, force=force)


# ---------------------------------------------------------------------------
# MarkdownIntegration — covers ~20 standard agents
# ---------------------------------------------------------------------------

class MarkdownIntegration(IntegrationBase):
    """Concrete base for integrations that use standard Markdown commands.

    Subclasses only need to set ``key``, ``config``, ``registrar_config``
    (and optionally ``context_file``). Everything else is inherited.

    The default ``setup()`` from ``IntegrationBase`` copies templates
    into the agent's commands directory — which is correct for the
    standard Markdown case.
    """

    # MarkdownIntegration inherits IntegrationBase.setup() as-is.
    # Future stages may add markdown-specific path rewriting here.
    pass
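A sketch of what a future standard Markdown integration could look like (all names invented for illustration; the import path is assumed):

```python
from specify_cli.integrations.base import MarkdownIntegration  # assumed path


class ExampleIntegration(MarkdownIntegration):
    """Hypothetical agent using the stock Markdown command layout."""

    key = "example"  # invented key, not a real agent
    config = {
        "name": "Example Agent",
        "folder": ".example/",
        "commands_subdir": "commands",
        "install_url": None,
        "requires_cli": False,
    }
    registrar_config = {
        "dir": ".example/commands",
        "format": "markdown",
        "args": "$ARGUMENTS",
    }
    context_file = "EXAMPLE.md"
```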
src/specify_cli/integrations/copilot/__init__.py (new file)
@@ -0,0 +1,197 @@
"""Copilot integration — GitHub Copilot in VS Code.

Copilot has several unique behaviors compared to standard markdown agents:
- Commands use ``.agent.md`` extension (not ``.md``)
- Each command gets a companion ``.prompt.md`` file in ``.github/prompts/``
- Installs ``.vscode/settings.json`` with prompt file recommendations
- Context file lives at ``.github/copilot-instructions.md``
"""

from __future__ import annotations

import json
import shutil
from pathlib import Path
from typing import Any

from ..base import IntegrationBase
from ..manifest import IntegrationManifest


class CopilotIntegration(IntegrationBase):
    """Integration for GitHub Copilot in VS Code."""

    key = "copilot"
    config = {
        "name": "GitHub Copilot",
        "folder": ".github/",
        "commands_subdir": "agents",
        "install_url": None,
        "requires_cli": False,
    }
    registrar_config = {
        "dir": ".github/agents",
        "format": "markdown",
        "args": "$ARGUMENTS",
        "extension": ".agent.md",
    }
    context_file = ".github/copilot-instructions.md"

    def command_filename(self, template_name: str) -> str:
        """Copilot commands use ``.agent.md`` extension."""
        return f"speckit.{template_name}.agent.md"

    def setup(
        self,
        project_root: Path,
        manifest: IntegrationManifest,
        parsed_options: dict[str, Any] | None = None,
        **opts: Any,
    ) -> list[Path]:
        """Install copilot commands, companion prompts, and VS Code settings.

        Uses base class primitives to: read templates, process them
        (replace placeholders, strip script blocks, rewrite paths),
        write as ``.agent.md``, then add companion prompts and VS Code settings.
        """
        project_root_resolved = project_root.resolve()
        if manifest.project_root != project_root_resolved:
            raise ValueError(
                f"manifest.project_root ({manifest.project_root}) does not match "
                f"project_root ({project_root_resolved})"
            )

        templates = self.list_command_templates()
        if not templates:
            return []

        dest = self.commands_dest(project_root)
        dest_resolved = dest.resolve()
        try:
            dest_resolved.relative_to(project_root_resolved)
        except ValueError as exc:
            raise ValueError(
                f"Integration destination {dest_resolved} escapes "
                f"project root {project_root_resolved}"
            ) from exc
        dest.mkdir(parents=True, exist_ok=True)
        created: list[Path] = []

        script_type = opts.get("script_type", "sh")
        arg_placeholder = self.registrar_config.get("args", "$ARGUMENTS")

        # 1. Process and write command files as .agent.md
        for src_file in templates:
            raw = src_file.read_text(encoding="utf-8")
            processed = self.process_template(raw, self.key, script_type, arg_placeholder)
            dst_name = self.command_filename(src_file.stem)
            dst_file = self.write_file_and_record(
                processed, dest / dst_name, project_root, manifest
            )
            created.append(dst_file)

        # 2. Generate companion .prompt.md files from the templates we just wrote
        prompts_dir = project_root / ".github" / "prompts"
        for src_file in templates:
            cmd_name = f"speckit.{src_file.stem}"
            prompt_content = f"---\nagent: {cmd_name}\n---\n"
            prompt_file = self.write_file_and_record(
                prompt_content,
                prompts_dir / f"{cmd_name}.prompt.md",
                project_root,
                manifest,
            )
            created.append(prompt_file)

        # 3. Write .vscode/settings.json
        settings_src = self._vscode_settings_path()
        if settings_src and settings_src.is_file():
            dst_settings = project_root / ".vscode" / "settings.json"
            dst_settings.parent.mkdir(parents=True, exist_ok=True)
            if dst_settings.exists():
                # Merge into existing — don't track since we can't safely
                # remove the user's settings file on uninstall.
                self._merge_vscode_settings(settings_src, dst_settings)
            else:
                shutil.copy2(settings_src, dst_settings)
                self.record_file_in_manifest(dst_settings, project_root, manifest)
            created.append(dst_settings)

        # 4. Install integration-specific update-context scripts
        scripts_src = Path(__file__).resolve().parent / "scripts"
        if scripts_src.is_dir():
            scripts_dest = project_root / ".specify" / "integrations" / "copilot" / "scripts"
            scripts_dest.mkdir(parents=True, exist_ok=True)
            for src_script in sorted(scripts_src.iterdir()):
                if src_script.is_file():
                    dst_script = scripts_dest / src_script.name
                    shutil.copy2(src_script, dst_script)
                    # Make shell scripts executable
                    if dst_script.suffix == ".sh":
                        dst_script.chmod(dst_script.stat().st_mode | 0o111)
                    self.record_file_in_manifest(dst_script, project_root, manifest)
                    created.append(dst_script)

        return created

    def _vscode_settings_path(self) -> Path | None:
        """Return path to the bundled vscode-settings.json template."""
        tpl_dir = self.shared_templates_dir()
        if tpl_dir:
            candidate = tpl_dir / "vscode-settings.json"
            if candidate.is_file():
                return candidate
        return None

    @staticmethod
    def _merge_vscode_settings(src: Path, dst: Path) -> None:
        """Merge settings from *src* into existing *dst* JSON file.

        Top-level keys from *src* are added only if missing in *dst*.
        For dict-valued keys, sub-keys are merged the same way.

        If *dst* cannot be parsed (e.g. JSONC with comments), the merge
        is skipped to avoid overwriting user settings.
        """
        try:
            existing = json.loads(dst.read_text(encoding="utf-8"))
        except (json.JSONDecodeError, OSError):
            # Cannot parse existing file (likely JSONC with comments).
            # Skip merge to preserve the user's settings, but show
            # what they should add manually.
            import logging
            template_content = src.read_text(encoding="utf-8")
            logging.getLogger(__name__).warning(
                "Could not parse %s (may contain JSONC comments). "
                "Skipping settings merge to preserve existing file.\n"
                "Please add the following settings manually:\n%s",
                dst, template_content,
            )
            return

        new_settings = json.loads(src.read_text(encoding="utf-8"))

        if not isinstance(existing, dict) or not isinstance(new_settings, dict):
            import logging
            logging.getLogger(__name__).warning(
                "Skipping settings merge: %s or template is not a JSON object.", dst
            )
            return

        changed = False
        for key, value in new_settings.items():
            if key not in existing:
                existing[key] = value
                changed = True
            elif isinstance(existing[key], dict) and isinstance(value, dict):
                for sub_key, sub_value in value.items():
                    if sub_key not in existing[key]:
                        existing[key][sub_key] = sub_value
                        changed = True

        if not changed:
            return

        dst.write_text(
            json.dumps(existing, indent=4) + "\n", encoding="utf-8"
        )
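For reference, each companion prompt file generated in step 2 is three lines of frontmatter; shown here for the `plan` template:

```python
# .github/prompts/speckit.plan.prompt.md would contain exactly:
#   ---
#   agent: speckit.plan
#   ---
```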
src/specify_cli/integrations/copilot/scripts/update-context.ps1 (new file)
@@ -0,0 +1,22 @@
# update-context.ps1 — Copilot integration: create/update .github/copilot-instructions.md
#
# This is the copilot-specific implementation that produces the GitHub
# Copilot instructions file. The shared dispatcher reads
# .specify/integration.json and calls this script.
#
# NOTE: This script is not yet active. It will be activated in Stage 7
# when the shared update-agent-context.ps1 replaces its switch statement
# with integration.json-based dispatch. The shared script must also be
# refactored to support SPECKIT_SOURCE_ONLY (guard the Main call) before
# dot-sourcing will work.
#
# Until then, this delegates to the shared script as a subprocess.

$ErrorActionPreference = 'Stop'

$repoRoot = git rev-parse --show-toplevel 2>$null
if (-not $repoRoot) { $repoRoot = $PWD.Path }

# Invoke shared update-agent-context script as a separate process.
# Dot-sourcing is unsafe until that script guards its Main call.
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType copilot
@@ -0,0 +1,22 @@
#!/usr/bin/env bash
# update-context.sh — Copilot integration: create/update .github/copilot-instructions.md
#
# This is the copilot-specific implementation that produces the GitHub
# Copilot instructions file. The shared dispatcher reads
# .specify/integration.json and calls this script.
#
# NOTE: This script is not yet active. It will be activated in Stage 7
# when the shared update-agent-context.sh replaces its case statement
# with integration.json-based dispatch. The shared script must also be
# refactored to support SPECKIT_SOURCE_ONLY (guard the main logic)
# before sourcing will work.
#
# Until then, this delegates to the shared script as a subprocess.

set -euo pipefail

REPO_ROOT="${REPO_ROOT:-$(git rev-parse --show-toplevel 2>/dev/null || pwd)}"

# Invoke shared update-agent-context script as a separate process.
# Sourcing is unsafe until that script guards its main logic.
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" copilot
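The exact schema of `.specify/integration.json` is not shown in this diff. From the CLI tests further down (which assert an `integration` key and a `scripts` mapping containing `update-context`), a plausible sketch is:

```python
# Hypothetical sketch of .specify/integration.json, expressed as a Python
# dict. Keys are inferred from the CLI test assertions below; the script
# path value is an assumption.
integration_json = {
    "integration": "copilot",
    "scripts": {
        "update-context": ".specify/integrations/copilot/scripts/update-context.sh",
    },
}
```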
265 src/specify_cli/integrations/manifest.py Normal file
@@ -0,0 +1,265 @@
"""Hash-tracked installation manifest for integrations.

Each installed integration records the files it created together with
their SHA-256 hashes. On uninstall only files whose hash still matches
the recorded value are removed — modified files are left in place and
reported to the caller.
"""

from __future__ import annotations

import hashlib
import json
import os
from datetime import datetime, timezone
from pathlib import Path
from typing import Any


def _sha256(path: Path) -> str:
    """Return the hex SHA-256 digest of *path*."""
    h = hashlib.sha256()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(8192), b""):
            h.update(chunk)
    return h.hexdigest()


def _validate_rel_path(rel: Path, root: Path) -> Path:
    """Resolve *rel* against *root* and verify it stays within *root*.

    Raises ``ValueError`` if *rel* is absolute, contains ``..`` segments
    that escape *root*, or otherwise resolves outside the project root.
    """
    if rel.is_absolute():
        raise ValueError(
            f"Absolute paths are not allowed in manifests: {rel}"
        )
    resolved = (root / rel).resolve()
    root_resolved = root.resolve()
    try:
        resolved.relative_to(root_resolved)
    except ValueError:
        raise ValueError(
            f"Path {rel} resolves to {resolved} which is outside "
            f"the project root {root_resolved}"
        ) from None
    return resolved

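Illustratively, the validation above accepts project-relative paths and rejects traversal. The paths below are hypothetical; the sketch assumes the module above is importable as `specify_cli.integrations.manifest`:

```python
from pathlib import Path

from specify_cli.integrations.manifest import _validate_rel_path

root = Path("/home/user/project")  # hypothetical project root
print(_validate_rel_path(Path(".github/agents/x.md"), root))
# -> /home/user/project/.github/agents/x.md

for bad in (Path("../escape.txt"), Path("/etc/passwd")):
    try:
        _validate_rel_path(bad, root)
    except ValueError as exc:
        print(exc)  # traversal and absolute paths are rejected
```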
class IntegrationManifest:
    """Tracks files installed by a single integration.

    Parameters:
        key: Integration identifier (e.g. ``"copilot"``).
        project_root: Absolute path to the project directory.
        version: CLI version string recorded in the manifest.
    """

    def __init__(self, key: str, project_root: Path, version: str = "") -> None:
        self.key = key
        self.project_root = project_root.resolve()
        self.version = version
        self._files: dict[str, str] = {}  # rel_path → sha256 hex
        self._installed_at: str = ""

    # -- Manifest file location -------------------------------------------

    @property
    def manifest_path(self) -> Path:
        """Path to the on-disk manifest JSON."""
        return self.project_root / ".specify" / "integrations" / f"{self.key}.manifest.json"

    # -- Recording files --------------------------------------------------

    def record_file(self, rel_path: str | Path, content: bytes | str) -> Path:
        """Write *content* to *rel_path* (relative to project root) and record its hash.

        Creates parent directories as needed. Returns the absolute path
        of the written file.

        Raises ``ValueError`` if *rel_path* resolves outside the project root.
        """
        rel = Path(rel_path)
        abs_path = _validate_rel_path(rel, self.project_root)
        abs_path.parent.mkdir(parents=True, exist_ok=True)

        if isinstance(content, str):
            content = content.encode("utf-8")
        abs_path.write_bytes(content)

        normalized = abs_path.relative_to(self.project_root).as_posix()
        self._files[normalized] = hashlib.sha256(content).hexdigest()
        return abs_path

    def record_existing(self, rel_path: str | Path) -> None:
        """Record the hash of an already-existing file at *rel_path*.

        Raises ``ValueError`` if *rel_path* resolves outside the project root.
        """
        rel = Path(rel_path)
        abs_path = _validate_rel_path(rel, self.project_root)
        normalized = abs_path.relative_to(self.project_root).as_posix()
        self._files[normalized] = _sha256(abs_path)

    # -- Querying ---------------------------------------------------------

    @property
    def files(self) -> dict[str, str]:
        """Return a copy of the ``{rel_path: sha256}`` mapping."""
        return dict(self._files)

    def check_modified(self) -> list[str]:
        """Return relative paths of tracked files whose content changed on disk."""
        modified: list[str] = []
        for rel, expected_hash in self._files.items():
            rel_path = Path(rel)
            # Skip paths that are absolute or attempt to escape the project root
            if rel_path.is_absolute() or ".." in rel_path.parts:
                continue
            abs_path = self.project_root / rel_path
            if not abs_path.exists() and not abs_path.is_symlink():
                continue
            # Treat symlinks and non-regular-files as modified
            if abs_path.is_symlink() or not abs_path.is_file():
                modified.append(rel)
                continue
            if _sha256(abs_path) != expected_hash:
                modified.append(rel)
        return modified

    # -- Uninstall --------------------------------------------------------

    def uninstall(
        self,
        project_root: Path | None = None,
        *,
        force: bool = False,
    ) -> tuple[list[Path], list[Path]]:
        """Remove tracked files whose hash still matches.

        Parameters:
            project_root: Override for the project root.
            force: If ``True``, remove files even if modified.

        Returns:
            ``(removed, skipped)`` — absolute paths.
        """
        root = (project_root or self.project_root).resolve()
        removed: list[Path] = []
        skipped: list[Path] = []

        for rel, expected_hash in self._files.items():
            # Use non-resolved path for deletion so symlinks themselves
            # are removed, not their targets.
            path = root / rel
            # Validate containment lexically (without following symlinks)
            # by collapsing .. segments via Path resolution on the string parts.
            try:
                normed = Path(os.path.normpath(path))
                normed.relative_to(root)
            except (ValueError, OSError):
                continue
            if not path.exists() and not path.is_symlink():
                continue
            # Skip directories — manifest only tracks files
            if not path.is_file() and not path.is_symlink():
                skipped.append(path)
                continue
            # Never follow symlinks when comparing hashes. Only remove
            # symlinks when forced, to avoid acting on tampered entries.
            if path.is_symlink():
                if not force:
                    skipped.append(path)
                    continue
            else:
                if not force and _sha256(path) != expected_hash:
                    skipped.append(path)
                    continue
            try:
                path.unlink()
            except OSError:
                skipped.append(path)
                continue
            removed.append(path)
            # Clean up empty parent directories up to project root
            parent = path.parent
            while parent != root:
                try:
                    parent.rmdir()  # only succeeds if empty
                except OSError:
                    break
                parent = parent.parent

        # Remove the manifest file itself
        manifest = root / ".specify" / "integrations" / f"{self.key}.manifest.json"
        if manifest.exists():
            manifest.unlink()
            parent = manifest.parent
            while parent != root:
                try:
                    parent.rmdir()
                except OSError:
                    break
                parent = parent.parent

        return removed, skipped

    # -- Persistence ------------------------------------------------------

    def save(self) -> Path:
        """Write the manifest to disk. Returns the manifest path."""
        self._installed_at = self._installed_at or datetime.now(timezone.utc).isoformat()
        data: dict[str, Any] = {
            "integration": self.key,
            "version": self.version,
            "installed_at": self._installed_at,
            "files": self._files,
        }
        path = self.manifest_path
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(json.dumps(data, indent=2) + "\n", encoding="utf-8")
        return path

    @classmethod
    def load(cls, key: str, project_root: Path) -> IntegrationManifest:
        """Load an existing manifest from disk.

        Raises ``FileNotFoundError`` if the manifest does not exist.
        """
        inst = cls(key, project_root)
        path = inst.manifest_path
        try:
            data = json.loads(path.read_text(encoding="utf-8"))
        except json.JSONDecodeError as exc:
            raise ValueError(
                f"Integration manifest at {path} contains invalid JSON"
            ) from exc

        if not isinstance(data, dict):
            raise ValueError(
                f"Integration manifest at {path} must be a JSON object, "
                f"got {type(data).__name__}"
            )

        files = data.get("files", {})
        if not isinstance(files, dict) or not all(
            isinstance(k, str) and isinstance(v, str) for k, v in files.items()
        ):
            raise ValueError(
                f"Integration manifest 'files' at {path} must be a "
                "mapping of string paths to string hashes"
            )

        inst.version = data.get("version", "")
        inst._installed_at = data.get("installed_at", "")
        inst._files = files

        stored_key = data.get("integration", "")
        if stored_key and stored_key != key:
            raise ValueError(
                f"Manifest at {path} belongs to integration {stored_key!r}, "
                f"not {key!r}"
            )

        return inst
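Taken together, a typical manifest lifecycle looks like this minimal sketch (the project root is hypothetical; the JSON shape in the comment follows `save()`):

```python
from pathlib import Path

from specify_cli.integrations.manifest import IntegrationManifest

root = Path("/tmp/demo-project")  # hypothetical project root
m = IntegrationManifest("copilot", root, version="0.1.0")
m.record_file(".github/agents/speckit.plan.agent.md", "agent body")
m.save()  # writes .specify/integrations/copilot.manifest.json, roughly:
# {
#   "integration": "copilot",
#   "version": "0.1.0",
#   "installed_at": "<ISO-8601 UTC timestamp>",
#   "files": {".github/agents/speckit.plan.agent.md": "<sha256 hex>"}
# }

loaded = IntegrationManifest.load("copilot", root)
assert loaded.check_modified() == []   # nothing edited yet
removed, skipped = loaded.uninstall()  # deletes only hash-matching files
```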
@@ -556,24 +556,31 @@ class PresetManager:
        registrar.unregister_commands(registered_commands, self.project_root)

    def _get_skills_dir(self) -> Optional[Path]:
        """Return the skills directory if ``--ai-skills`` was used during init.
        """Return the active skills directory for preset skill overrides.

        Reads ``.specify/init-options.json`` to determine whether skills
        are enabled and which agent was selected, then delegates to
        the module-level ``_get_skills_dir()`` helper for the concrete path.

        Kimi is treated as a native-skills agent: if ``ai == "kimi"`` and
        ``.kimi/skills`` exists, presets should still propagate command
        overrides to skills even when ``ai_skills`` is false.

        Returns:
            The skills directory ``Path``, or ``None`` if skills were not
            enabled or the init-options file is missing.
            enabled and no native-skills fallback applies.
        """
        from . import load_init_options, _get_skills_dir

        opts = load_init_options(self.project_root)
        if not opts.get("ai_skills"):
        if not isinstance(opts, dict):
            opts = {}
        agent = opts.get("ai")
        if not isinstance(agent, str) or not agent:
            return None

        agent = opts.get("ai")
        if not agent:
        ai_skills_enabled = bool(opts.get("ai_skills"))
        if not ai_skills_enabled and agent != "kimi":
            return None

        skills_dir = _get_skills_dir(self.project_root, agent)
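Reading the removed and added guard lines together, the net behaviour is: presets override skills when `ai_skills` was enabled, or when the selected agent is Kimi. A condensed, illustrative restatement:

```python
# Condensed view of the new guard logic in _get_skills_dir (illustrative):
def should_override_skills(opts: dict) -> bool:
    agent = opts.get("ai")
    if not isinstance(agent, str) or not agent:
        return False
    return bool(opts.get("ai_skills")) or agent == "kimi"

assert should_override_skills({"ai": "claude", "ai_skills": True})
assert should_override_skills({"ai": "kimi", "ai_skills": False})  # native skills
assert not should_override_skills({"ai": "claude", "ai_skills": False})
```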
@@ -582,6 +589,76 @@ class PresetManager:

        return skills_dir

    @staticmethod
    def _skill_names_for_command(cmd_name: str) -> tuple[str, str]:
        """Return the modern and legacy skill directory names for a command."""
        raw_short_name = cmd_name
        if raw_short_name.startswith("speckit."):
            raw_short_name = raw_short_name[len("speckit."):]

        modern_skill_name = f"speckit-{raw_short_name.replace('.', '-')}"
        legacy_skill_name = f"speckit.{raw_short_name}"
        return modern_skill_name, legacy_skill_name

    @staticmethod
    def _skill_title_from_command(cmd_name: str) -> str:
        """Return a human-friendly title for a skill command name."""
        title_name = cmd_name
        if title_name.startswith("speckit."):
            title_name = title_name[len("speckit."):]
        return title_name.replace(".", " ").replace("-", " ").title()

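Worked examples of the derivations performed by these two helpers (restated standalone; `speckit.foo.bar` is an invented command name):

```python
cmd = "speckit.foo.bar"
short = cmd[len("speckit."):]                  # "foo.bar"
modern = f"speckit-{short.replace('.', '-')}"  # "speckit-foo-bar"
legacy = f"speckit.{short}"                    # "speckit.foo.bar"
title = short.replace(".", " ").replace("-", " ").title()  # "Foo Bar"
```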
    def _build_extension_skill_restore_index(self) -> Dict[str, Dict[str, Any]]:
        """Index extension-backed skill restore data by skill directory name."""
        from .extensions import ExtensionManifest, ValidationError

        resolver = PresetResolver(self.project_root)
        extensions_dir = self.project_root / ".specify" / "extensions"
        restore_index: Dict[str, Dict[str, Any]] = {}

        for _priority, ext_id, _metadata in resolver._get_all_extensions_by_priority():
            ext_dir = extensions_dir / ext_id
            manifest_path = ext_dir / "extension.yml"
            if not manifest_path.is_file():
                continue

            try:
                manifest = ExtensionManifest(manifest_path)
            except ValidationError:
                continue

            ext_root = ext_dir.resolve()
            for cmd_info in manifest.commands:
                cmd_name = cmd_info.get("name")
                cmd_file_rel = cmd_info.get("file")
                if not isinstance(cmd_name, str) or not isinstance(cmd_file_rel, str):
                    continue

                cmd_path = Path(cmd_file_rel)
                if cmd_path.is_absolute():
                    continue

                try:
                    source_file = (ext_root / cmd_path).resolve()
                    source_file.relative_to(ext_root)
                except (OSError, ValueError):
                    continue

                if not source_file.is_file():
                    continue

                restore_info = {
                    "command_name": cmd_name,
                    "source_file": source_file,
                    "source": f"extension:{manifest.id}",
                }
                modern_skill_name, legacy_skill_name = self._skill_names_for_command(cmd_name)
                restore_index.setdefault(modern_skill_name, restore_info)
                if legacy_skill_name != modern_skill_name:
                    restore_index.setdefault(legacy_skill_name, restore_info)

        return restore_index

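A hypothetical example of a single `restore_index` entry produced by this method (the skill name, extension id, and path are invented; real entries hold resolved absolute paths):

```python
from pathlib import Path

restore_index = {
    "speckit-review": {
        "command_name": "speckit.review",
        "source_file": Path(".specify/extensions/my-ext/commands/review.md"),
        "source": "extension:my-ext",
    },
}
```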
    def _register_skills(
        self,
        manifest: "PresetManifest",
@@ -629,9 +706,15 @@ class PresetManager:
            return []

        from . import SKILL_DESCRIPTIONS, load_init_options
        from .agents import CommandRegistrar

        opts = load_init_options(self.project_root)
        selected_ai = opts.get("ai", "")
        init_opts = load_init_options(self.project_root)
        if not isinstance(init_opts, dict):
            init_opts = {}
        selected_ai = init_opts.get("ai")
        if not isinstance(selected_ai, str):
            return []
        registrar = CommandRegistrar()

        written: List[str] = []

@@ -643,62 +726,61 @@ class PresetManager:
                continue

            # Derive the short command name (e.g. "specify" from "speckit.specify")
            short_name = cmd_name
            if short_name.startswith("speckit."):
                short_name = short_name[len("speckit."):]
            if selected_ai == "kimi":
                skill_name = f"speckit.{short_name}"
            else:
                skill_name = f"speckit-{short_name}"
            raw_short_name = cmd_name
            if raw_short_name.startswith("speckit."):
                raw_short_name = raw_short_name[len("speckit."):]
            short_name = raw_short_name.replace(".", "-")
            skill_name, legacy_skill_name = self._skill_names_for_command(cmd_name)
            skill_title = self._skill_title_from_command(cmd_name)

            # Only overwrite if the skill already exists (i.e. --ai-skills was used)
            skill_subdir = skills_dir / skill_name
            if not skill_subdir.exists():
            # Only overwrite skills that already exist under skills_dir,
            # including Kimi native skills when ai_skills is false.
            # If both modern and legacy directories exist, update both.
            target_skill_names: List[str] = []
            if (skills_dir / skill_name).is_dir():
                target_skill_names.append(skill_name)
            if legacy_skill_name != skill_name and (skills_dir / legacy_skill_name).is_dir():
                target_skill_names.append(legacy_skill_name)
            if not target_skill_names:
                continue

            # Parse the command file
            content = source_file.read_text(encoding="utf-8")
            if content.startswith("---"):
                parts = content.split("---", 2)
                if len(parts) >= 3:
                    frontmatter = yaml.safe_load(parts[1])
                    if not isinstance(frontmatter, dict):
                        frontmatter = {}
                    body = parts[2].strip()
                else:
                    frontmatter = {}
                    body = content
            else:
                frontmatter = {}
                body = content
            frontmatter, body = registrar.parse_frontmatter(content)

            original_desc = frontmatter.get("description", "")
            enhanced_desc = SKILL_DESCRIPTIONS.get(
                short_name,
                original_desc or f"Spec-kit workflow command: {short_name}",
            )

            frontmatter_data = {
                "name": skill_name,
                "description": enhanced_desc,
                "compatibility": "Requires spec-kit project structure with .specify/ directory",
                "metadata": {
                    "author": "github-spec-kit",
                    "source": f"preset:{manifest.id}",
                },
            }
            frontmatter_text = yaml.safe_dump(frontmatter_data, sort_keys=False).strip()
            skill_content = (
                f"---\n"
                f"{frontmatter_text}\n"
                f"---\n\n"
                f"# Speckit {short_name.title()} Skill\n\n"
                f"{body}\n"
            frontmatter = dict(frontmatter)
            frontmatter["description"] = enhanced_desc
            body = registrar.resolve_skill_placeholders(
                selected_ai, frontmatter, body, self.project_root
            )

            skill_file = skill_subdir / "SKILL.md"
            skill_file.write_text(skill_content, encoding="utf-8")
            written.append(skill_name)
            for target_skill_name in target_skill_names:
                frontmatter_data = {
                    "name": target_skill_name,
                    "description": enhanced_desc,
                    "compatibility": "Requires spec-kit project structure with .specify/ directory",
                    "metadata": {
                        "author": "github-spec-kit",
                        "source": f"preset:{manifest.id}",
                    },
                }
                frontmatter_text = yaml.safe_dump(frontmatter_data, sort_keys=False).strip()
                skill_content = (
                    f"---\n"
                    f"{frontmatter_text}\n"
                    f"---\n\n"
                    f"# Speckit {skill_title} Skill\n\n"
                    f"{body}\n"
                )

                skill_file = skills_dir / target_skill_name / "SKILL.md"
                skill_file.write_text(skill_content, encoding="utf-8")
                written.append(target_skill_name)

        return written

@@ -720,10 +802,17 @@ class PresetManager:
        if not skills_dir:
            return

        from . import SKILL_DESCRIPTIONS
        from . import SKILL_DESCRIPTIONS, load_init_options
        from .agents import CommandRegistrar

        # Locate core command templates from the project's installed templates
        core_templates_dir = self.project_root / ".specify" / "templates" / "commands"
        init_opts = load_init_options(self.project_root)
        if not isinstance(init_opts, dict):
            init_opts = {}
        selected_ai = init_opts.get("ai")
        registrar = CommandRegistrar()
        extension_restore_index = self._build_extension_skill_restore_index()

        for skill_name in skill_names:
            # Derive command name from skill name (speckit-specify -> specify)
@@ -735,7 +824,10 @@ class PresetManager:

            skill_subdir = skills_dir / skill_name
            skill_file = skill_subdir / "SKILL.md"
            if not skill_file.exists():
            if not skill_subdir.is_dir():
                continue
            if not skill_file.is_file():
                # Only manage directories that contain the expected skill entrypoint.
                continue

            # Try to find the core command template
@@ -746,19 +838,11 @@ class PresetManager:
            if core_file:
                # Restore from core template
                content = core_file.read_text(encoding="utf-8")
                if content.startswith("---"):
                    parts = content.split("---", 2)
                    if len(parts) >= 3:
                        frontmatter = yaml.safe_load(parts[1])
                        if not isinstance(frontmatter, dict):
                            frontmatter = {}
                        body = parts[2].strip()
                    else:
                        frontmatter = {}
                        body = content
                else:
                    frontmatter = {}
                    body = content
                frontmatter, body = registrar.parse_frontmatter(content)
                if isinstance(selected_ai, str):
                    body = registrar.resolve_skill_placeholders(
                        selected_ai, frontmatter, body, self.project_root
                    )

                original_desc = frontmatter.get("description", "")
                enhanced_desc = SKILL_DESCRIPTIONS.get(
@@ -776,16 +860,49 @@ class PresetManager:
                    },
                }
                frontmatter_text = yaml.safe_dump(frontmatter_data, sort_keys=False).strip()
                skill_title = self._skill_title_from_command(short_name)
                skill_content = (
                    f"---\n"
                    f"{frontmatter_text}\n"
                    f"---\n\n"
                    f"# Speckit {short_name.title()} Skill\n\n"
                    f"# Speckit {skill_title} Skill\n\n"
                    f"{body}\n"
                )
                skill_file.write_text(skill_content, encoding="utf-8")
                continue

            extension_restore = extension_restore_index.get(skill_name)
            if extension_restore:
                content = extension_restore["source_file"].read_text(encoding="utf-8")
                frontmatter, body = registrar.parse_frontmatter(content)
                if isinstance(selected_ai, str):
                    body = registrar.resolve_skill_placeholders(
                        selected_ai, frontmatter, body, self.project_root
                    )

                command_name = extension_restore["command_name"]
                title_name = self._skill_title_from_command(command_name)

                frontmatter_data = {
                    "name": skill_name,
                    "description": frontmatter.get("description", f"Extension command: {command_name}"),
                    "compatibility": "Requires spec-kit project structure with .specify/ directory",
                    "metadata": {
                        "author": "github-spec-kit",
                        "source": extension_restore["source"],
                    },
                }
                frontmatter_text = yaml.safe_dump(frontmatter_data, sort_keys=False).strip()
                skill_content = (
                    f"---\n"
                    f"{frontmatter_text}\n"
                    f"---\n\n"
                    f"# {title_name} Skill\n\n"
                    f"{body}\n"
                )
                skill_file.write_text(skill_content, encoding="utf-8")
            else:
                # No core template — remove the skill entirely
                # No core or extension template — remove the skill entirely
                shutil.rmtree(skill_subdir)

    def install_from_directory(
@@ -915,17 +1032,26 @@ class PresetManager:
        if not self.registry.is_installed(pack_id):
            return False

        # Unregister commands from AI agents
        metadata = self.registry.get(pack_id)
        registered_commands = metadata.get("registered_commands", {}) if metadata else {}
        if registered_commands:
            self._unregister_commands(registered_commands)

        # Restore original skills when preset is removed
        registered_skills = metadata.get("registered_skills", []) if metadata else []
        registered_commands = metadata.get("registered_commands", {}) if metadata else {}
        pack_dir = self.presets_dir / pack_id
        if registered_skills:
            self._unregister_skills(registered_skills, pack_dir)
        try:
            from . import NATIVE_SKILLS_AGENTS
        except ImportError:
            NATIVE_SKILLS_AGENTS = set()
        registered_commands = {
            agent_name: cmd_names
            for agent_name, cmd_names in registered_commands.items()
            if agent_name not in NATIVE_SKILLS_AGENTS
        }

        # Unregister non-skill command files from AI agents.
        if registered_commands:
            self._unregister_commands(registered_commands)

        if pack_dir.exists():
            shutil.rmtree(pack_dir)

@@ -44,7 +44,7 @@ Load only the minimal necessary context from each artifact:

- Overview/Context
- Functional Requirements
- Non-Functional Requirements
- Success Criteria (measurable outcomes — e.g., performance, security, availability, user success, business impact)
- User Stories
- Edge Cases (if present)

@@ -71,7 +71,7 @@ Load only the minimal necessary context from each artifact:

Create internal representations (do not include raw artifacts in output):

- **Requirements inventory**: Each functional + non-functional requirement with a stable key (derive slug based on imperative phrase; e.g., "User can upload file" → `user-can-upload-file`)
- **Requirements inventory**: For each Functional Requirement (FR-###) and Success Criterion (SC-###), record a stable key. Use the explicit FR-/SC- identifier as the primary key when present, and optionally also derive an imperative-phrase slug for readability (e.g., "User can upload file" → `user-can-upload-file`). Include only Success Criteria items that require buildable work (e.g., load-testing infrastructure, security audit tooling), and exclude post-launch outcome metrics and business KPIs (e.g., "Reduce support tickets by 50%").
- **User story/action inventory**: Discrete user actions with acceptance criteria
- **Task coverage mapping**: Map each task to one or more requirements or stories (inference by keyword / explicit reference patterns like IDs or key phrases)
- **Constitution rule set**: Extract principle names and MUST/SHOULD normative statements

@@ -105,7 +105,7 @@ Focus on high-signal findings. Limit to 50 findings total; aggregate remainder i

- Requirements with zero associated tasks
- Tasks with no mapped requirement/story
- Non-functional requirements not reflected in tasks (e.g., performance, security)
- Success Criteria requiring buildable work (performance, security, availability) not reflected in tasks

#### F. Inconsistency

@@ -145,7 +145,7 @@ Execution steps:
- Functional ambiguity → Update or add a bullet in Functional Requirements.
- User interaction / actor distinction → Update User Stories or Actors subsection (if present) with clarified role, constraint, or scenario.
- Data shape / entities → Update Data Model (add fields, types, relationships) preserving ordering; note added constraints succinctly.
- Non-functional constraint → Add/modify measurable criteria in Non-Functional / Quality Attributes section (convert vague adjective to metric or explicit target).
- Non-functional constraint → Add/modify measurable criteria in Success Criteria > Measurable Outcomes (convert vague adjective to metric or explicit target).
- Edge case / negative flow → Add a new bullet under Edge Cases / Error Handling (or create such subsection if template provides placeholder for it).
- Terminology conflict → Normalize term across spec; retain original only if necessary by adding `(formerly referred to as "X")` once.
- If the clarification invalidates an earlier ambiguous statement, replace that statement instead of duplicating; leave no obsolete contradictory text.

0 tests/integrations/__init__.py Normal file
23 tests/integrations/conftest.py Normal file
@@ -0,0 +1,23 @@
"""Shared test helpers for integration tests."""

from specify_cli.integrations.base import MarkdownIntegration


class StubIntegration(MarkdownIntegration):
    """Minimal concrete integration for testing."""

    key = "stub"
    config = {
        "name": "Stub Agent",
        "folder": ".stub/",
        "commands_subdir": "commands",
        "install_url": None,
        "requires_cli": False,
    }
    registrar_config = {
        "dir": ".stub/commands",
        "format": "markdown",
        "args": "$ARGUMENTS",
        "extension": ".md",
    }
    context_file = "STUB.md"
169 tests/integrations/test_base.py Normal file
@@ -0,0 +1,169 @@
"""Tests for IntegrationOption, IntegrationBase, MarkdownIntegration, and primitives."""

import pytest

from specify_cli.integrations.base import (
    IntegrationBase,
    IntegrationOption,
    MarkdownIntegration,
)
from specify_cli.integrations.manifest import IntegrationManifest
from .conftest import StubIntegration


class TestIntegrationOption:
    def test_defaults(self):
        opt = IntegrationOption(name="--flag")
        assert opt.name == "--flag"
        assert opt.is_flag is False
        assert opt.required is False
        assert opt.default is None
        assert opt.help == ""

    def test_flag_option(self):
        opt = IntegrationOption(name="--skills", is_flag=True, default=True, help="Enable skills")
        assert opt.is_flag is True
        assert opt.default is True
        assert opt.help == "Enable skills"

    def test_required_option(self):
        opt = IntegrationOption(name="--commands-dir", required=True, help="Dir path")
        assert opt.required is True

    def test_frozen(self):
        opt = IntegrationOption(name="--x")
        with pytest.raises(AttributeError):
            opt.name = "--y"  # type: ignore[misc]


class TestIntegrationBase:
    def test_key_and_config(self):
        i = StubIntegration()
        assert i.key == "stub"
        assert i.config["name"] == "Stub Agent"
        assert i.registrar_config["format"] == "markdown"
        assert i.context_file == "STUB.md"

    def test_options_default_empty(self):
        assert StubIntegration.options() == []

    def test_shared_commands_dir(self):
        i = StubIntegration()
        cmd_dir = i.shared_commands_dir()
        assert cmd_dir is not None
        assert cmd_dir.is_dir()

    def test_setup_uses_shared_templates(self, tmp_path):
        i = StubIntegration()
        manifest = IntegrationManifest("stub", tmp_path)
        created = i.setup(tmp_path, manifest)
        assert len(created) > 0
        for f in created:
            assert f.parent == tmp_path / ".stub" / "commands"
            assert f.name.startswith("speckit.")
            assert f.name.endswith(".md")

    def test_setup_copies_templates(self, tmp_path, monkeypatch):
        tpl = tmp_path / "_templates"
        tpl.mkdir()
        (tpl / "plan.md").write_text("plan content", encoding="utf-8")
        (tpl / "specify.md").write_text("spec content", encoding="utf-8")

        i = StubIntegration()
        monkeypatch.setattr(type(i), "list_command_templates", lambda self: sorted(tpl.glob("*.md")))

        project = tmp_path / "project"
        project.mkdir()
        created = i.setup(project, IntegrationManifest("stub", project))
        assert len(created) == 2
        assert (project / ".stub" / "commands" / "speckit.plan.md").exists()
        assert (project / ".stub" / "commands" / "speckit.specify.md").exists()

    def test_install_delegates_to_setup(self, tmp_path):
        i = StubIntegration()
        manifest = IntegrationManifest("stub", tmp_path)
        result = i.install(tmp_path, manifest)
        assert len(result) > 0

    def test_uninstall_delegates_to_teardown(self, tmp_path):
        i = StubIntegration()
        manifest = IntegrationManifest("stub", tmp_path)
        removed, skipped = i.uninstall(tmp_path, manifest)
        assert removed == []
        assert skipped == []


class TestMarkdownIntegration:
    def test_is_subclass_of_base(self):
        assert issubclass(MarkdownIntegration, IntegrationBase)

    def test_stub_is_markdown(self):
        assert isinstance(StubIntegration(), MarkdownIntegration)


class TestBasePrimitives:
    def test_shared_commands_dir_returns_path(self):
        i = StubIntegration()
        cmd_dir = i.shared_commands_dir()
        assert cmd_dir is not None
        assert cmd_dir.is_dir()

    def test_shared_templates_dir_returns_path(self):
        i = StubIntegration()
        tpl_dir = i.shared_templates_dir()
        assert tpl_dir is not None
        assert tpl_dir.is_dir()

    def test_list_command_templates_returns_md_files(self):
        i = StubIntegration()
        templates = i.list_command_templates()
        assert len(templates) > 0
        assert all(t.suffix == ".md" for t in templates)

    def test_command_filename_default(self):
        i = StubIntegration()
        assert i.command_filename("plan") == "speckit.plan.md"

    def test_commands_dest(self, tmp_path):
        i = StubIntegration()
        dest = i.commands_dest(tmp_path)
        assert dest == tmp_path / ".stub" / "commands"

    def test_commands_dest_no_config_raises(self, tmp_path):
        class NoConfig(MarkdownIntegration):
            key = "noconfig"
        with pytest.raises(ValueError, match="config is not set"):
            NoConfig().commands_dest(tmp_path)

    def test_copy_command_to_directory(self, tmp_path):
        src = tmp_path / "source.md"
        src.write_text("content", encoding="utf-8")
        dest_dir = tmp_path / "output"
        result = IntegrationBase.copy_command_to_directory(src, dest_dir, "speckit.plan.md")
        assert result == dest_dir / "speckit.plan.md"
        assert result.read_text(encoding="utf-8") == "content"

    def test_record_file_in_manifest(self, tmp_path):
        f = tmp_path / "f.txt"
        f.write_text("hello", encoding="utf-8")
        m = IntegrationManifest("test", tmp_path)
        IntegrationBase.record_file_in_manifest(f, tmp_path, m)
        assert "f.txt" in m.files

    def test_write_file_and_record(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        dest = tmp_path / "sub" / "f.txt"
        result = IntegrationBase.write_file_and_record("content", dest, tmp_path, m)
        assert result == dest
        assert dest.read_text(encoding="utf-8") == "content"
        assert "sub/f.txt" in m.files

    def test_setup_copies_shared_templates(self, tmp_path):
        i = StubIntegration()
        m = IntegrationManifest("stub", tmp_path)
        created = i.setup(tmp_path, m)
        assert len(created) > 0
        for f in created:
            assert f.parent.name == "commands"
            assert f.name.startswith("speckit.")
            assert f.name.endswith(".md")
122 tests/integrations/test_cli.py Normal file
@@ -0,0 +1,122 @@
"""Tests for --integration flag on specify init (CLI-level)."""

import json
import os

import pytest


class TestInitIntegrationFlag:
    def test_integration_and_ai_mutually_exclusive(self):
        from typer.testing import CliRunner
        from specify_cli import app
        runner = CliRunner()
        result = runner.invoke(app, [
            "init", "test-project", "--ai", "claude", "--integration", "copilot",
        ])
        assert result.exit_code != 0
        assert "mutually exclusive" in result.output

    def test_unknown_integration_rejected(self):
        from typer.testing import CliRunner
        from specify_cli import app
        runner = CliRunner()
        result = runner.invoke(app, [
            "init", "test-project", "--integration", "nonexistent",
        ])
        assert result.exit_code != 0
        assert "Unknown integration" in result.output

    def test_integration_copilot_creates_files(self, tmp_path):
        from typer.testing import CliRunner
        from specify_cli import app
        runner = CliRunner()
        project = tmp_path / "int-test"
        project.mkdir()
        old_cwd = os.getcwd()
        try:
            os.chdir(project)
            result = runner.invoke(app, [
                "init", "--here", "--integration", "copilot", "--script", "sh", "--no-git",
            ], catch_exceptions=False)
        finally:
            os.chdir(old_cwd)
        assert result.exit_code == 0, f"init failed: {result.output}"
        assert (project / ".github" / "agents" / "speckit.plan.agent.md").exists()
        assert (project / ".github" / "prompts" / "speckit.plan.prompt.md").exists()
        assert (project / ".specify" / "scripts" / "bash" / "common.sh").exists()

        data = json.loads((project / ".specify" / "integration.json").read_text(encoding="utf-8"))
        assert data["integration"] == "copilot"
        assert "scripts" in data
        assert "update-context" in data["scripts"]

        opts = json.loads((project / ".specify" / "init-options.json").read_text(encoding="utf-8"))
        assert opts["integration"] == "copilot"

        assert (project / ".specify" / "integrations" / "copilot.manifest.json").exists()
        assert (project / ".specify" / "integrations" / "copilot" / "scripts" / "update-context.sh").exists()

        shared_manifest = project / ".specify" / "integrations" / "speckit.manifest.json"
        assert shared_manifest.exists()

    def test_ai_copilot_auto_promotes(self, tmp_path):
        from typer.testing import CliRunner
        from specify_cli import app
        project = tmp_path / "promote-test"
        project.mkdir()
        old_cwd = os.getcwd()
        try:
            os.chdir(project)
            runner = CliRunner()
            result = runner.invoke(app, [
                "init", "--here", "--ai", "copilot", "--script", "sh", "--no-git",
            ], catch_exceptions=False)
        finally:
            os.chdir(old_cwd)
        assert result.exit_code == 0
        assert "--integration copilot" in result.output
        assert (project / ".github" / "agents" / "speckit.plan.agent.md").exists()

    def test_shared_infra_skips_existing_files(self, tmp_path):
        """Pre-existing shared files are not overwritten by _install_shared_infra."""
        from typer.testing import CliRunner
        from specify_cli import app

        project = tmp_path / "skip-test"
        project.mkdir()

        # Pre-create a shared script with custom content
        scripts_dir = project / ".specify" / "scripts" / "bash"
        scripts_dir.mkdir(parents=True)
        custom_content = "# user-modified common.sh\n"
        (scripts_dir / "common.sh").write_text(custom_content, encoding="utf-8")

        # Pre-create a shared template with custom content
        templates_dir = project / ".specify" / "templates"
        templates_dir.mkdir(parents=True)
        custom_template = "# user-modified spec-template\n"
        (templates_dir / "spec-template.md").write_text(custom_template, encoding="utf-8")

        old_cwd = os.getcwd()
        try:
            os.chdir(project)
            runner = CliRunner()
            result = runner.invoke(app, [
                "init", "--here", "--force",
                "--integration", "copilot",
                "--script", "sh",
                "--no-git",
            ], catch_exceptions=False)
        finally:
            os.chdir(old_cwd)

        assert result.exit_code == 0

        # User's files should be preserved
        assert (scripts_dir / "common.sh").read_text(encoding="utf-8") == custom_content
        assert (templates_dir / "spec-template.md").read_text(encoding="utf-8") == custom_template

        # Other shared files should still be installed
        assert (scripts_dir / "setup-plan.sh").exists()
        assert (templates_dir / "plan-template.md").exists()
266 tests/integrations/test_copilot.py Normal file
@@ -0,0 +1,266 @@
"""Tests for CopilotIntegration."""

import json
import os

from specify_cli.integrations import get_integration
from specify_cli.integrations.manifest import IntegrationManifest


class TestCopilotIntegration:
    def test_copilot_key_and_config(self):
        copilot = get_integration("copilot")
        assert copilot is not None
        assert copilot.key == "copilot"
        assert copilot.config["folder"] == ".github/"
        assert copilot.config["commands_subdir"] == "agents"
        assert copilot.registrar_config["extension"] == ".agent.md"
        assert copilot.context_file == ".github/copilot-instructions.md"

    def test_command_filename_agent_md(self):
        copilot = get_integration("copilot")
        assert copilot.command_filename("plan") == "speckit.plan.agent.md"

    def test_setup_creates_agent_md_files(self, tmp_path):
        from specify_cli.integrations.copilot import CopilotIntegration
        copilot = CopilotIntegration()
        m = IntegrationManifest("copilot", tmp_path)
        created = copilot.setup(tmp_path, m)
        assert len(created) > 0
        agent_files = [f for f in created if ".agent." in f.name]
        assert len(agent_files) > 0
        for f in agent_files:
            assert f.parent == tmp_path / ".github" / "agents"
            assert f.name.endswith(".agent.md")

    def test_setup_creates_companion_prompts(self, tmp_path):
        from specify_cli.integrations.copilot import CopilotIntegration
        copilot = CopilotIntegration()
        m = IntegrationManifest("copilot", tmp_path)
        created = copilot.setup(tmp_path, m)
        prompt_files = [f for f in created if f.parent.name == "prompts"]
        assert len(prompt_files) > 0
        for f in prompt_files:
            assert f.name.endswith(".prompt.md")
            content = f.read_text(encoding="utf-8")
            assert content.startswith("---\nagent: speckit.")

    def test_agent_and_prompt_counts_match(self, tmp_path):
        from specify_cli.integrations.copilot import CopilotIntegration
        copilot = CopilotIntegration()
        m = IntegrationManifest("copilot", tmp_path)
        created = copilot.setup(tmp_path, m)
        agents = [f for f in created if ".agent.md" in f.name]
        prompts = [f for f in created if ".prompt.md" in f.name]
        assert len(agents) == len(prompts)

    def test_setup_creates_vscode_settings_new(self, tmp_path):
        from specify_cli.integrations.copilot import CopilotIntegration
        copilot = CopilotIntegration()
        assert copilot._vscode_settings_path() is not None
        m = IntegrationManifest("copilot", tmp_path)
        created = copilot.setup(tmp_path, m)
        settings = tmp_path / ".vscode" / "settings.json"
        assert settings.exists()
        assert settings in created
        assert any("settings.json" in k for k in m.files)

    def test_setup_merges_existing_vscode_settings(self, tmp_path):
        from specify_cli.integrations.copilot import CopilotIntegration
        copilot = CopilotIntegration()
        vscode_dir = tmp_path / ".vscode"
        vscode_dir.mkdir(parents=True)
        existing = {"editor.fontSize": 14, "custom.setting": True}
        (vscode_dir / "settings.json").write_text(json.dumps(existing, indent=4), encoding="utf-8")
        m = IntegrationManifest("copilot", tmp_path)
        created = copilot.setup(tmp_path, m)
        settings = tmp_path / ".vscode" / "settings.json"
        data = json.loads(settings.read_text(encoding="utf-8"))
        assert data["editor.fontSize"] == 14
        assert data["custom.setting"] is True
        assert settings not in created
        assert not any("settings.json" in k for k in m.files)

    def test_all_created_files_tracked_in_manifest(self, tmp_path):
        from specify_cli.integrations.copilot import CopilotIntegration
        copilot = CopilotIntegration()
        m = IntegrationManifest("copilot", tmp_path)
        created = copilot.setup(tmp_path, m)
        for f in created:
            rel = f.resolve().relative_to(tmp_path.resolve()).as_posix()
            assert rel in m.files, f"Created file {rel} not tracked in manifest"

    def test_install_uninstall_roundtrip(self, tmp_path):
        from specify_cli.integrations.copilot import CopilotIntegration
        copilot = CopilotIntegration()
        m = IntegrationManifest("copilot", tmp_path)
        created = copilot.install(tmp_path, m)
        assert len(created) > 0
        m.save()
        for f in created:
            assert f.exists()
        removed, skipped = copilot.uninstall(tmp_path, m)
        assert len(removed) == len(created)
        assert skipped == []

    def test_modified_file_survives_uninstall(self, tmp_path):
        from specify_cli.integrations.copilot import CopilotIntegration
        copilot = CopilotIntegration()
        m = IntegrationManifest("copilot", tmp_path)
        created = copilot.install(tmp_path, m)
        m.save()
        modified_file = created[0]
        modified_file.write_text("user modified this", encoding="utf-8")
        removed, skipped = copilot.uninstall(tmp_path, m)
        assert modified_file.exists()
        assert modified_file in skipped

    def test_directory_structure(self, tmp_path):
        from specify_cli.integrations.copilot import CopilotIntegration
        copilot = CopilotIntegration()
        m = IntegrationManifest("copilot", tmp_path)
        copilot.setup(tmp_path, m)
        agents_dir = tmp_path / ".github" / "agents"
        assert agents_dir.is_dir()
        agent_files = sorted(agents_dir.glob("speckit.*.agent.md"))
        assert len(agent_files) == 9
        expected_commands = {
            "analyze", "checklist", "clarify", "constitution",
            "implement", "plan", "specify", "tasks", "taskstoissues",
        }
        actual_commands = {f.name.removeprefix("speckit.").removesuffix(".agent.md") for f in agent_files}
        assert actual_commands == expected_commands

    def test_templates_are_processed(self, tmp_path):
        from specify_cli.integrations.copilot import CopilotIntegration
        copilot = CopilotIntegration()
        m = IntegrationManifest("copilot", tmp_path)
        copilot.setup(tmp_path, m)
        agents_dir = tmp_path / ".github" / "agents"
        for agent_file in agents_dir.glob("speckit.*.agent.md"):
            content = agent_file.read_text(encoding="utf-8")
            assert "{SCRIPT}" not in content, f"{agent_file.name} has unprocessed {{SCRIPT}}"
            assert "__AGENT__" not in content, f"{agent_file.name} has unprocessed __AGENT__"
            assert "{ARGS}" not in content, f"{agent_file.name} has unprocessed {{ARGS}}"
            assert "\nscripts:\n" not in content
            assert "\nagent_scripts:\n" not in content

    def test_complete_file_inventory_sh(self, tmp_path):
        """Every file produced by specify init --integration copilot --script sh."""
        from typer.testing import CliRunner
        from specify_cli import app
        project = tmp_path / "inventory-sh"
        project.mkdir()
        old_cwd = os.getcwd()
        try:
            os.chdir(project)
            result = CliRunner().invoke(app, [
                "init", "--here", "--integration", "copilot", "--script", "sh", "--no-git",
            ], catch_exceptions=False)
        finally:
            os.chdir(old_cwd)
        assert result.exit_code == 0
        actual = sorted(p.relative_to(project).as_posix() for p in project.rglob("*") if p.is_file())
        expected = sorted([
            ".github/agents/speckit.analyze.agent.md",
            ".github/agents/speckit.checklist.agent.md",
            ".github/agents/speckit.clarify.agent.md",
            ".github/agents/speckit.constitution.agent.md",
            ".github/agents/speckit.implement.agent.md",
            ".github/agents/speckit.plan.agent.md",
            ".github/agents/speckit.specify.agent.md",
            ".github/agents/speckit.tasks.agent.md",
            ".github/agents/speckit.taskstoissues.agent.md",
            ".github/prompts/speckit.analyze.prompt.md",
            ".github/prompts/speckit.checklist.prompt.md",
            ".github/prompts/speckit.clarify.prompt.md",
            ".github/prompts/speckit.constitution.prompt.md",
            ".github/prompts/speckit.implement.prompt.md",
            ".github/prompts/speckit.plan.prompt.md",
            ".github/prompts/speckit.specify.prompt.md",
            ".github/prompts/speckit.tasks.prompt.md",
            ".github/prompts/speckit.taskstoissues.prompt.md",
            ".vscode/settings.json",
            ".specify/integration.json",
            ".specify/init-options.json",
            ".specify/integrations/copilot.manifest.json",
            ".specify/integrations/speckit.manifest.json",
            ".specify/integrations/copilot/scripts/update-context.ps1",
            ".specify/integrations/copilot/scripts/update-context.sh",
            ".specify/scripts/bash/check-prerequisites.sh",
            ".specify/scripts/bash/common.sh",
            ".specify/scripts/bash/create-new-feature.sh",
            ".specify/scripts/bash/setup-plan.sh",
            ".specify/scripts/bash/update-agent-context.sh",
            ".specify/templates/agent-file-template.md",
            ".specify/templates/checklist-template.md",
            ".specify/templates/constitution-template.md",
            ".specify/templates/plan-template.md",
            ".specify/templates/spec-template.md",
            ".specify/templates/tasks-template.md",
            ".specify/memory/constitution.md",
        ])
        assert actual == expected, (
            f"Missing: {sorted(set(expected) - set(actual))}\n"
            f"Extra: {sorted(set(actual) - set(expected))}"
        )

    def test_complete_file_inventory_ps(self, tmp_path):
        """Every file produced by specify init --integration copilot --script ps."""
        from typer.testing import CliRunner
        from specify_cli import app
        project = tmp_path / "inventory-ps"
        project.mkdir()
        old_cwd = os.getcwd()
        try:
            os.chdir(project)
            result = CliRunner().invoke(app, [
                "init", "--here", "--integration", "copilot", "--script", "ps", "--no-git",
            ], catch_exceptions=False)
        finally:
            os.chdir(old_cwd)
        assert result.exit_code == 0
        actual = sorted(p.relative_to(project).as_posix() for p in project.rglob("*") if p.is_file())
        expected = sorted([
            ".github/agents/speckit.analyze.agent.md",
            ".github/agents/speckit.checklist.agent.md",
            ".github/agents/speckit.clarify.agent.md",
            ".github/agents/speckit.constitution.agent.md",
            ".github/agents/speckit.implement.agent.md",
            ".github/agents/speckit.plan.agent.md",
            ".github/agents/speckit.specify.agent.md",
            ".github/agents/speckit.tasks.agent.md",
            ".github/agents/speckit.taskstoissues.agent.md",
            ".github/prompts/speckit.analyze.prompt.md",
            ".github/prompts/speckit.checklist.prompt.md",
            ".github/prompts/speckit.clarify.prompt.md",
            ".github/prompts/speckit.constitution.prompt.md",
            ".github/prompts/speckit.implement.prompt.md",
            ".github/prompts/speckit.plan.prompt.md",
            ".github/prompts/speckit.specify.prompt.md",
            ".github/prompts/speckit.tasks.prompt.md",
            ".github/prompts/speckit.taskstoissues.prompt.md",
            ".vscode/settings.json",
            ".specify/integration.json",
            ".specify/init-options.json",
            ".specify/integrations/copilot.manifest.json",
            ".specify/integrations/speckit.manifest.json",
            ".specify/integrations/copilot/scripts/update-context.ps1",
            ".specify/integrations/copilot/scripts/update-context.sh",
            ".specify/scripts/powershell/check-prerequisites.ps1",
            ".specify/scripts/powershell/common.ps1",
            ".specify/scripts/powershell/create-new-feature.ps1",
            ".specify/scripts/powershell/setup-plan.ps1",
            ".specify/scripts/powershell/update-agent-context.ps1",
            ".specify/templates/agent-file-template.md",
            ".specify/templates/checklist-template.md",
            ".specify/templates/constitution-template.md",
            ".specify/templates/plan-template.md",
            ".specify/templates/spec-template.md",
            ".specify/templates/tasks-template.md",
            ".specify/memory/constitution.md",
        ])
        assert actual == expected, (
            f"Missing: {sorted(set(expected) - set(actual))}\n"
            f"Extra: {sorted(set(actual) - set(expected))}"
        )
245 tests/integrations/test_manifest.py Normal file
@@ -0,0 +1,245 @@
"""Tests for IntegrationManifest — record, hash, save, load, uninstall, modified detection."""

import hashlib
import json

import pytest

from specify_cli.integrations.manifest import IntegrationManifest, _sha256


class TestManifestRecordFile:
    def test_record_file_writes_and_hashes(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        content = "hello world"
        abs_path = m.record_file("a/b.txt", content)
        assert abs_path == tmp_path / "a" / "b.txt"
        assert abs_path.read_text(encoding="utf-8") == content
        expected_hash = hashlib.sha256(content.encode()).hexdigest()
        assert m.files["a/b.txt"] == expected_hash

    def test_record_file_bytes(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        data = b"\x00\x01\x02"
        abs_path = m.record_file("bin.dat", data)
        assert abs_path.read_bytes() == data
        assert m.files["bin.dat"] == hashlib.sha256(data).hexdigest()

    def test_record_existing(self, tmp_path):
        f = tmp_path / "existing.txt"
        f.write_text("content", encoding="utf-8")
        m = IntegrationManifest("test", tmp_path)
        m.record_existing("existing.txt")
        assert m.files["existing.txt"] == _sha256(f)


class TestManifestPathTraversal:
    def test_record_file_rejects_parent_traversal(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        with pytest.raises(ValueError, match="outside"):
            m.record_file("../escape.txt", "bad")

    def test_record_file_rejects_absolute_path(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        with pytest.raises(ValueError, match="Absolute paths"):
            m.record_file("/tmp/escape.txt", "bad")

    def test_record_existing_rejects_parent_traversal(self, tmp_path):
        escape = tmp_path.parent / "escape.txt"
        escape.write_text("evil", encoding="utf-8")
        try:
            m = IntegrationManifest("test", tmp_path)
            with pytest.raises(ValueError, match="outside"):
                m.record_existing("../escape.txt")
        finally:
            escape.unlink(missing_ok=True)

    def test_uninstall_skips_traversal_paths(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        m.record_file("safe.txt", "good")
        m._files["../outside.txt"] = "fakehash"
        m.save()
        removed, skipped = m.uninstall()
        assert len(removed) == 1
        assert removed[0].name == "safe.txt"


class TestManifestCheckModified:
    def test_unmodified_file(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        m.record_file("f.txt", "original")
        assert m.check_modified() == []

    def test_modified_file(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        m.record_file("f.txt", "original")
        (tmp_path / "f.txt").write_text("changed", encoding="utf-8")
        assert m.check_modified() == ["f.txt"]

    def test_deleted_file_not_reported(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        m.record_file("f.txt", "original")
        (tmp_path / "f.txt").unlink()
        assert m.check_modified() == []

    def test_symlink_treated_as_modified(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        m.record_file("f.txt", "original")
        target = tmp_path / "target.txt"
        target.write_text("target", encoding="utf-8")
        (tmp_path / "f.txt").unlink()
        (tmp_path / "f.txt").symlink_to(target)
        assert m.check_modified() == ["f.txt"]


class TestManifestUninstall:
    def test_removes_unmodified(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        m.record_file("d/f.txt", "content")
        m.save()
        removed, skipped = m.uninstall()
        assert len(removed) == 1
        assert not (tmp_path / "d" / "f.txt").exists()
        assert not (tmp_path / "d").exists()
        assert skipped == []

    def test_skips_modified(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        m.record_file("f.txt", "original")
        m.save()
        (tmp_path / "f.txt").write_text("modified", encoding="utf-8")
        removed, skipped = m.uninstall()
        assert removed == []
        assert len(skipped) == 1
        assert (tmp_path / "f.txt").exists()

    def test_force_removes_modified(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        m.record_file("f.txt", "original")
        m.save()
        (tmp_path / "f.txt").write_text("modified", encoding="utf-8")
        removed, skipped = m.uninstall(force=True)
        assert len(removed) == 1
        assert skipped == []

    def test_already_deleted_file(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        m.record_file("f.txt", "content")
        m.save()
        (tmp_path / "f.txt").unlink()
        removed, skipped = m.uninstall()
        assert removed == []
        assert skipped == []

    def test_removes_manifest_file(self, tmp_path):
        m = IntegrationManifest("test", tmp_path, version="1.0")
        m.record_file("f.txt", "content")
        m.save()
        assert m.manifest_path.exists()
        m.uninstall()
        assert not m.manifest_path.exists()

    def test_cleans_empty_parent_dirs(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        m.record_file("a/b/c/f.txt", "content")
        m.save()
        m.uninstall()
        assert not (tmp_path / "a").exists()

    def test_preserves_nonempty_parent_dirs(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        m.record_file("a/b/tracked.txt", "content")
        (tmp_path / "a" / "b" / "other.txt").write_text("keep", encoding="utf-8")
        m.save()
        m.uninstall()
        assert not (tmp_path / "a" / "b" / "tracked.txt").exists()
        assert (tmp_path / "a" / "b" / "other.txt").exists()

    def test_symlink_skipped_without_force(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        m.record_file("f.txt", "original")
        m.save()
        target = tmp_path / "target.txt"
        target.write_text("target", encoding="utf-8")
        (tmp_path / "f.txt").unlink()
        (tmp_path / "f.txt").symlink_to(target)
        removed, skipped = m.uninstall()
        assert removed == []
        assert len(skipped) == 1

    def test_symlink_removed_with_force(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        m.record_file("f.txt", "original")
        m.save()
        target = tmp_path / "target.txt"
        target.write_text("target", encoding="utf-8")
        (tmp_path / "f.txt").unlink()
        (tmp_path / "f.txt").symlink_to(target)
        removed, skipped = m.uninstall(force=True)
        assert len(removed) == 1
        assert target.exists()


class TestManifestPersistence:
    def test_save_and_load_roundtrip(self, tmp_path):
        m = IntegrationManifest("myagent", tmp_path, version="2.0.1")
        m.record_file("dir/file.md", "# Hello")
        m.save()
        loaded = IntegrationManifest.load("myagent", tmp_path)
        assert loaded.key == "myagent"
        assert loaded.version == "2.0.1"
        assert loaded.files == m.files

    def test_manifest_path(self, tmp_path):
        m = IntegrationManifest("copilot", tmp_path)
        assert m.manifest_path == tmp_path / ".specify" / "integrations" / "copilot.manifest.json"

    def test_load_missing_raises(self, tmp_path):
        with pytest.raises(FileNotFoundError):
            IntegrationManifest.load("nonexistent", tmp_path)

    def test_save_creates_directories(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        m.record_file("f.txt", "content")
        path = m.save()
        assert path.exists()
        data = json.loads(path.read_text(encoding="utf-8"))
        assert data["integration"] == "test"

    def test_save_preserves_installed_at(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        m.record_file("f.txt", "content")
        m.save()
        first_ts = m._installed_at
        m.save()
        assert m._installed_at == first_ts


class TestManifestLoadValidation:
    def test_load_non_dict_raises(self, tmp_path):
        path = tmp_path / ".specify" / "integrations" / "bad.manifest.json"
        path.parent.mkdir(parents=True)
        path.write_text('"just a string"', encoding="utf-8")
        with pytest.raises(ValueError, match="JSON object"):
            IntegrationManifest.load("bad", tmp_path)

    def test_load_bad_files_type_raises(self, tmp_path):
        path = tmp_path / ".specify" / "integrations" / "bad.manifest.json"
        path.parent.mkdir(parents=True)
        path.write_text(json.dumps({"files": ["not", "a", "dict"]}), encoding="utf-8")
        with pytest.raises(ValueError, match="mapping"):
            IntegrationManifest.load("bad", tmp_path)

    def test_load_bad_files_values_raises(self, tmp_path):
        path = tmp_path / ".specify" / "integrations" / "bad.manifest.json"
        path.parent.mkdir(parents=True)
        path.write_text(json.dumps({"files": {"a.txt": 123}}), encoding="utf-8")
        with pytest.raises(ValueError, match="mapping"):
            IntegrationManifest.load("bad", tmp_path)

    def test_load_invalid_json_raises(self, tmp_path):
        path = tmp_path / ".specify" / "integrations" / "bad.manifest.json"
        path.parent.mkdir(parents=True)
        path.write_text("{not valid json", encoding="utf-8")
        with pytest.raises(ValueError, match="invalid JSON"):
            IntegrationManifest.load("bad", tmp_path)
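Read end to end, these tests pin down a small install/uninstall lifecycle for `IntegrationManifest`. For orientation only (this is not part of the diff), here is a minimal usage sketch inferred from the assertions above; the constructor and method signatures come from the tests, while the project path and file contents are made-up examples:

```python
from pathlib import Path

from specify_cli.integrations.manifest import IntegrationManifest

project = Path("/tmp/demo-project")  # hypothetical project root

# Install: write files through the manifest so each one is hashed and tracked.
m = IntegrationManifest("copilot", project, version="1.0")
m.record_file("docs/guide.md", "# Guide")  # writes the file, records its SHA-256
m.save()  # persists .specify/integrations/copilot.manifest.json

# Later: reload and uninstall. Unmodified files are removed (with empty parent
# dirs cleaned up); files the user edited since install are skipped by default.
loaded = IntegrationManifest.load("copilot", project)
removed, skipped = loaded.uninstall()
# loaded.uninstall(force=True) would also remove edited files and symlinks.
```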
tests/integrations/test_registry.py (new normal file, +45 lines)
@@ -0,0 +1,45 @@
"""Tests for INTEGRATION_REGISTRY."""

import pytest

from specify_cli.integrations import (
    INTEGRATION_REGISTRY,
    _register,
    get_integration,
)
from specify_cli.integrations.base import MarkdownIntegration
from .conftest import StubIntegration


class TestRegistry:
    def test_registry_is_dict(self):
        assert isinstance(INTEGRATION_REGISTRY, dict)

    def test_register_and_get(self):
        stub = StubIntegration()
        _register(stub)
        try:
            assert get_integration("stub") is stub
        finally:
            INTEGRATION_REGISTRY.pop("stub", None)

    def test_get_missing_returns_none(self):
        assert get_integration("nonexistent-xyz") is None

    def test_register_empty_key_raises(self):
        class EmptyKey(MarkdownIntegration):
            key = ""
        with pytest.raises(ValueError, match="empty key"):
            _register(EmptyKey())

    def test_register_duplicate_raises(self):
        stub = StubIntegration()
        _register(stub)
        try:
            with pytest.raises(KeyError, match="already registered"):
                _register(StubIntegration())
        finally:
            INTEGRATION_REGISTRY.pop("stub", None)

    def test_copilot_registered(self):
        assert "copilot" in INTEGRATION_REGISTRY
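The registry tests imply a very small plugin surface: integrations are singleton objects stored under a non-empty, unique `key` attribute. A hypothetical third-party registration consistent with those tests might look like this (`MyToolIntegration` is invented for illustration):

```python
from specify_cli.integrations import INTEGRATION_REGISTRY, _register, get_integration
from specify_cli.integrations.base import MarkdownIntegration


class MyToolIntegration(MarkdownIntegration):
    key = "mytool"  # must be non-empty; a duplicate key raises KeyError per the tests


_register(MyToolIntegration())
assert get_integration("mytool") is INTEGRATION_REGISTRY["mytool"]
assert get_integration("unknown") is None  # missing keys return None, not raise
```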
@@ -24,8 +24,8 @@ import specify_cli

 from specify_cli import (
     _get_skills_dir,
+    _migrate_legacy_kimi_dotted_skills,
     install_ai_skills,
-    AGENT_SKILLS_DIR_OVERRIDES,
     DEFAULT_SKILLS_DIR,
     SKILL_DESCRIPTIONS,
     AGENT_CONFIG,
@@ -169,8 +169,8 @@ class TestGetSkillsDir:
         result = _get_skills_dir(project_dir, "copilot")
         assert result == project_dir / ".github" / "skills"

-    def test_codex_uses_override(self, project_dir):
-        """Codex should use the AGENT_SKILLS_DIR_OVERRIDES value."""
+    def test_codex_skills_dir_from_agent_config(self, project_dir):
+        """Codex should resolve skills directory from AGENT_CONFIG folder."""
         result = _get_skills_dir(project_dir, "codex")
         assert result == project_dir / ".agents" / "skills"

@@ -203,12 +203,71 @@ class TestGetSkillsDir:
         # Should always end with "skills"
         assert result.name == "skills"

-    def test_override_takes_precedence_over_config(self, project_dir):
-        """AGENT_SKILLS_DIR_OVERRIDES should take precedence over AGENT_CONFIG."""
-        for agent_key in AGENT_SKILLS_DIR_OVERRIDES:
-            result = _get_skills_dir(project_dir, agent_key)
-            expected = project_dir / AGENT_SKILLS_DIR_OVERRIDES[agent_key]
-            assert result == expected

 class TestKimiLegacySkillMigration:
     """Test temporary migration from Kimi dotted skill names to hyphenated names."""

     def test_migrates_legacy_dotted_skill_directory(self, project_dir):
         skills_dir = project_dir / ".kimi" / "skills"
         legacy_dir = skills_dir / "speckit.plan"
         legacy_dir.mkdir(parents=True)
         (legacy_dir / "SKILL.md").write_text("legacy")

         migrated, removed = _migrate_legacy_kimi_dotted_skills(skills_dir)

         assert migrated == 1
         assert removed == 0
         assert not legacy_dir.exists()
         assert (skills_dir / "speckit-plan" / "SKILL.md").exists()

     def test_removes_legacy_dir_when_hyphenated_target_exists_with_same_content(self, project_dir):
         skills_dir = project_dir / ".kimi" / "skills"
         legacy_dir = skills_dir / "speckit.plan"
         legacy_dir.mkdir(parents=True)
         (legacy_dir / "SKILL.md").write_text("legacy")
         target_dir = skills_dir / "speckit-plan"
         target_dir.mkdir(parents=True)
         (target_dir / "SKILL.md").write_text("legacy")

         migrated, removed = _migrate_legacy_kimi_dotted_skills(skills_dir)

         assert migrated == 0
         assert removed == 1
         assert not legacy_dir.exists()
         assert (target_dir / "SKILL.md").read_text() == "legacy"

     def test_keeps_legacy_dir_when_hyphenated_target_differs(self, project_dir):
         skills_dir = project_dir / ".kimi" / "skills"
         legacy_dir = skills_dir / "speckit.plan"
         legacy_dir.mkdir(parents=True)
         (legacy_dir / "SKILL.md").write_text("legacy")
         target_dir = skills_dir / "speckit-plan"
         target_dir.mkdir(parents=True)
         (target_dir / "SKILL.md").write_text("new")

         migrated, removed = _migrate_legacy_kimi_dotted_skills(skills_dir)

         assert migrated == 0
         assert removed == 0
         assert legacy_dir.exists()
         assert (legacy_dir / "SKILL.md").read_text() == "legacy"
         assert (target_dir / "SKILL.md").read_text() == "new"

     def test_keeps_legacy_dir_when_matching_target_but_extra_files_exist(self, project_dir):
         skills_dir = project_dir / ".kimi" / "skills"
         legacy_dir = skills_dir / "speckit.plan"
         legacy_dir.mkdir(parents=True)
         (legacy_dir / "SKILL.md").write_text("legacy")
         (legacy_dir / "notes.txt").write_text("custom")
         target_dir = skills_dir / "speckit-plan"
         target_dir.mkdir(parents=True)
         (target_dir / "SKILL.md").write_text("legacy")

         migrated, removed = _migrate_legacy_kimi_dotted_skills(skills_dir)

         assert migrated == 0
         assert removed == 0
         assert legacy_dir.exists()
         assert (legacy_dir / "notes.txt").read_text() == "custom"


 # ===== install_ai_skills Tests =====
@@ -473,8 +532,7 @@ class TestInstallAiSkills:
         skills_dir = _get_skills_dir(proj, agent_key)
         assert skills_dir.exists()
         skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
-        # Kimi uses dotted skill names; other agents use hyphen-separated names.
-        expected_skill_name = "speckit.specify" if agent_key == "kimi" else "speckit-specify"
+        expected_skill_name = "speckit-specify"
         assert expected_skill_name in skill_dirs
         assert (skills_dir / expected_skill_name / "SKILL.md").exists()

@@ -773,6 +831,32 @@ class TestNewProjectCommandSkip:
         mock_skills.assert_called_once()
         assert mock_skills.call_args.kwargs.get("overwrite_existing") is True

     def test_kimi_legacy_migration_runs_without_ai_skills_flag(self, tmp_path):
         """Kimi init should migrate dotted legacy skills even when --ai-skills is not set."""
         from typer.testing import CliRunner

         runner = CliRunner()
         target = tmp_path / "kimi-legacy-no-ai-skills"

         def fake_download(project_path, *args, **kwargs):
             legacy_dir = project_path / ".kimi" / "skills" / "speckit.plan"
             legacy_dir.mkdir(parents=True, exist_ok=True)
             (legacy_dir / "SKILL.md").write_text("---\nname: speckit.plan\n---\n\nlegacy\n")

         with patch("specify_cli.download_and_extract_template", side_effect=fake_download), \
              patch("specify_cli.ensure_executable_scripts"), \
              patch("specify_cli.ensure_constitution_from_template"), \
              patch("specify_cli.is_git_repo", return_value=False), \
              patch("specify_cli.shutil.which", return_value="/usr/bin/kimi"):
             result = runner.invoke(
                 app,
                 ["init", str(target), "--ai", "kimi", "--script", "sh", "--no-git"],
             )

         assert result.exit_code == 0
         assert not (target / ".kimi" / "skills" / "speckit.plan").exists()
         assert (target / ".kimi" / "skills" / "speckit-plan" / "SKILL.md").exists()

     def test_codex_ai_skills_here_mode_preserves_existing_codex_dir(self, tmp_path, monkeypatch):
         """Codex --here skills init should not delete a pre-existing .codex directory."""
         from typer.testing import CliRunner
@@ -1118,12 +1202,12 @@ class TestCliValidation:
         assert "Optional skills that you can use for your specs" in result.output

     def test_kimi_next_steps_show_skill_invocation(self, monkeypatch):
-        """Kimi next-steps guidance should display /skill:speckit.* usage."""
+        """Kimi next-steps guidance should display /skill:speckit-* usage."""
         from typer.testing import CliRunner

         def _fake_download(*args, **kwargs):
             project_path = Path(args[0])
-            skill_dir = project_path / ".kimi" / "skills" / "speckit.specify"
+            skill_dir = project_path / ".kimi" / "skills" / "speckit-specify"
             skill_dir.mkdir(parents=True, exist_ok=True)
             (skill_dir / "SKILL.md").write_text("---\ndescription: Test skill\n---\n\nBody.\n")

@@ -1137,7 +1221,7 @@ class TestCliValidation:
         )

         assert result.exit_code == 0
-        assert "/skill:speckit.constitution" in result.output
+        assert "/skill:speckit-constitution" in result.output
         assert "/speckit.constitution" not in result.output
         assert "Optional skills that you can use for your specs" in result.output

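The migration tests above fully constrain the observable behaviour of `_migrate_legacy_kimi_dotted_skills`: rename when the hyphenated target is absent, drop the legacy copy only when it is an exact duplicate of the target, and otherwise leave it alone. A minimal sketch consistent with those assertions (the real implementation may differ; `migrate_dotted_skills` is a hypothetical stand-in):

```python
import filecmp
import shutil
from pathlib import Path


def migrate_dotted_skills(skills_dir: Path) -> tuple[int, int]:
    """Rename legacy speckit.<name> skill dirs to speckit-<name>.

    Returns (migrated, removed), matching the tuple the tests unpack.
    """
    migrated = removed = 0
    for legacy in sorted(skills_dir.glob("speckit.*")):
        if not legacy.is_dir():
            continue
        target = skills_dir / legacy.name.replace(".", "-")
        if not target.exists():
            legacy.rename(target)  # plain rename counts as a migration
            migrated += 1
            continue
        # Target exists: remove the legacy dir only if it is an exact duplicate
        # (no extra files on either side, no differing files). A production
        # version would compare file contents recursively, not just one level.
        cmp = filecmp.dircmp(legacy, target)
        if not (cmp.left_only or cmp.right_only or cmp.diff_files or cmp.funny_files):
            shutil.rmtree(legacy)
            removed += 1
    return migrated, removed
```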
tests/test_check_tool.py (new normal file, +96 lines)
@@ -0,0 +1,96 @@
"""Tests for check_tool() — Claude Code CLI detection across install methods.

Covers issue https://github.com/github/spec-kit/issues/550:
`specify check` reports "Claude Code CLI (not found)" even when claude is
installed via npm-local (the default `claude` installer path).
"""

from unittest.mock import patch, MagicMock

from specify_cli import check_tool


class TestCheckToolClaude:
    """Claude CLI detection must work for all install methods."""

    def test_detected_via_migrate_installer_path(self, tmp_path):
        """claude migrate-installer puts binary at ~/.claude/local/claude."""
        fake_claude = tmp_path / "claude"
        fake_claude.touch()

        # Ensure npm-local path is missing so we only exercise migrate-installer path
        fake_missing = tmp_path / "nonexistent" / "claude"

        with patch("specify_cli.CLAUDE_LOCAL_PATH", fake_claude), \
             patch("specify_cli.CLAUDE_NPM_LOCAL_PATH", fake_missing), \
             patch("shutil.which", return_value=None):
            assert check_tool("claude") is True

    def test_detected_via_npm_local_path(self, tmp_path):
        """npm-local install puts binary at ~/.claude/local/node_modules/.bin/claude."""
        fake_npm_claude = tmp_path / "node_modules" / ".bin" / "claude"
        fake_npm_claude.parent.mkdir(parents=True)
        fake_npm_claude.touch()

        # Neither the migrate-installer path nor PATH has claude
        fake_migrate = tmp_path / "nonexistent" / "claude"

        with patch("specify_cli.CLAUDE_LOCAL_PATH", fake_migrate), \
             patch("specify_cli.CLAUDE_NPM_LOCAL_PATH", fake_npm_claude), \
             patch("shutil.which", return_value=None):
            assert check_tool("claude") is True

    def test_detected_via_path(self, tmp_path):
        """claude on PATH (global npm install) should still work."""
        fake_missing = tmp_path / "nonexistent" / "claude"

        with patch("specify_cli.CLAUDE_LOCAL_PATH", fake_missing), \
             patch("specify_cli.CLAUDE_NPM_LOCAL_PATH", fake_missing), \
             patch("shutil.which", return_value="/usr/local/bin/claude"):
            assert check_tool("claude") is True

    def test_not_found_when_nowhere(self, tmp_path):
        """Should return False when claude is genuinely not installed."""
        fake_missing = tmp_path / "nonexistent" / "claude"

        with patch("specify_cli.CLAUDE_LOCAL_PATH", fake_missing), \
             patch("specify_cli.CLAUDE_NPM_LOCAL_PATH", fake_missing), \
             patch("shutil.which", return_value=None):
            assert check_tool("claude") is False

    def test_tracker_updated_on_npm_local_detection(self, tmp_path):
        """StepTracker should be marked 'available' for npm-local installs."""
        fake_npm_claude = tmp_path / "node_modules" / ".bin" / "claude"
        fake_npm_claude.parent.mkdir(parents=True)
        fake_npm_claude.touch()

        fake_missing = tmp_path / "nonexistent" / "claude"
        tracker = MagicMock()

        with patch("specify_cli.CLAUDE_LOCAL_PATH", fake_missing), \
             patch("specify_cli.CLAUDE_NPM_LOCAL_PATH", fake_npm_claude), \
             patch("shutil.which", return_value=None):
            result = check_tool("claude", tracker=tracker)

        assert result is True
        tracker.complete.assert_called_once_with("claude", "available")


class TestCheckToolOther:
    """Non-Claude tools should be unaffected by the fix."""

    def test_git_detected_via_path(self):
        with patch("shutil.which", return_value="/usr/bin/git"):
            assert check_tool("git") is True

    def test_missing_tool(self):
        with patch("shutil.which", return_value=None):
            assert check_tool("nonexistent-tool") is False

    def test_kiro_fallback(self):
        """kiro-cli detection should try both kiro-cli and kiro."""
        def fake_which(name):
            return "/usr/bin/kiro" if name == "kiro" else None

        with patch("shutil.which", side_effect=fake_which):
            assert check_tool("kiro-cli") is True
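Taken together, these tests say the fixed `check_tool("claude")` must accept any of three install locations before reporting a failure. A sketch of the detection order the assertions imply; the two constant names are the patch targets used in the tests, their default values are reconstructed from the test docstrings, and `claude_is_installed` is a hypothetical distillation of the claude branch rather than the actual function body:

```python
import shutil
from pathlib import Path

CLAUDE_LOCAL_PATH = Path.home() / ".claude" / "local" / "claude"
CLAUDE_NPM_LOCAL_PATH = (
    Path.home() / ".claude" / "local" / "node_modules" / ".bin" / "claude"
)


def claude_is_installed() -> bool:
    return (
        CLAUDE_LOCAL_PATH.exists()              # `claude migrate-installer` location
        or CLAUDE_NPM_LOCAL_PATH.exists()       # npm-local install (issue #550)
        or shutil.which("claude") is not None   # global install on PATH
    )
```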
@@ -142,7 +142,7 @@ def _expected_cmd_dir(project_path: Path, agent: str) -> Path:

 # Agents whose commands are laid out as <skills_dir>/<name>/SKILL.md.
 # Maps agent -> separator used in skill directory names.
-_SKILL_AGENTS: dict[str, str] = {"codex": "-", "kimi": "."}
+_SKILL_AGENTS: dict[str, str] = {"codex": "-", "kimi": "-"}


 def _expected_ext(agent: str) -> str:

tests/test_extension_skills.py (new normal file, +741 lines)
@@ -0,0 +1,741 @@
"""
Unit tests for extension skill auto-registration.

Tests cover:
- SKILL.md generation when --ai-skills was used during init
- No skills created when ai_skills not active
- SKILL.md content correctness
- Existing user-modified skills not overwritten
- Skill cleanup on extension removal
- Registry metadata includes registered_skills
"""

import json
import pytest
import tempfile
import shutil
import yaml
from pathlib import Path

from specify_cli.extensions import (
    ExtensionManifest,
    ExtensionManager,
    ExtensionError,
)


# ===== Helpers =====

def _create_init_options(project_root: Path, ai: str = "claude", ai_skills: bool = True):
    """Write a .specify/init-options.json file."""
    opts_dir = project_root / ".specify"
    opts_dir.mkdir(parents=True, exist_ok=True)
    opts_file = opts_dir / "init-options.json"
    opts_file.write_text(json.dumps({
        "ai": ai,
        "ai_skills": ai_skills,
        "script": "sh",
    }))


def _create_skills_dir(project_root: Path, ai: str = "claude") -> Path:
    """Create and return the expected skills directory for the given agent."""
    # Match the logic in _get_skills_dir() from specify_cli
    from specify_cli import AGENT_CONFIG, DEFAULT_SKILLS_DIR

    agent_config = AGENT_CONFIG.get(ai, {})
    agent_folder = agent_config.get("folder", "")
    if agent_folder:
        skills_dir = project_root / agent_folder.rstrip("/") / "skills"
    else:
        skills_dir = project_root / DEFAULT_SKILLS_DIR

    skills_dir.mkdir(parents=True, exist_ok=True)
    return skills_dir


def _create_extension_dir(temp_dir: Path, ext_id: str = "test-ext") -> Path:
    """Create a complete extension directory with manifest and command files."""
    ext_dir = temp_dir / ext_id
    ext_dir.mkdir()

    manifest_data = {
        "schema_version": "1.0",
        "extension": {
            "id": ext_id,
            "name": "Test Extension",
            "version": "1.0.0",
            "description": "A test extension for skill registration",
        },
        "requires": {
            "speckit_version": ">=0.1.0",
        },
        "provides": {
            "commands": [
                {
                    "name": f"speckit.{ext_id}.hello",
                    "file": "commands/hello.md",
                    "description": "Test hello command",
                },
                {
                    "name": f"speckit.{ext_id}.world",
                    "file": "commands/world.md",
                    "description": "Test world command",
                },
            ]
        },
    }

    with open(ext_dir / "extension.yml", "w") as f:
        yaml.dump(manifest_data, f)

    commands_dir = ext_dir / "commands"
    commands_dir.mkdir()

    (commands_dir / "hello.md").write_text(
        "---\n"
        "description: \"Test hello command\"\n"
        "---\n"
        "\n"
        "# Hello Command\n"
        "\n"
        "Run this to say hello.\n"
        "$ARGUMENTS\n"
    )

    (commands_dir / "world.md").write_text(
        "---\n"
        "description: \"Test world command\"\n"
        "---\n"
        "\n"
        "# World Command\n"
        "\n"
        "Run this to greet the world.\n"
    )

    return ext_dir


# ===== Fixtures =====

@pytest.fixture
def temp_dir():
    """Create a temporary directory for tests."""
    tmpdir = tempfile.mkdtemp()
    yield Path(tmpdir)
    shutil.rmtree(tmpdir)


@pytest.fixture
def project_dir(temp_dir):
    """Create a mock spec-kit project directory."""
    proj_dir = temp_dir / "project"
    proj_dir.mkdir()

    # Create .specify directory
    specify_dir = proj_dir / ".specify"
    specify_dir.mkdir()

    return proj_dir


@pytest.fixture
def extension_dir(temp_dir):
    """Create a complete extension directory."""
    return _create_extension_dir(temp_dir)


@pytest.fixture
def skills_project(project_dir):
    """Create a project with --ai-skills enabled and skills directory."""
    _create_init_options(project_dir, ai="claude", ai_skills=True)
    skills_dir = _create_skills_dir(project_dir, ai="claude")
    return project_dir, skills_dir


@pytest.fixture
def no_skills_project(project_dir):
    """Create a project without --ai-skills."""
    _create_init_options(project_dir, ai="claude", ai_skills=False)
    return project_dir


# ===== ExtensionManager._get_skills_dir Tests =====

class TestExtensionManagerGetSkillsDir:
    """Test _get_skills_dir() on ExtensionManager."""

    def test_returns_skills_dir_when_active(self, skills_project):
        """Should return skills dir when ai_skills is true and dir exists."""
        project_dir, skills_dir = skills_project
        manager = ExtensionManager(project_dir)
        result = manager._get_skills_dir()
        assert result == skills_dir

    def test_returns_none_when_no_ai_skills(self, no_skills_project):
        """Should return None when ai_skills is false."""
        manager = ExtensionManager(no_skills_project)
        result = manager._get_skills_dir()
        assert result is None

    def test_returns_none_when_no_init_options(self, project_dir):
        """Should return None when init-options.json is missing."""
        manager = ExtensionManager(project_dir)
        result = manager._get_skills_dir()
        assert result is None

    def test_returns_none_when_skills_dir_missing(self, project_dir):
        """Should return None when skills dir doesn't exist on disk."""
        _create_init_options(project_dir, ai="claude", ai_skills=True)
        # Don't create the skills directory
        manager = ExtensionManager(project_dir)
        result = manager._get_skills_dir()
        assert result is None

    def test_returns_kimi_skills_dir_when_ai_skills_disabled(self, project_dir):
        """Kimi should still use its native skills dir when ai_skills is false."""
        _create_init_options(project_dir, ai="kimi", ai_skills=False)
        skills_dir = _create_skills_dir(project_dir, ai="kimi")
        manager = ExtensionManager(project_dir)
        result = manager._get_skills_dir()
        assert result == skills_dir

    def test_returns_none_for_non_dict_init_options(self, project_dir):
        """Corrupted-but-parseable init-options should not crash skill-dir lookup."""
        opts_file = project_dir / ".specify" / "init-options.json"
        opts_file.parent.mkdir(parents=True, exist_ok=True)
        opts_file.write_text("[]")
        _create_skills_dir(project_dir, ai="claude")
        manager = ExtensionManager(project_dir)
        result = manager._get_skills_dir()
        assert result is None


# ===== Extension Skill Registration Tests =====

class TestExtensionSkillRegistration:
    """Test _register_extension_skills() on ExtensionManager."""

    def test_skills_created_when_ai_skills_active(self, skills_project, extension_dir):
        """Skills should be created when ai_skills is enabled."""
        project_dir, skills_dir = skills_project
        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        # Check that skill directories were created
        skill_dirs = sorted([d.name for d in skills_dir.iterdir() if d.is_dir()])
        assert "speckit-test-ext-hello" in skill_dirs
        assert "speckit-test-ext-world" in skill_dirs

    def test_skill_md_content_correct(self, skills_project, extension_dir):
        """SKILL.md should have correct agentskills.io structure."""
        project_dir, skills_dir = skills_project
        manager = ExtensionManager(project_dir)
        manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        skill_file = skills_dir / "speckit-test-ext-hello" / "SKILL.md"
        assert skill_file.exists()
        content = skill_file.read_text()

        # Check structure
        assert content.startswith("---\n")
        assert "name: speckit-test-ext-hello" in content
        assert "description:" in content
        assert "Test hello command" in content
        assert "source: extension:test-ext" in content
        assert "author: github-spec-kit" in content
        assert "compatibility:" in content
        assert "Run this to say hello." in content

    def test_skill_md_has_parseable_yaml(self, skills_project, extension_dir):
        """Generated SKILL.md should contain valid, parseable YAML frontmatter."""
        project_dir, skills_dir = skills_project
        manager = ExtensionManager(project_dir)
        manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        skill_file = skills_dir / "speckit-test-ext-hello" / "SKILL.md"
        content = skill_file.read_text()

        assert content.startswith("---\n")
        parts = content.split("---", 2)
        assert len(parts) >= 3
        parsed = yaml.safe_load(parts[1])
        assert isinstance(parsed, dict)
        assert parsed["name"] == "speckit-test-ext-hello"
        assert "description" in parsed

    def test_no_skills_when_ai_skills_disabled(self, no_skills_project, extension_dir):
        """No skills should be created when ai_skills is false."""
        manager = ExtensionManager(no_skills_project)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        # Verify registry
        metadata = manager.registry.get(manifest.id)
        assert metadata["registered_skills"] == []

    def test_no_skills_when_init_options_missing(self, project_dir, extension_dir):
        """No skills should be created when init-options.json is absent."""
        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        metadata = manager.registry.get(manifest.id)
        assert metadata["registered_skills"] == []

    def test_existing_skill_not_overwritten(self, skills_project, extension_dir):
        """Pre-existing SKILL.md should not be overwritten."""
        project_dir, skills_dir = skills_project

        # Pre-create a custom skill
        custom_dir = skills_dir / "speckit-test-ext-hello"
        custom_dir.mkdir(parents=True)
        custom_content = "# My Custom Hello Skill\nUser-modified content\n"
        (custom_dir / "SKILL.md").write_text(custom_content)

        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        # Custom skill should be untouched
        assert (custom_dir / "SKILL.md").read_text() == custom_content

        # But the other skill should still be created
        metadata = manager.registry.get(manifest.id)
        assert "speckit-test-ext-world" in metadata["registered_skills"]
        # The pre-existing one should NOT be in registered_skills (it was skipped)
        assert "speckit-test-ext-hello" not in metadata["registered_skills"]

    def test_registered_skills_in_registry(self, skills_project, extension_dir):
        """Registry should contain registered_skills list."""
        project_dir, skills_dir = skills_project
        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        metadata = manager.registry.get(manifest.id)
        assert "registered_skills" in metadata
        assert len(metadata["registered_skills"]) == 2
        assert "speckit-test-ext-hello" in metadata["registered_skills"]
        assert "speckit-test-ext-world" in metadata["registered_skills"]

    def test_kimi_uses_hyphenated_skill_names(self, project_dir, temp_dir):
        """Kimi agent should use the same hyphenated skill names as hooks."""
        _create_init_options(project_dir, ai="kimi", ai_skills=True)
        _create_skills_dir(project_dir, ai="kimi")
        ext_dir = _create_extension_dir(temp_dir, ext_id="test-ext")

        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            ext_dir, "0.1.0", register_commands=False
        )

        metadata = manager.registry.get(manifest.id)
        assert "speckit-test-ext-hello" in metadata["registered_skills"]
        assert "speckit-test-ext-world" in metadata["registered_skills"]

    def test_kimi_creates_skills_when_ai_skills_disabled(self, project_dir, temp_dir):
        """Kimi should still auto-register extension skills in native-skills mode."""
        _create_init_options(project_dir, ai="kimi", ai_skills=False)
        skills_dir = _create_skills_dir(project_dir, ai="kimi")
        ext_dir = _create_extension_dir(temp_dir, ext_id="test-ext")

        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            ext_dir, "0.1.0", register_commands=False
        )

        metadata = manager.registry.get(manifest.id)
        assert "speckit-test-ext-hello" in metadata["registered_skills"]
        assert "speckit-test-ext-world" in metadata["registered_skills"]
        assert (skills_dir / "speckit-test-ext-hello" / "SKILL.md").exists()

    def test_skill_registration_resolves_script_placeholders(self, project_dir, temp_dir):
        """Auto-registered extension skills should resolve script placeholders."""
        _create_init_options(project_dir, ai="claude", ai_skills=True)
        skills_dir = _create_skills_dir(project_dir, ai="claude")

        ext_dir = temp_dir / "scripted-ext"
        ext_dir.mkdir()
        manifest_data = {
            "schema_version": "1.0",
            "extension": {
                "id": "scripted-ext",
                "name": "Scripted Extension",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.scripted-ext.plan",
                        "file": "commands/plan.md",
                        "description": "Scripted plan command",
                    }
                ]
            },
        }
        with open(ext_dir / "extension.yml", "w") as f:
            yaml.dump(manifest_data, f)

        (ext_dir / "commands").mkdir()
        (ext_dir / "commands" / "plan.md").write_text(
            "---\n"
            "description: Scripted plan command\n"
            "scripts:\n"
            "  sh: ../../scripts/bash/setup-plan.sh --json \"{ARGS}\"\n"
            "agent_scripts:\n"
            "  sh: ../../scripts/bash/update-agent-context.sh __AGENT__\n"
            "---\n\n"
            "Run {SCRIPT}\n"
            "Then {AGENT_SCRIPT}\n"
            "Review templates/checklist.md and memory/constitution.md for __AGENT__.\n"
        )

        manager = ExtensionManager(project_dir)
        manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)

        content = (skills_dir / "speckit-scripted-ext-plan" / "SKILL.md").read_text()
        assert "{SCRIPT}" not in content
        assert "{AGENT_SCRIPT}" not in content
        assert "{ARGS}" not in content
        assert "__AGENT__" not in content
        assert '.specify/scripts/bash/setup-plan.sh --json "$ARGUMENTS"' in content
        assert ".specify/scripts/bash/update-agent-context.sh claude" in content
        assert ".specify/templates/checklist.md" in content
        assert ".specify/memory/constitution.md" in content

    def test_missing_command_file_skipped(self, skills_project, temp_dir):
        """Commands with missing source files should be skipped gracefully."""
        project_dir, skills_dir = skills_project

        ext_dir = temp_dir / "missing-cmd-ext"
        ext_dir.mkdir()
        manifest_data = {
            "schema_version": "1.0",
            "extension": {
                "id": "missing-cmd-ext",
                "name": "Missing Cmd Extension",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.missing-cmd-ext.exists",
                        "file": "commands/exists.md",
                        "description": "Exists",
                    },
                    {
                        "name": "speckit.missing-cmd-ext.ghost",
                        "file": "commands/ghost.md",
                        "description": "Does not exist",
                    },
                ]
            },
        }
        with open(ext_dir / "extension.yml", "w") as f:
            yaml.dump(manifest_data, f)

        (ext_dir / "commands").mkdir()
        (ext_dir / "commands" / "exists.md").write_text(
            "---\ndescription: Exists\n---\n\n# Exists\n\nBody.\n"
        )
        # Intentionally do NOT create ghost.md

        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            ext_dir, "0.1.0", register_commands=False
        )

        metadata = manager.registry.get(manifest.id)
        assert "speckit-missing-cmd-ext-exists" in metadata["registered_skills"]
        assert "speckit-missing-cmd-ext-ghost" not in metadata["registered_skills"]


# ===== Extension Skill Unregistration Tests =====

class TestExtensionSkillUnregistration:
    """Test _unregister_extension_skills() on ExtensionManager."""

    def test_skills_removed_on_extension_remove(self, skills_project, extension_dir):
        """Removing an extension should clean up its skill directories."""
        project_dir, skills_dir = skills_project
        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        # Verify skills exist
        assert (skills_dir / "speckit-test-ext-hello" / "SKILL.md").exists()
        assert (skills_dir / "speckit-test-ext-world" / "SKILL.md").exists()

        # Remove extension
        result = manager.remove(manifest.id, keep_config=False)
        assert result is True

        # Skills should be gone
        assert not (skills_dir / "speckit-test-ext-hello").exists()
        assert not (skills_dir / "speckit-test-ext-world").exists()

    def test_other_skills_preserved_on_remove(self, skills_project, extension_dir):
        """Non-extension skills should not be affected by extension removal."""
        project_dir, skills_dir = skills_project

        # Pre-create a custom skill
        custom_dir = skills_dir / "my-custom-skill"
        custom_dir.mkdir(parents=True)
        (custom_dir / "SKILL.md").write_text("# My Custom Skill\n")

        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        manager.remove(manifest.id, keep_config=False)

        # Custom skill should still exist
        assert (custom_dir / "SKILL.md").exists()
        assert (custom_dir / "SKILL.md").read_text() == "# My Custom Skill\n"

    def test_remove_handles_already_deleted_skills(self, skills_project, extension_dir):
        """Gracefully handle case where skill dirs were already deleted."""
        project_dir, skills_dir = skills_project
        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        # Manually delete skill dirs before calling remove
        shutil.rmtree(skills_dir / "speckit-test-ext-hello")
        shutil.rmtree(skills_dir / "speckit-test-ext-world")

        # Should not raise
        result = manager.remove(manifest.id, keep_config=False)
        assert result is True

    def test_remove_no_skills_when_not_active(self, no_skills_project, extension_dir):
        """Removal without active skills should not attempt skill cleanup."""
        manager = ExtensionManager(no_skills_project)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        # Should not raise even though no skills exist
        result = manager.remove(manifest.id, keep_config=False)
        assert result is True


# ===== Command File Without Frontmatter =====

class TestExtensionSkillEdgeCases:
    """Test edge cases in extension skill registration."""

    def test_install_with_non_dict_init_options_does_not_crash(self, project_dir, extension_dir):
        """Corrupted init-options payloads should disable skill registration, not crash install."""
        opts_file = project_dir / ".specify" / "init-options.json"
        opts_file.parent.mkdir(parents=True, exist_ok=True)
        opts_file.write_text("[]")
        _create_skills_dir(project_dir, ai="claude")

        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        metadata = manager.registry.get(manifest.id)
        assert metadata["registered_skills"] == []

    def test_command_without_frontmatter(self, skills_project, temp_dir):
        """Commands without YAML frontmatter should still produce valid skills."""
        project_dir, skills_dir = skills_project

        ext_dir = temp_dir / "nofm-ext"
        ext_dir.mkdir()
        manifest_data = {
            "schema_version": "1.0",
            "extension": {
                "id": "nofm-ext",
                "name": "No Frontmatter Extension",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.nofm-ext.plain",
                        "file": "commands/plain.md",
                        "description": "Plain command",
                    }
                ]
            },
        }
        with open(ext_dir / "extension.yml", "w") as f:
            yaml.dump(manifest_data, f)

        (ext_dir / "commands").mkdir()
        (ext_dir / "commands" / "plain.md").write_text(
            "# Plain Command\n\nBody without frontmatter.\n"
        )

        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            ext_dir, "0.1.0", register_commands=False
        )

        skill_file = skills_dir / "speckit-nofm-ext-plain" / "SKILL.md"
        assert skill_file.exists()
        content = skill_file.read_text()
        assert "name: speckit-nofm-ext-plain" in content
        # Fallback description when no frontmatter description
        assert "Extension command: speckit.nofm-ext.plain" in content
        assert "Body without frontmatter." in content

    def test_gemini_agent_skills(self, project_dir, temp_dir):
        """Gemini agent should use .gemini/skills/ for skill directory."""
        _create_init_options(project_dir, ai="gemini", ai_skills=True)
        _create_skills_dir(project_dir, ai="gemini")
        ext_dir = _create_extension_dir(temp_dir, ext_id="test-ext")

        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            ext_dir, "0.1.0", register_commands=False
        )

        skills_dir = project_dir / ".gemini" / "skills"
        assert (skills_dir / "speckit-test-ext-hello" / "SKILL.md").exists()
        assert (skills_dir / "speckit-test-ext-world" / "SKILL.md").exists()

    def test_multiple_extensions_independent_skills(self, skills_project, temp_dir):
        """Installing and removing different extensions should be independent."""
        project_dir, skills_dir = skills_project

        ext_dir_a = _create_extension_dir(temp_dir, ext_id="ext-a")
        ext_dir_b = _create_extension_dir(temp_dir, ext_id="ext-b")

        manager = ExtensionManager(project_dir)
        manifest_a = manager.install_from_directory(
            ext_dir_a, "0.1.0", register_commands=False
        )
        manifest_b = manager.install_from_directory(
            ext_dir_b, "0.1.0", register_commands=False
        )

        # Both should have skills
        assert (skills_dir / "speckit-ext-a-hello" / "SKILL.md").exists()
        assert (skills_dir / "speckit-ext-b-hello" / "SKILL.md").exists()

        # Remove ext-a
        manager.remove("ext-a", keep_config=False)

        # ext-a skills gone, ext-b skills preserved
        assert not (skills_dir / "speckit-ext-a-hello").exists()
        assert (skills_dir / "speckit-ext-b-hello" / "SKILL.md").exists()

    def test_malformed_frontmatter_handled(self, skills_project, temp_dir):
        """Commands with invalid YAML frontmatter should still produce valid skills."""
        project_dir, skills_dir = skills_project

        ext_dir = temp_dir / "badfm-ext"
        ext_dir.mkdir()
        manifest_data = {
            "schema_version": "1.0",
            "extension": {
                "id": "badfm-ext",
                "name": "Bad Frontmatter Extension",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.badfm-ext.broken",
                        "file": "commands/broken.md",
                        "description": "Broken frontmatter",
                    }
                ]
            },
        }
        with open(ext_dir / "extension.yml", "w") as f:
            yaml.dump(manifest_data, f)

        (ext_dir / "commands").mkdir()
        # Malformed YAML: invalid key-value syntax
        (ext_dir / "commands" / "broken.md").write_text(
            "---\n"
            "description: [invalid yaml\n"
            "  unclosed: bracket\n"
            "---\n"
            "\n"
            "# Broken Command\n"
            "\n"
            "This body should still be used.\n"
        )

        manager = ExtensionManager(project_dir)
        # Should not raise
        manifest = manager.install_from_directory(
            ext_dir, "0.1.0", register_commands=False
        )

        skill_file = skills_dir / "speckit-badfm-ext-broken" / "SKILL.md"
        assert skill_file.exists()
        content = skill_file.read_text()
        # Fallback description since frontmatter was invalid
        assert "Extension command: speckit.badfm-ext.broken" in content
        assert "This body should still be used." in content

    def test_remove_cleans_up_when_init_options_deleted(self, skills_project, extension_dir):
        """Skills should be cleaned up even if init-options.json is deleted after install."""
        project_dir, skills_dir = skills_project
        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        # Verify skills exist
        assert (skills_dir / "speckit-test-ext-hello" / "SKILL.md").exists()

        # Delete init-options.json to simulate user change
        init_opts = project_dir / ".specify" / "init-options.json"
        init_opts.unlink()

        # Remove should still clean up via fallback scan
        result = manager.remove(manifest.id, keep_config=False)
        assert result is True
        assert not (skills_dir / "speckit-test-ext-hello").exists()
        assert not (skills_dir / "speckit-test-ext-world").exists()

    def test_remove_cleans_up_when_ai_skills_toggled(self, skills_project, extension_dir):
        """Skills should be cleaned up even if ai_skills is toggled to false after install."""
        project_dir, skills_dir = skills_project
        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        # Verify skills exist
        assert (skills_dir / "speckit-test-ext-hello" / "SKILL.md").exists()

        # Toggle ai_skills to false
        _create_init_options(project_dir, ai="claude", ai_skills=False)

        # Remove should still clean up via fallback scan
        result = manager.remove(manifest.id, keep_config=False)
        assert result is True
        assert not (skills_dir / "speckit-test-ext-hello").exists()
        assert not (skills_dir / "speckit-test-ext-world").exists()
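For reference, the content assertions in this file imply roughly the following shape for an auto-generated SKILL.md. This skeleton is reconstructed from the asserted substrings only, so the field order and the `compatibility` value are assumptions:

```
---
name: speckit-test-ext-hello
description: Test hello command
source: extension:test-ext
author: github-spec-kit
compatibility: ...   # asserted to be present; exact value not pinned by the tests
---

# Hello Command

Run this to say hello.
$ARGUMENTS
```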
@@ -18,10 +18,12 @@ from datetime import datetime, timezone
|
||||
|
||||
from specify_cli.extensions import (
|
||||
CatalogEntry,
|
||||
CORE_COMMAND_NAMES,
|
||||
ExtensionManifest,
|
||||
ExtensionRegistry,
|
||||
ExtensionManager,
|
||||
CommandRegistrar,
|
||||
HookExecutor,
|
||||
ExtensionCatalog,
|
||||
ExtensionError,
|
||||
ValidationError,
|
||||
@@ -62,7 +64,7 @@ def valid_manifest_data():
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.test.hello",
|
||||
"name": "speckit.test-ext.hello",
|
||||
"file": "commands/hello.md",
|
||||
"description": "Test command",
|
||||
}
|
||||
@@ -70,7 +72,7 @@ def valid_manifest_data():
|
||||
},
|
||||
"hooks": {
|
||||
"after_tasks": {
|
||||
"command": "speckit.test.hello",
|
||||
"command": "speckit.test-ext.hello",
|
||||
"optional": True,
|
||||
"prompt": "Run test?",
|
||||
}
|
||||
@@ -188,7 +190,18 @@ class TestExtensionManifest:
|
||||
assert manifest.version == "1.0.0"
|
||||
assert manifest.description == "A test extension"
|
||||
assert len(manifest.commands) == 1
|
||||
assert manifest.commands[0]["name"] == "speckit.test.hello"
|
||||
assert manifest.commands[0]["name"] == "speckit.test-ext.hello"
|
||||
|
||||
def test_core_command_names_match_bundled_templates(self):
|
||||
"""Core command reservations should stay aligned with bundled templates."""
|
||||
commands_dir = Path(__file__).resolve().parent.parent / "templates" / "commands"
|
||||
expected = {
|
||||
command_file.stem
|
||||
for command_file in commands_dir.iterdir()
|
||||
if command_file.is_file() and command_file.suffix == ".md"
|
||||
}
|
||||
|
||||
assert CORE_COMMAND_NAMES == expected
|
||||
|
||||
def test_missing_required_field(self, temp_dir):
|
||||
"""Test manifest missing required field."""
|
||||
@@ -588,6 +601,172 @@ class TestExtensionManager:
|
||||
with pytest.raises(ExtensionError, match="already installed"):
|
||||
manager.install_from_directory(extension_dir, "0.1.0", register_commands=False)
|
||||
|
||||
def test_install_rejects_extension_id_in_core_namespace(self, temp_dir, project_dir):
|
||||
"""Install should reject extension IDs that shadow core commands."""
|
||||
import yaml
|
||||
|
||||
ext_dir = temp_dir / "analyze-ext"
|
||||
ext_dir.mkdir()
|
||||
(ext_dir / "commands").mkdir()
|
||||
|
||||
manifest_data = {
|
||||
"schema_version": "1.0",
|
||||
"extension": {
|
||||
"id": "analyze",
|
||||
"name": "Analyze Extension",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.analyze.extra",
|
||||
"file": "commands/cmd.md",
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
|
||||
(ext_dir / "extension.yml").write_text(yaml.dump(manifest_data))
|
||||
(ext_dir / "commands" / "cmd.md").write_text("---\ndescription: Test\n---\n\nBody")
|
||||
|
||||
manager = ExtensionManager(project_dir)
|
||||
with pytest.raises(ValidationError, match="conflicts with core command namespace"):
|
||||
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
|
||||
|
||||
def test_install_rejects_alias_without_extension_namespace(self, temp_dir, project_dir):
|
||||
"""Install should reject legacy short aliases that can shadow core commands."""
|
||||
import yaml
|
||||
|
||||
ext_dir = temp_dir / "alias-shortcut"
|
||||
ext_dir.mkdir()
|
||||
(ext_dir / "commands").mkdir()
|
||||
|
||||
manifest_data = {
|
||||
"schema_version": "1.0",
|
||||
"extension": {
|
||||
"id": "alias-shortcut",
|
||||
"name": "Alias Shortcut",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.alias-shortcut.cmd",
|
||||
"file": "commands/cmd.md",
|
||||
"aliases": ["speckit.shortcut"],
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
        (ext_dir / "extension.yml").write_text(yaml.dump(manifest_data))
        (ext_dir / "commands" / "cmd.md").write_text("---\ndescription: Test\n---\n\nBody")

        manager = ExtensionManager(project_dir)
        with pytest.raises(ValidationError, match="Invalid alias 'speckit.shortcut'"):
            manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)

    def test_install_rejects_namespace_squatting(self, temp_dir, project_dir):
        """Install should reject commands and aliases outside the extension namespace."""
        import yaml

        ext_dir = temp_dir / "squat-ext"
        ext_dir.mkdir()
        (ext_dir / "commands").mkdir()

        manifest_data = {
            "schema_version": "1.0",
            "extension": {
                "id": "squat-ext",
                "name": "Squat Extension",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.other-ext.cmd",
                        "file": "commands/cmd.md",
                        "aliases": ["speckit.squat-ext.ok"],
                    }
                ]
            },
        }

        (ext_dir / "extension.yml").write_text(yaml.dump(manifest_data))
        (ext_dir / "commands" / "cmd.md").write_text("---\ndescription: Test\n---\n\nBody")

        manager = ExtensionManager(project_dir)
        with pytest.raises(ValidationError, match="must use extension namespace 'squat-ext'"):
            manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)

    def test_install_rejects_command_collision_with_installed_extension(self, temp_dir, project_dir):
        """Install should reject names already claimed by an installed legacy extension."""
        import yaml

        first_dir = temp_dir / "ext-one"
        first_dir.mkdir()
        (first_dir / "commands").mkdir()
        first_manifest = {
            "schema_version": "1.0",
            "extension": {
                "id": "ext-one",
                "name": "Extension One",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.ext-one.sync",
                        "file": "commands/cmd.md",
                        "aliases": ["speckit.shared.sync"],
                    }
                ]
            },
        }
        (first_dir / "extension.yml").write_text(yaml.dump(first_manifest))
        (first_dir / "commands" / "cmd.md").write_text("---\ndescription: Test\n---\n\nBody")
        installed_ext_dir = project_dir / ".specify" / "extensions" / "ext-one"
        installed_ext_dir.parent.mkdir(parents=True, exist_ok=True)
        shutil.copytree(first_dir, installed_ext_dir)

        second_dir = temp_dir / "ext-two"
        second_dir.mkdir()
        (second_dir / "commands").mkdir()
        second_manifest = {
            "schema_version": "1.0",
            "extension": {
                "id": "shared",
                "name": "Shared Extension",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.shared.sync",
                        "file": "commands/cmd.md",
                    }
                ]
            },
        }
        (second_dir / "extension.yml").write_text(yaml.dump(second_manifest))
        (second_dir / "commands" / "cmd.md").write_text("---\ndescription: Test\n---\n\nBody")

        manager = ExtensionManager(project_dir)
        manager.registry.add("ext-one", {"version": "1.0.0", "source": "local"})

        with pytest.raises(ValidationError, match="already provided by extension 'ext-one'"):
            manager.install_from_directory(second_dir, "0.1.0", register_commands=False)
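
Both install-rejection tests above pin down the same namespace rule: every command name and alias an extension provides must sit under `speckit.<extension-id>.`, and may not reuse a name an already-installed extension has claimed. A minimal sketch of that kind of check — the helper name, error strings, and registry shape are illustrative assumptions, not the actual spec-kit implementation:

```python
class ValidationError(Exception):
    """Raised when a manifest provides names it is not allowed to claim."""


def validate_command_names(ext_id: str, commands: list, installed: dict) -> None:
    """Hypothetical namespace/collision check mirroring the tests above.

    `installed` maps extension ids to the set of command names they provide.
    """
    prefix = f"speckit.{ext_id}."
    for cmd in commands:
        for name in [cmd["name"], *cmd.get("aliases", [])]:
            if not name.startswith(prefix):
                raise ValidationError(f"'{name}' must use extension namespace '{ext_id}'")
            for other_id, names in installed.items():
                if other_id != ext_id and name in names:
                    raise ValidationError(f"'{name}' is already provided by extension '{other_id}'")
```

Under this rule, `squat-ext` cannot publish `speckit.other-ext.cmd`, and `shared` cannot publish `speckit.shared.sync` once `ext-one` has claimed it as an alias.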
    def test_remove_extension(self, extension_dir, project_dir):
        """Test removing an installed extension."""
        manager = ExtensionManager(project_dir)
@@ -759,6 +938,81 @@ $ARGUMENTS
        assert "Prüfe Konformität" in output
        assert "\\u" not in output

    def test_adjust_script_paths_does_not_mutate_input(self):
        """Path adjustments should not mutate caller-owned frontmatter dicts."""
        from specify_cli.agents import CommandRegistrar as AgentCommandRegistrar
        registrar = AgentCommandRegistrar()
        original = {
            "scripts": {
                "sh": "../../scripts/bash/setup-plan.sh {ARGS}",
                "ps": "../../scripts/powershell/setup-plan.ps1 {ARGS}",
            }
        }
        before = json.loads(json.dumps(original))

        adjusted = registrar._adjust_script_paths(original)

        assert original == before
        assert adjusted["scripts"]["sh"] == ".specify/scripts/bash/setup-plan.sh {ARGS}"
        assert adjusted["scripts"]["ps"] == ".specify/scripts/powershell/setup-plan.ps1 {ARGS}"

    def test_adjust_script_paths_preserves_extension_local_paths(self):
        """Extension-local script paths should not be rewritten into .specify/.specify."""
        from specify_cli.agents import CommandRegistrar as AgentCommandRegistrar
        registrar = AgentCommandRegistrar()
        original = {
            "scripts": {
                "sh": ".specify/extensions/test-ext/scripts/setup.sh {ARGS}",
                "ps": "scripts/powershell/setup-plan.ps1 {ARGS}",
            }
        }

        adjusted = registrar._adjust_script_paths(original)

        assert adjusted["scripts"]["sh"] == ".specify/extensions/test-ext/scripts/setup.sh {ARGS}"
        assert adjusted["scripts"]["ps"] == ".specify/scripts/powershell/setup-plan.ps1 {ARGS}"
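
The two `_adjust_script_paths` tests encode a small contract: template-relative script paths (`../../scripts/...` or bare `scripts/...`) get re-rooted under `.specify/`, paths already anchored at `.specify/` (such as extension-local assets) are left untouched, and the caller's dict is never mutated. A sketch of that behavior — an illustration of the contract, not the real `CommandRegistrar` code:

```python
import copy


def adjust_script_paths(frontmatter: dict) -> dict:
    """Return a copy whose script paths are rooted at .specify/ (sketch)."""
    adjusted = copy.deepcopy(frontmatter)  # the first test asserts the input is untouched
    scripts = adjusted.get("scripts", {})
    for key, path in scripts.items():
        if path.startswith(".specify/"):
            continue  # extension-local paths stay exactly where they are
        while path.startswith("../"):
            path = path[len("../"):]
        scripts[key] = f".specify/{path}"
    return adjusted


original = {"scripts": {"sh": "../../scripts/bash/setup-plan.sh {ARGS}"}}
assert adjust_script_paths(original)["scripts"]["sh"] == ".specify/scripts/bash/setup-plan.sh {ARGS}"
assert original["scripts"]["sh"].startswith("../../")  # caller's dict unchanged
```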
    def test_rewrite_project_relative_paths_preserves_extension_local_body_paths(self):
        """Body rewrites should preserve extension-local assets while fixing top-level refs."""
        from specify_cli.agents import CommandRegistrar as AgentCommandRegistrar

        body = (
            "Read `.specify/extensions/test-ext/templates/spec.md`\n"
            "Run scripts/bash/setup-plan.sh\n"
        )

        rewritten = AgentCommandRegistrar._rewrite_project_relative_paths(body)

        assert ".specify/extensions/test-ext/templates/spec.md" in rewritten
        assert ".specify/scripts/bash/setup-plan.sh" in rewritten

    def test_render_toml_command_handles_embedded_triple_double_quotes(self):
        """TOML renderer should stay valid when body includes triple double-quotes."""
        from specify_cli.agents import CommandRegistrar as AgentCommandRegistrar
        registrar = AgentCommandRegistrar()
        output = registrar.render_toml_command(
            {"description": "x"},
            'line1\n"""danger"""\nline2',
            "extension:test-ext",
        )

        assert "prompt = '''" in output
        assert '"""danger"""' in output

    def test_render_toml_command_escapes_when_both_triple_quote_styles_exist(self):
        """If body has both triple quote styles, fall back to escaped basic string."""
        from specify_cli.agents import CommandRegistrar as AgentCommandRegistrar
        registrar = AgentCommandRegistrar()
        output = registrar.render_toml_command(
            {"description": "x"},
            'a """ b\nc \'\'\' d',
            "extension:test-ext",
        )

        assert 'prompt = "' in output
        assert "\\n" in output
        assert "\\\"\\\"\\\"" in output
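
The two renderer tests fix a quoting ladder for embedding a command body in TOML: prefer a `'''` literal multiline string, which a `"""` sequence inside the body cannot break, and only when the body also contains `'''` fall back to a single-line basic string with everything escaped. A sketch of that decision, using a hypothetical `render_prompt` helper rather than the real renderer:

```python
def render_prompt(body: str) -> str:
    """Choose a TOML string form the body cannot break out of (sketch)."""
    if "'''" not in body:
        # Literal multiline string: contents are verbatim, so embedded
        # triple-double-quotes survive untouched.
        return f"prompt = '''\n{body}\n'''"
    # Both triple-quote styles present: escape into a basic single-line string.
    escaped = body.replace("\\", "\\\\").replace('"', '\\"').replace("\n", "\\n")
    return f'prompt = "{escaped}"'


assert '"""danger"""' in render_prompt('line1\n"""danger"""\nline2')
assert render_prompt('a """ b\nc \'\'\' d').startswith('prompt = "')
```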
    def test_register_commands_for_claude(self, extension_dir, project_dir):
        """Test registering commands for Claude agent."""
        # Create .claude directory
@@ -776,10 +1030,10 @@ $ARGUMENTS
        )

        assert len(registered) == 1
        assert "speckit.test.hello" in registered
        assert "speckit.test-ext.hello" in registered

        # Check command file was created
        cmd_file = claude_dir / "speckit.test.hello.md"
        cmd_file = claude_dir / "speckit.test-ext.hello.md"
        assert cmd_file.exists()

        content = cmd_file.read_text()
@@ -809,9 +1063,9 @@ $ARGUMENTS
            "provides": {
                "commands": [
                    {
                        "name": "speckit.alias.cmd",
                        "name": "speckit.ext-alias.cmd",
                        "file": "commands/cmd.md",
                        "aliases": ["speckit.shortcut"],
                        "aliases": ["speckit.ext-alias.shortcut"],
                    }
                ]
            },
@@ -831,10 +1085,10 @@ $ARGUMENTS
        registered = registrar.register_commands_for_claude(manifest, ext_dir, project_dir)

        assert len(registered) == 2
        assert "speckit.alias.cmd" in registered
        assert "speckit.shortcut" in registered
        assert (claude_dir / "speckit.alias.cmd.md").exists()
        assert (claude_dir / "speckit.shortcut.md").exists()
        assert "speckit.ext-alias.cmd" in registered
        assert "speckit.ext-alias.shortcut" in registered
        assert (claude_dir / "speckit.ext-alias.cmd.md").exists()
        assert (claude_dir / "speckit.ext-alias.shortcut.md").exists()

    def test_unregister_commands_for_codex_skills_uses_mapped_names(self, project_dir):
        """Codex skill cleanup should use the same mapped names as registration."""
@@ -875,11 +1129,11 @@ $ARGUMENTS
        registrar = CommandRegistrar()
        registrar.register_commands_for_agent("codex", manifest, extension_dir, project_dir)

        skill_file = skills_dir / "speckit-test.hello" / "SKILL.md"
        skill_file = skills_dir / "speckit-test-ext-hello" / "SKILL.md"
        assert skill_file.exists()

        content = skill_file.read_text()
        assert "name: speckit-test.hello" in content
        assert "name: speckit-test-ext-hello" in content
        assert "description: Test hello command" in content
        assert "compatibility:" in content
        assert "metadata:" in content
@@ -906,7 +1160,7 @@ $ARGUMENTS
            "provides": {
                "commands": [
                    {
                        "name": "speckit.test.plan",
                        "name": "speckit.ext-scripted.plan",
                        "file": "commands/plan.md",
                        "description": "Scripted command",
                    }
@@ -944,7 +1198,7 @@ Agent __AGENT__
        registrar = CommandRegistrar()
        registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)

        skill_file = skills_dir / "speckit-test.plan" / "SKILL.md"
        skill_file = skills_dir / "speckit-ext-scripted-plan" / "SKILL.md"
        assert skill_file.exists()

        content = skill_file.read_text()
@@ -975,9 +1229,9 @@ Agent __AGENT__
            "provides": {
                "commands": [
                    {
                        "name": "speckit.alias.cmd",
                        "name": "speckit.ext-alias-skill.cmd",
                        "file": "commands/cmd.md",
                        "aliases": ["speckit.shortcut"],
                        "aliases": ["speckit.ext-alias-skill.shortcut"],
                    }
                ]
            },
@@ -994,13 +1248,13 @@ Agent __AGENT__
        registrar = CommandRegistrar()
        registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)

        primary = skills_dir / "speckit-alias.cmd" / "SKILL.md"
        alias = skills_dir / "speckit-shortcut" / "SKILL.md"
        primary = skills_dir / "speckit-ext-alias-skill-cmd" / "SKILL.md"
        alias = skills_dir / "speckit-ext-alias-skill-shortcut" / "SKILL.md"

        assert primary.exists()
        assert alias.exists()
        assert "name: speckit-alias.cmd" in primary.read_text()
        assert "name: speckit-shortcut" in alias.read_text()
        assert "name: speckit-ext-alias-skill-cmd" in primary.read_text()
        assert "name: speckit-ext-alias-skill-shortcut" in alias.read_text()

    def test_codex_skill_registration_uses_fallback_script_variant_without_init_options(
        self, project_dir, temp_dir
@@ -1024,7 +1278,7 @@ Agent __AGENT__
            "provides": {
                "commands": [
                    {
                        "name": "speckit.fallback.plan",
                        "name": "speckit.ext-script-fallback.plan",
                        "file": "commands/plan.md",
                    }
                ]
@@ -1056,7 +1310,7 @@ Then {AGENT_SCRIPT}
        registrar = CommandRegistrar()
        registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)

        skill_file = skills_dir / "speckit-fallback.plan" / "SKILL.md"
        skill_file = skills_dir / "speckit-ext-script-fallback-plan" / "SKILL.md"
        assert skill_file.exists()

        content = skill_file.read_text()
@@ -1065,6 +1319,62 @@ Then {AGENT_SCRIPT}
        assert '.specify/scripts/bash/setup-plan.sh --json "$ARGUMENTS"' in content
        assert ".specify/scripts/bash/update-agent-context.sh codex" in content

    def test_codex_skill_registration_handles_non_dict_init_options(
        self, project_dir, temp_dir
    ):
        """Non-dict init-options payloads should not crash skill placeholder resolution."""
        import yaml

        ext_dir = temp_dir / "ext-script-list-init"
        ext_dir.mkdir()
        (ext_dir / "commands").mkdir()

        manifest_data = {
            "schema_version": "1.0",
            "extension": {
                "id": "ext-script-list-init",
                "name": "List init options",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.ext-script-list-init.plan",
                        "file": "commands/plan.md",
                    }
                ]
            },
        }
        with open(ext_dir / "extension.yml", "w") as f:
            yaml.dump(manifest_data, f)

        (ext_dir / "commands" / "plan.md").write_text(
            """---
description: "List init scripted command"
scripts:
  sh: ../../scripts/bash/setup-plan.sh --json "{ARGS}"
---

Run {SCRIPT}
"""
        )

        init_options = project_dir / ".specify" / "init-options.json"
        init_options.parent.mkdir(parents=True, exist_ok=True)
        init_options.write_text("[]")

        skills_dir = project_dir / ".agents" / "skills"
        skills_dir.mkdir(parents=True)

        manifest = ExtensionManifest(ext_dir / "extension.yml")
        registrar = CommandRegistrar()
        registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)

        content = (skills_dir / "speckit-ext-script-list-init-plan" / "SKILL.md").read_text()
        assert '.specify/scripts/bash/setup-plan.sh --json "$ARGUMENTS"' in content

    def test_codex_skill_registration_fallback_prefers_powershell_on_windows(
        self, project_dir, temp_dir, monkeypatch
    ):
@@ -1089,7 +1399,7 @@ Then {AGENT_SCRIPT}
            "provides": {
                "commands": [
                    {
                        "name": "speckit.windows.plan",
                        "name": "speckit.ext-script-windows-fallback.plan",
                        "file": "commands/plan.md",
                    }
                ]
@@ -1121,7 +1431,7 @@ Then {AGENT_SCRIPT}
        registrar = CommandRegistrar()
        registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)

        skill_file = skills_dir / "speckit-windows.plan" / "SKILL.md"
        skill_file = skills_dir / "speckit-ext-script-windows-fallback-plan" / "SKILL.md"
        assert skill_file.exists()

        content = skill_file.read_text()
@@ -1143,14 +1453,14 @@ Then {AGENT_SCRIPT}
        )

        assert len(registered) == 1
        assert "speckit.test.hello" in registered
        assert "speckit.test-ext.hello" in registered

        # Verify command file uses .agent.md extension
        cmd_file = agents_dir / "speckit.test.hello.agent.md"
        cmd_file = agents_dir / "speckit.test-ext.hello.agent.md"
        assert cmd_file.exists()

        # Verify NO plain .md file was created
        plain_md_file = agents_dir / "speckit.test.hello.md"
        plain_md_file = agents_dir / "speckit.test-ext.hello.md"
        assert not plain_md_file.exists()

        content = cmd_file.read_text()
@@ -1170,12 +1480,12 @@ Then {AGENT_SCRIPT}
        )

        # Verify companion .prompt.md file exists
        prompt_file = project_dir / ".github" / "prompts" / "speckit.test.hello.prompt.md"
        prompt_file = project_dir / ".github" / "prompts" / "speckit.test-ext.hello.prompt.md"
        assert prompt_file.exists()

        # Verify content has correct agent frontmatter
        content = prompt_file.read_text()
        assert content == "---\nagent: speckit.test.hello\n---\n"
        assert content == "---\nagent: speckit.test-ext.hello\n---\n"

    def test_copilot_aliases_get_companion_prompts(self, project_dir, temp_dir):
        """Test that aliases also get companion .prompt.md files for Copilot."""
@@ -1196,9 +1506,9 @@ Then {AGENT_SCRIPT}
            "provides": {
                "commands": [
                    {
                        "name": "speckit.alias-copilot.cmd",
                        "name": "speckit.ext-alias-copilot.cmd",
                        "file": "commands/cmd.md",
                        "aliases": ["speckit.shortcut-copilot"],
                        "aliases": ["speckit.ext-alias-copilot.shortcut"],
                    }
                ]
            },
@@ -1225,8 +1535,8 @@ Then {AGENT_SCRIPT}

        # Both primary and alias get companion .prompt.md
        prompts_dir = project_dir / ".github" / "prompts"
        assert (prompts_dir / "speckit.alias-copilot.cmd.prompt.md").exists()
        assert (prompts_dir / "speckit.shortcut-copilot.prompt.md").exists()
        assert (prompts_dir / "speckit.ext-alias-copilot.cmd.prompt.md").exists()
        assert (prompts_dir / "speckit.ext-alias-copilot.shortcut.prompt.md").exists()

    def test_non_copilot_agent_no_companion_file(self, extension_dir, project_dir):
        """Test that non-copilot agents do NOT create .prompt.md files."""
@@ -1299,7 +1609,7 @@ class TestIntegration:
        assert installed[0]["id"] == "test-ext"

        # Verify command registered
        cmd_file = project_dir / ".claude" / "commands" / "speckit.test.hello.md"
        cmd_file = project_dir / ".claude" / "commands" / "speckit.test-ext.hello.md"
        assert cmd_file.exists()

        # Verify registry has registered commands (now a dict keyed by agent)
@@ -1307,7 +1617,7 @@ class TestIntegration:
        registered_commands = metadata["registered_commands"]
        # Check that the command is registered for at least one agent
        assert any(
            "speckit.test.hello" in cmds
            "speckit.test-ext.hello" in cmds
            for cmds in registered_commands.values()
        )

@@ -1333,8 +1643,8 @@ class TestIntegration:
        assert "copilot" in metadata["registered_commands"]

        # Verify files exist before cleanup
        agent_file = agents_dir / "speckit.test.hello.agent.md"
        prompt_file = project_dir / ".github" / "prompts" / "speckit.test.hello.prompt.md"
        agent_file = agents_dir / "speckit.test-ext.hello.agent.md"
        prompt_file = project_dir / ".github" / "prompts" / "speckit.test-ext.hello.prompt.md"
        assert agent_file.exists()
        assert prompt_file.exists()

@@ -2644,7 +2954,7 @@ class TestExtensionUpdateCLI:
            "provides": {
                "commands": [
                    {
                        "name": "speckit.test.hello",
                        "name": "speckit.test-ext.hello",
                        "file": "commands/hello.md",
                        "description": "Test command",
                    }
@@ -2652,7 +2962,7 @@ class TestExtensionUpdateCLI:
            },
            "hooks": {
                "after_tasks": {
                    "command": "speckit.test.hello",
                    "command": "speckit.test-ext.hello",
                    "optional": True,
                }
            },
@@ -2681,7 +2991,7 @@ class TestExtensionUpdateCLI:
                "description": "A test extension",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {"commands": [{"name": "speckit.test.hello", "file": "commands/hello.md"}]},
            "provides": {"commands": [{"name": "speckit.test-ext.hello", "file": "commands/hello.md"}]},
        }

        with zipfile.ZipFile(zip_path, "w") as zf:
@@ -3231,3 +3541,128 @@ class TestExtensionPriorityBackwardsCompatibility:
        assert result[0][0] == "ext-with-priority"
        assert result[1][0] == "legacy-ext"
        assert result[2][0] == "ext-low-priority"


class TestHookInvocationRendering:
    """Test hook invocation formatting for different agent modes."""

    def test_kimi_hooks_render_skill_invocation(self, project_dir):
        """Kimi projects should render /skill:speckit-* invocations."""
        init_options = project_dir / ".specify" / "init-options.json"
        init_options.parent.mkdir(parents=True, exist_ok=True)
        init_options.write_text(json.dumps({"ai": "kimi", "ai_skills": False}))

        hook_executor = HookExecutor(project_dir)
        message = hook_executor.format_hook_message(
            "before_plan",
            [
                {
                    "extension": "test-ext",
                    "command": "speckit.plan",
                    "optional": False,
                }
            ],
        )

        assert "Executing: `/skill:speckit-plan`" in message
        assert "EXECUTE_COMMAND: speckit.plan" in message
        assert "EXECUTE_COMMAND_INVOCATION: /skill:speckit-plan" in message

    def test_codex_hooks_render_dollar_skill_invocation(self, project_dir):
        """Codex projects with --ai-skills should render $speckit-* invocations."""
        init_options = project_dir / ".specify" / "init-options.json"
        init_options.parent.mkdir(parents=True, exist_ok=True)
        init_options.write_text(json.dumps({"ai": "codex", "ai_skills": True}))

        hook_executor = HookExecutor(project_dir)
        execution = hook_executor.execute_hook(
            {
                "extension": "test-ext",
                "command": "speckit.tasks",
                "optional": False,
            }
        )

        assert execution["command"] == "speckit.tasks"
        assert execution["invocation"] == "$speckit-tasks"

    def test_non_skill_command_keeps_slash_invocation(self, project_dir):
        """Custom hook commands should keep slash invocation style."""
        init_options = project_dir / ".specify" / "init-options.json"
        init_options.parent.mkdir(parents=True, exist_ok=True)
        init_options.write_text(json.dumps({"ai": "kimi", "ai_skills": False}))

        hook_executor = HookExecutor(project_dir)
        message = hook_executor.format_hook_message(
            "before_tasks",
            [
                {
                    "extension": "test-ext",
                    "command": "pre_tasks_test",
                    "optional": False,
                }
            ],
        )

        assert "Executing: `/pre_tasks_test`" in message
        assert "EXECUTE_COMMAND: pre_tasks_test" in message
        assert "EXECUTE_COMMAND_INVOCATION: /pre_tasks_test" in message

    def test_extension_command_uses_hyphenated_skill_invocation(self, project_dir):
        """Multi-segment extension command ids should map to hyphenated skills."""
        init_options = project_dir / ".specify" / "init-options.json"
        init_options.parent.mkdir(parents=True, exist_ok=True)
        init_options.write_text(json.dumps({"ai": "kimi", "ai_skills": False}))

        hook_executor = HookExecutor(project_dir)
        message = hook_executor.format_hook_message(
            "after_tasks",
            [
                {
                    "extension": "test-ext",
                    "command": "speckit.test-ext.hello",
                    "optional": False,
                }
            ],
        )

        assert "Executing: `/skill:speckit-test-ext-hello`" in message
        assert "EXECUTE_COMMAND: speckit.test-ext.hello" in message
        assert "EXECUTE_COMMAND_INVOCATION: /skill:speckit-test-ext-hello" in message

    def test_hook_executor_caches_init_options_lookup(self, project_dir, monkeypatch):
        """Init options should be loaded once per executor instance."""
        calls = {"count": 0}

        def fake_load_init_options(_project_root):
            calls["count"] += 1
            return {"ai": "kimi", "ai_skills": False}

        monkeypatch.setattr("specify_cli.load_init_options", fake_load_init_options)

        hook_executor = HookExecutor(project_dir)
        assert hook_executor._render_hook_invocation("speckit.plan") == "/skill:speckit-plan"
        assert hook_executor._render_hook_invocation("speckit.tasks") == "/skill:speckit-tasks"
        assert calls["count"] == 1

    def test_hook_message_falls_back_when_invocation_is_empty(self, project_dir):
        """Hook messages should still render actionable command placeholders."""
        init_options = project_dir / ".specify" / "init-options.json"
        init_options.parent.mkdir(parents=True, exist_ok=True)
        init_options.write_text(json.dumps({"ai": "kimi", "ai_skills": False}))

        hook_executor = HookExecutor(project_dir)
        message = hook_executor.format_hook_message(
            "after_tasks",
            [
                {
                    "extension": "test-ext",
                    "command": None,
                    "optional": False,
                }
            ],
        )

        assert "Executing: `/<missing command>`" in message
        assert "EXECUTE_COMMAND: <missing command>" in message
        assert "EXECUTE_COMMAND_INVOCATION: /<missing command>" in message
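
The class above pins the mapping from a hook's command id to the invocation string the active agent can run: `speckit.*` ids become skill invocations (`/skill:speckit-plan` for Kimi, `$speckit-tasks` for Codex with skills enabled) with dots flattened to hyphens, anything else keeps the plain slash form, and a missing command degrades to an explicit placeholder. A sketch of that mapping with the mode detection reduced to two values — the names and branching here are assumptions, not the real `HookExecutor`:

```python
def render_hook_invocation(command, ai: str, ai_skills: bool) -> str:
    """Map a hook command id to an agent-facing invocation (sketch)."""
    if not command:
        return "/<missing command>"
    if command.startswith("speckit."):
        skill = command.replace(".", "-")  # speckit.test-ext.hello -> speckit-test-ext-hello
        if ai == "codex" and ai_skills:
            return f"${skill}"             # Codex skills use $-invocations
        if ai == "kimi":
            return f"/skill:{skill}"       # Kimi renders /skill:speckit-*
    return f"/{command}"                   # custom commands keep slash style


assert render_hook_invocation("speckit.plan", "kimi", False) == "/skill:speckit-plan"
assert render_hook_invocation("speckit.tasks", "codex", True) == "$speckit-tasks"
assert render_hook_invocation("pre_tasks_test", "kimi", False) == "/pre_tasks_test"
assert render_hook_invocation(None, "kimi", False) == "/<missing command>"
```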
@@ -1170,8 +1170,12 @@ class TestPresetCatalog:
        assert not catalog.cache_file.exists()
        assert not catalog.cache_metadata_file.exists()

    def test_search_with_cached_data(self, project_dir):
    def test_search_with_cached_data(self, project_dir, monkeypatch):
        """Test search with cached catalog data."""
        from unittest.mock import patch

        # Only use the default catalog to prevent fetching the community catalog from the network
        monkeypatch.setenv("SPECKIT_PRESET_CATALOG_URL", PresetCatalog.DEFAULT_CATALOG_URL)
        catalog = PresetCatalog(project_dir)
        catalog.cache_dir.mkdir(parents=True, exist_ok=True)

@@ -1200,23 +1204,26 @@ class TestPresetCatalog:
            "cached_at": datetime.now(timezone.utc).isoformat(),
        }))

        # Search by query
        results = catalog.search(query="agile")
        assert len(results) == 1
        assert results[0]["id"] == "safe-agile"
        # Isolate from community catalog so results are deterministic
        default_only = [PresetCatalogEntry(url=catalog.DEFAULT_CATALOG_URL, name="default", priority=1, install_allowed=True)]
        with patch.object(catalog, "get_active_catalogs", return_value=default_only):
            # Search by query
            results = catalog.search(query="agile")
            assert len(results) == 1
            assert results[0]["id"] == "safe-agile"

        # Search by tag
        results = catalog.search(tag="hipaa")
        assert len(results) == 1
        assert results[0]["id"] == "healthcare"
            # Search by tag
            results = catalog.search(tag="hipaa")
            assert len(results) == 1
            assert results[0]["id"] == "healthcare"

        # Search by author
        results = catalog.search(author="agile-community")
        assert len(results) == 1
            # Search by author
            results = catalog.search(author="agile-community")
            assert len(results) == 1

        # Search all
        results = catalog.search()
        assert len(results) == 2
            # Search all
            results = catalog.search()
            assert len(results) == 2

    def test_get_pack_info(self, project_dir):
        """Test getting info for a specific pack."""
@@ -1935,10 +1942,10 @@ class TestInitOptions:
class TestPresetSkills:
    """Tests for preset skill registration and unregistration."""

    def _write_init_options(self, project_dir, ai="claude", ai_skills=True):
    def _write_init_options(self, project_dir, ai="claude", ai_skills=True, script="sh"):
        from specify_cli import save_init_options

        save_init_options(project_dir, {"ai": ai, "ai_skills": ai_skills})
        save_init_options(project_dir, {"ai": ai, "ai_skills": ai_skills, "script": script})

    def _create_skill(self, skills_dir, skill_name, body="original body"):
        skill_dir = skills_dir / skill_name
@@ -1988,6 +1995,26 @@ class TestPresetSkills:
        content = skill_file.read_text()
        assert "untouched" in content, "Skill should not be modified when ai_skills=False"

    def test_get_skills_dir_returns_none_for_non_string_ai(self, project_dir):
        """Corrupted init-options ai values should not crash preset skill resolution."""
        init_options = project_dir / ".specify" / "init-options.json"
        init_options.parent.mkdir(parents=True, exist_ok=True)
        init_options.write_text('{"ai":["codex"],"ai_skills":true,"script":"sh"}')

        manager = PresetManager(project_dir)

        assert manager._get_skills_dir() is None

    def test_get_skills_dir_returns_none_for_non_dict_init_options(self, project_dir):
        """Corrupted non-dict init-options payloads should fail closed."""
        init_options = project_dir / ".specify" / "init-options.json"
        init_options.parent.mkdir(parents=True, exist_ok=True)
        init_options.write_text("[]")

        manager = PresetManager(project_dir)

        assert manager._get_skills_dir() is None

    def test_skill_not_updated_without_init_options(self, project_dir, temp_dir):
        """When no init-options.json exists, preset install should not touch skills."""
        skills_dir = project_dir / ".claude" / "skills"
@@ -2033,6 +2060,52 @@ class TestPresetSkills:
        assert "preset:self-test" not in content, "Preset content should be gone"
        assert "templates/commands/specify.md" in content, "Should reference core template"

    def test_skill_restored_on_remove_resolves_script_placeholders(self, project_dir):
        """Core restore should resolve {SCRIPT}/{ARGS} placeholders like other skill paths."""
        self._write_init_options(project_dir, ai="claude", ai_skills=True, script="sh")
        skills_dir = project_dir / ".claude" / "skills"
        self._create_skill(skills_dir, "speckit-specify", body="old")
        (project_dir / ".claude" / "commands").mkdir(parents=True, exist_ok=True)

        core_cmds = project_dir / ".specify" / "templates" / "commands"
        core_cmds.mkdir(parents=True, exist_ok=True)
        (core_cmds / "specify.md").write_text(
            "---\n"
            "description: Core specify command\n"
            "scripts:\n"
            "  sh: .specify/scripts/bash/create-new-feature.sh --json \"{ARGS}\"\n"
            "---\n\n"
            "Run:\n"
            "{SCRIPT}\n"
        )

        manager = PresetManager(project_dir)
        SELF_TEST_DIR = Path(__file__).parent.parent / "presets" / "self-test"
        manager.install_from_directory(SELF_TEST_DIR, "0.1.5")
        manager.remove("self-test")

        content = (skills_dir / "speckit-specify" / "SKILL.md").read_text()
        assert "{SCRIPT}" not in content
        assert "{ARGS}" not in content
        assert ".specify/scripts/bash/create-new-feature.sh --json \"$ARGUMENTS\"" in content

    def test_skill_not_overridden_when_skill_path_is_file(self, project_dir):
        """Preset install should skip non-directory skill targets."""
        self._write_init_options(project_dir, ai="claude")
        skills_dir = project_dir / ".claude" / "skills"
        skills_dir.mkdir(parents=True, exist_ok=True)
        (skills_dir / "speckit-specify").write_text("not-a-directory")

        (project_dir / ".claude" / "commands").mkdir(parents=True, exist_ok=True)

        manager = PresetManager(project_dir)
        SELF_TEST_DIR = Path(__file__).parent.parent / "presets" / "self-test"
        manager.install_from_directory(SELF_TEST_DIR, "0.1.5")

        assert (skills_dir / "speckit-specify").is_file()
        metadata = manager.registry.get("self-test")
        assert "speckit-specify" not in metadata.get("registered_skills", [])

    def test_no_skills_registered_when_no_skill_dir_exists(self, project_dir, temp_dir):
        """Skills should not be created when no existing skill dir is found."""
        self._write_init_options(project_dir, ai="claude")
@@ -2047,6 +2120,304 @@ class TestPresetSkills:
        metadata = manager.registry.get("self-test")
        assert metadata.get("registered_skills", []) == []

    def test_extension_skill_override_matches_hyphenated_multisegment_name(self, project_dir, temp_dir):
        """Preset overrides for speckit.<ext>.<cmd> should target speckit-<ext>-<cmd> skills."""
        self._write_init_options(project_dir, ai="codex")
        skills_dir = project_dir / ".agents" / "skills"
        self._create_skill(skills_dir, "speckit-fakeext-cmd", body="untouched")
        (project_dir / ".specify" / "extensions" / "fakeext").mkdir(parents=True, exist_ok=True)

        preset_dir = temp_dir / "ext-skill-override"
        preset_dir.mkdir()
        (preset_dir / "commands").mkdir()
        (preset_dir / "commands" / "speckit.fakeext.cmd.md").write_text(
            "---\ndescription: Override fakeext cmd\n---\n\npreset:ext-skill-override\n"
        )
        manifest_data = {
            "schema_version": "1.0",
            "preset": {
                "id": "ext-skill-override",
                "name": "Ext Skill Override",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "templates": [
                    {
                        "type": "command",
                        "name": "speckit.fakeext.cmd",
                        "file": "commands/speckit.fakeext.cmd.md",
                    }
                ]
            },
        }
        with open(preset_dir / "preset.yml", "w") as f:
            yaml.dump(manifest_data, f)

        manager = PresetManager(project_dir)
        manager.install_from_directory(preset_dir, "0.1.5")

        skill_file = skills_dir / "speckit-fakeext-cmd" / "SKILL.md"
        assert skill_file.exists()
        content = skill_file.read_text()
        assert "preset:ext-skill-override" in content
        assert "name: speckit-fakeext-cmd" in content
        assert "# Speckit Fakeext Cmd Skill" in content

        metadata = manager.registry.get("ext-skill-override")
        assert "speckit-fakeext-cmd" in metadata.get("registered_skills", [])

    def test_extension_skill_restored_on_preset_remove(self, project_dir, temp_dir):
        """Preset removal should restore an extension-backed skill instead of deleting it."""
        self._write_init_options(project_dir, ai="codex")
        skills_dir = project_dir / ".agents" / "skills"
        self._create_skill(skills_dir, "speckit-fakeext-cmd", body="original extension skill")

        extension_dir = project_dir / ".specify" / "extensions" / "fakeext"
        (extension_dir / "commands").mkdir(parents=True, exist_ok=True)
        (extension_dir / "commands" / "cmd.md").write_text(
            "---\n"
            "description: Extension fakeext cmd\n"
            "scripts:\n"
            "  sh: ../../scripts/bash/setup-plan.sh --json \"{ARGS}\"\n"
            "---\n\n"
            "extension:fakeext\n"
            "Run {SCRIPT}\n"
        )
        extension_manifest = {
            "schema_version": "1.0",
            "extension": {
                "id": "fakeext",
                "name": "Fake Extension",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.fakeext.cmd",
                        "file": "commands/cmd.md",
                        "description": "Fake extension command",
                    }
                ]
            },
        }
        with open(extension_dir / "extension.yml", "w") as f:
            yaml.dump(extension_manifest, f)

        preset_dir = temp_dir / "ext-skill-restore"
        preset_dir.mkdir()
        (preset_dir / "commands").mkdir()
        (preset_dir / "commands" / "speckit.fakeext.cmd.md").write_text(
            "---\ndescription: Override fakeext cmd\n---\n\npreset:ext-skill-restore\n"
        )
        preset_manifest = {
            "schema_version": "1.0",
            "preset": {
                "id": "ext-skill-restore",
                "name": "Ext Skill Restore",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "templates": [
                    {
                        "type": "command",
                        "name": "speckit.fakeext.cmd",
                        "file": "commands/speckit.fakeext.cmd.md",
                    }
                ]
            },
        }
        with open(preset_dir / "preset.yml", "w") as f:
            yaml.dump(preset_manifest, f)

        manager = PresetManager(project_dir)
        manager.install_from_directory(preset_dir, "0.1.5")

        skill_file = skills_dir / "speckit-fakeext-cmd" / "SKILL.md"
        assert "preset:ext-skill-restore" in skill_file.read_text()

        manager.remove("ext-skill-restore")

        assert skill_file.exists()
        content = skill_file.read_text()
        assert "preset:ext-skill-restore" not in content
        assert "source: extension:fakeext" in content
        assert "extension:fakeext" in content
        assert '.specify/scripts/bash/setup-plan.sh --json "$ARGUMENTS"' in content
        assert "# Fakeext Cmd Skill" in content

    def test_preset_remove_skips_skill_dir_without_skill_file(self, project_dir, temp_dir):
        """Preset removal should not delete arbitrary directories missing SKILL.md."""
        self._write_init_options(project_dir, ai="codex")
        skills_dir = project_dir / ".agents" / "skills"
        stray_skill_dir = skills_dir / "speckit-fakeext-cmd"
        stray_skill_dir.mkdir(parents=True, exist_ok=True)
        note_file = stray_skill_dir / "notes.txt"
        note_file.write_text("user content", encoding="utf-8")

        preset_dir = temp_dir / "ext-skill-missing-file"
        preset_dir.mkdir()
        (preset_dir / "commands").mkdir()
        (preset_dir / "commands" / "speckit.fakeext.cmd.md").write_text(
            "---\ndescription: Override fakeext cmd\n---\n\npreset:ext-skill-missing-file\n"
        )
        preset_manifest = {
            "schema_version": "1.0",
            "preset": {
                "id": "ext-skill-missing-file",
                "name": "Ext Skill Missing File",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "templates": [
                    {
                        "type": "command",
                        "name": "speckit.fakeext.cmd",
                        "file": "commands/speckit.fakeext.cmd.md",
                    }
                ]
            },
        }
        with open(preset_dir / "preset.yml", "w") as f:
            yaml.dump(preset_manifest, f)

        manager = PresetManager(project_dir)
        installed_preset_dir = manager.presets_dir / "ext-skill-missing-file"
        shutil.copytree(preset_dir, installed_preset_dir)
        manager.registry.add(
            "ext-skill-missing-file",
            {
                "version": "1.0.0",
                "source": str(preset_dir),
                "provides_templates": ["speckit.fakeext.cmd"],
                "registered_skills": ["speckit-fakeext-cmd"],
                "priority": 10,
            },
        )

        manager.remove("ext-skill-missing-file")

        assert stray_skill_dir.is_dir()
        assert note_file.read_text(encoding="utf-8") == "user content"

    def test_kimi_legacy_dotted_skill_override_still_applies(self, project_dir, temp_dir):
        """Preset overrides should still target legacy dotted Kimi skill directories."""
        self._write_init_options(project_dir, ai="kimi")
        skills_dir = project_dir / ".kimi" / "skills"
        self._create_skill(skills_dir, "speckit.specify", body="untouched")

        (project_dir / ".kimi" / "commands").mkdir(parents=True, exist_ok=True)

        manager = PresetManager(project_dir)
        self_test_dir = Path(__file__).parent.parent / "presets" / "self-test"
        manager.install_from_directory(self_test_dir, "0.1.5")

        skill_file = skills_dir / "speckit.specify" / "SKILL.md"
        assert skill_file.exists()
        content = skill_file.read_text()
        assert "preset:self-test" in content
        assert "name: speckit.specify" in content

        metadata = manager.registry.get("self-test")
        assert "speckit.specify" in metadata.get("registered_skills", [])

    def test_kimi_skill_updated_even_when_ai_skills_disabled(self, project_dir, temp_dir):
        """Kimi presets should still propagate command overrides to existing skills."""
        self._write_init_options(project_dir, ai="kimi", ai_skills=False)
        skills_dir = project_dir / ".kimi" / "skills"
        self._create_skill(skills_dir, "speckit-specify", body="untouched")

        (project_dir / ".kimi" / "commands").mkdir(parents=True, exist_ok=True)

        manager = PresetManager(project_dir)
        self_test_dir = Path(__file__).parent.parent / "presets" / "self-test"
        manager.install_from_directory(self_test_dir, "0.1.5")

        skill_file = skills_dir / "speckit-specify" / "SKILL.md"
        assert skill_file.exists()
        content = skill_file.read_text()
        assert "preset:self-test" in content
        assert "name: speckit-specify" in content

        metadata = manager.registry.get("self-test")
        assert "speckit-specify" in metadata.get("registered_skills", [])

    def test_kimi_preset_skill_override_resolves_script_placeholders(self, project_dir, temp_dir):
        """Kimi preset skill overrides should resolve placeholders and rewrite project paths."""
        self._write_init_options(project_dir, ai="kimi", ai_skills=False, script="sh")
        skills_dir = project_dir / ".kimi" / "skills"
        self._create_skill(skills_dir, "speckit-specify", body="untouched")
        (project_dir / ".kimi" / "commands").mkdir(parents=True, exist_ok=True)

        preset_dir = temp_dir / "kimi-placeholder-override"
        preset_dir.mkdir()
        (preset_dir / "commands").mkdir()
        (preset_dir / "commands" / "speckit.specify.md").write_text(
            "---\n"
            "description: Kimi placeholder override\n"
            "scripts:\n"
            "  sh: scripts/bash/create-new-feature.sh --json \"{ARGS}\"\n"
            "---\n\n"
            "Execute `{SCRIPT}` for __AGENT__\n"
            "Review templates/checklist.md and memory/constitution.md\n"
        )
        manifest_data = {
            "schema_version": "1.0",
            "preset": {
                "id": "kimi-placeholder-override",
                "name": "Kimi Placeholder Override",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "templates": [
                    {
                        "type": "command",
                        "name": "speckit.specify",
                        "file": "commands/speckit.specify.md",
                    }
                ]
            },
        }
        with open(preset_dir / "preset.yml", "w") as f:
            yaml.dump(manifest_data, f)

        manager = PresetManager(project_dir)
        manager.install_from_directory(preset_dir, "0.1.5")

        content = (skills_dir / "speckit-specify" / "SKILL.md").read_text()
        assert "{SCRIPT}" not in content
        assert "__AGENT__" not in content
        assert ".specify/scripts/bash/create-new-feature.sh --json \"$ARGUMENTS\"" in content
        assert ".specify/templates/checklist.md" in content
        assert ".specify/memory/constitution.md" in content
        assert "for kimi" in content
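
The Kimi override test above bundles the three rewrites every preset-driven skill body goes through: `{SCRIPT}` expands to the selected script variant with `{ARGS}` resolved to `$ARGUMENTS`, `__AGENT__` resolves to the agent id, and bare project-relative references (`scripts/...`, `templates/...`, `memory/...`) are re-rooted under `.specify/`. Sketched together under those assumptions — the ordering and the path regex are guesses at the contract, not the real implementation:

```python
import re


def resolve_skill_body(body: str, script_cmd: str, agent: str) -> str:
    """Apply the placeholder and path rewrites the preset skill tests expect (sketch)."""
    body = body.replace("{SCRIPT}", script_cmd.replace("{ARGS}", "$ARGUMENTS"))
    body = body.replace("__AGENT__", agent)
    # Re-root bare project-relative references; paths already under .specify/
    # are skipped because the character before them fails the lookbehind.
    return re.sub(r"(?<![\w./])(scripts|templates|memory)/", r".specify/\1/", body)


resolved = resolve_skill_body(
    "Execute `{SCRIPT}` for __AGENT__\nReview templates/checklist.md\n",
    'scripts/bash/create-new-feature.sh --json "{ARGS}"',
    "kimi",
)
assert '.specify/scripts/bash/create-new-feature.sh --json "$ARGUMENTS"' in resolved
assert ".specify/templates/checklist.md" in resolved and "for kimi" in resolved
```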
    def test_preset_skill_registration_handles_non_dict_init_options(self, project_dir, temp_dir):
        """Non-dict init-options payloads should not crash preset install/remove flows."""
        init_options = project_dir / ".specify" / "init-options.json"
        init_options.parent.mkdir(parents=True, exist_ok=True)
        init_options.write_text("[]")

        skills_dir = project_dir / ".claude" / "skills"
        self._create_skill(skills_dir, "speckit-specify", body="untouched")
        (project_dir / ".claude" / "commands").mkdir(parents=True, exist_ok=True)

        manager = PresetManager(project_dir)
        self_test_dir = Path(__file__).parent.parent / "presets" / "self-test"
        manager.install_from_directory(self_test_dir, "0.1.5")

        content = (skills_dir / "speckit-specify" / "SKILL.md").read_text()
        assert "untouched" in content


class TestPresetSetPriority:
    """Test preset set-priority CLI command."""

@@ -14,6 +14,7 @@ import pytest

PROJECT_ROOT = Path(__file__).resolve().parent.parent
CREATE_FEATURE = PROJECT_ROOT / "scripts" / "bash" / "create-new-feature.sh"
CREATE_FEATURE_PS = PROJECT_ROOT / "scripts" / "powershell" / "create-new-feature.ps1"
COMMON_SH = PROJECT_ROOT / "scripts" / "bash" / "common.sh"


@@ -147,6 +148,24 @@ class TestSequentialBranch:
            branch = line.split(":", 1)[1].strip()
        assert branch == "003-next-feat", f"expected 003-next-feat, got: {branch}"

    def test_sequential_supports_four_digit_prefixes(self, git_repo: Path):
        """Sequential numbering should continue past 999 without truncation."""
        (git_repo / "specs" / "999-last-3digit").mkdir(parents=True)
        (git_repo / "specs" / "1000-first-4digit").mkdir(parents=True)
        result = run_script(git_repo, "--short-name", "next-feat", "Next feature")
        assert result.returncode == 0, result.stderr
        branch = None
        for line in result.stdout.splitlines():
            if line.startswith("BRANCH_NAME:"):
                branch = line.split(":", 1)[1].strip()
        assert branch == "1001-next-feat", f"expected 1001-next-feat, got: {branch}"

    def test_powershell_scanner_uses_long_tryparse_for_large_prefixes(self):
        """PowerShell scanner should parse large prefixes without [int] casts."""
        content = CREATE_FEATURE_PS.read_text(encoding="utf-8")
        assert "[long]::TryParse($matches[1], [ref]$num)" in content
        assert "$num = [int]$matches[1]" not in content
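
The four-digit test guards the prefix scanner: the numeric prefix must be parsed as a wide integer (hence the PowerShell `[long]::TryParse` guard, rather than an `[int]` cast that a huge prefix could overflow) and the next number is the maximum plus one, so `1000-first-4digit` yields `1001-next-feat` instead of a truncated value. For reference, the equivalent scan sketched in Python with hypothetical names:

```python
import re
from pathlib import Path


def next_feature_branch(specs_dir: Path, short_name: str) -> str:
    """Compute the next NNN-short-name branch from existing spec dirs (sketch)."""
    highest = 0
    for entry in specs_dir.iterdir():
        match = re.match(r"^(\d+)-", entry.name)
        if match:
            # Python ints are arbitrary precision, so 4+ digit prefixes are safe.
            highest = max(highest, int(match.group(1)))
    return f"{highest + 1:03d}-{short_name}"  # pad to at least three digits
```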

# ── check_feature_branch Tests ───────────────────────────────────────────────

@@ -250,3 +269,146 @@ class TestE2EFlow:
        assert (git_repo / "specs" / branch).is_dir()
        val = source_and_call(f'check_feature_branch "{branch}" "true"')
        assert val.returncode == 0


# ── Allow Existing Branch Tests ──────────────────────────────────────────────


class TestAllowExistingBranch:
    def test_allow_existing_switches_to_branch(self, git_repo: Path):
        """T006: Pre-create branch, verify script switches to it."""
        subprocess.run(
            ["git", "checkout", "-b", "004-pre-exist"],
            cwd=git_repo, check=True, capture_output=True,
        )
        subprocess.run(
            ["git", "checkout", "-"],
            cwd=git_repo, check=True, capture_output=True,
        )
        result = run_script(
            git_repo, "--allow-existing-branch", "--short-name", "pre-exist",
            "--number", "4", "Pre-existing feature",
        )
        assert result.returncode == 0, result.stderr
        current = subprocess.run(
            ["git", "rev-parse", "--abbrev-ref", "HEAD"],
            cwd=git_repo, capture_output=True, text=True,
        ).stdout.strip()
        assert current == "004-pre-exist", f"expected 004-pre-exist, got {current}"

    def test_allow_existing_already_on_branch(self, git_repo: Path):
        """T007: Verify success when already on the target branch."""
        subprocess.run(
            ["git", "checkout", "-b", "005-already-on"],
            cwd=git_repo, check=True, capture_output=True,
        )
        result = run_script(
            git_repo, "--allow-existing-branch", "--short-name", "already-on",
            "--number", "5", "Already on branch",
        )
        assert result.returncode == 0, result.stderr

    def test_allow_existing_creates_spec_dir(self, git_repo: Path):
        """T008: Verify spec directory created on existing branch."""
        subprocess.run(
            ["git", "checkout", "-b", "006-spec-dir"],
            cwd=git_repo, check=True, capture_output=True,
        )
        subprocess.run(
            ["git", "checkout", "-"],
            cwd=git_repo, check=True, capture_output=True,
        )
        result = run_script(
            git_repo, "--allow-existing-branch", "--short-name", "spec-dir",
            "--number", "6", "Spec dir feature",
        )
        assert result.returncode == 0, result.stderr
        assert (git_repo / "specs" / "006-spec-dir").is_dir()
        assert (git_repo / "specs" / "006-spec-dir" / "spec.md").exists()

    def test_without_flag_still_errors(self, git_repo: Path):
        """T009: Verify backwards compatibility (error without flag)."""
        subprocess.run(
            ["git", "checkout", "-b", "007-no-flag"],
            cwd=git_repo, check=True, capture_output=True,
        )
        subprocess.run(
            ["git", "checkout", "-"],
            cwd=git_repo, check=True, capture_output=True,
        )
        result = run_script(
            git_repo, "--short-name", "no-flag", "--number", "7", "No flag feature",
        )
        assert result.returncode != 0, "should fail without --allow-existing-branch"
        assert "already exists" in result.stderr

    def test_allow_existing_no_overwrite_spec(self, git_repo: Path):
        """T010: Pre-create spec.md with content, verify it is preserved."""
        subprocess.run(
            ["git", "checkout", "-b", "008-no-overwrite"],
            cwd=git_repo, check=True, capture_output=True,
        )
        spec_dir = git_repo / "specs" / "008-no-overwrite"
        spec_dir.mkdir(parents=True)
        spec_file = spec_dir / "spec.md"
        spec_file.write_text("# My custom spec content\n")
        subprocess.run(
            ["git", "checkout", "-"],
            cwd=git_repo, check=True, capture_output=True,
        )
        result = run_script(
            git_repo, "--allow-existing-branch", "--short-name", "no-overwrite",
            "--number", "8", "No overwrite feature",
        )
        assert result.returncode == 0, result.stderr
        assert spec_file.read_text() == "# My custom spec content\n"

    def test_allow_existing_creates_branch_if_not_exists(self, git_repo: Path):
        """T011: Verify normal creation when branch doesn't exist."""
        result = run_script(
            git_repo, "--allow-existing-branch", "--short-name", "new-branch",
            "New branch feature",
        )
        assert result.returncode == 0, result.stderr
        current = subprocess.run(
            ["git", "rev-parse", "--abbrev-ref", "HEAD"],
            cwd=git_repo, capture_output=True, text=True,
        ).stdout.strip()
        assert "new-branch" in current

    def test_allow_existing_with_json(self, git_repo: Path):
        """T012: Verify JSON output is correct."""
        import json

        subprocess.run(
            ["git", "checkout", "-b", "009-json-test"],
            cwd=git_repo, check=True, capture_output=True,
        )
        subprocess.run(
            ["git", "checkout", "-"],
            cwd=git_repo, check=True, capture_output=True,
        )
        result = run_script(
            git_repo, "--allow-existing-branch", "--json", "--short-name", "json-test",
            "--number", "9", "JSON test",
        )
        assert result.returncode == 0, result.stderr
        data = json.loads(result.stdout)
        assert data["BRANCH_NAME"] == "009-json-test"

    def test_allow_existing_no_git(self, no_git_dir: Path):
        """T013: Verify flag is silently ignored in non-git repos."""
        result = run_script(
            no_git_dir, "--allow-existing-branch", "--short-name", "no-git",
            "No git feature",
        )
        assert result.returncode == 0, result.stderr


class TestAllowExistingBranchPowerShell:
    def test_powershell_supports_allow_existing_branch_flag(self):
        """Static guard: PS script exposes and uses -AllowExistingBranch."""
        contents = CREATE_FEATURE_PS.read_text(encoding="utf-8")
        assert "-AllowExistingBranch" in contents
        # Ensure the flag is referenced in script logic, not just declared
        assert "AllowExistingBranch" in contents.replace("-AllowExistingBranch", "")
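
Read together, the `TestAllowExistingBranch` cases define the flag's whole contract: with `--allow-existing-branch`, an existing branch is checked out (or simply kept if already current) and the spec directory is created without clobbering a pre-existing `spec.md`; without the flag, an existing branch remains a hard error; outside a git repo the flag is silently ignored. A compact Python restatement of that decision table — the helper and its return values are hypothetical, and the real logic lives in `create-new-feature.sh`:

```python
def branch_action(branch: str, *, exists: bool, current: str,
                  allow_existing: bool, has_git: bool) -> str:
    """Return the action the script should take for a target branch (sketch)."""
    if not has_git:
        return "skip-git"                # T013: flag is a no-op without git
    if not exists:
        return "create-and-checkout"     # T011: normal creation path
    if not allow_existing:
        raise RuntimeError(f"branch '{branch}' already exists")  # T009
    if current == branch:
        return "stay"                    # T007: already on the branch
    return "checkout"                    # T006: switch to the existing branch


# T010's invariant sits one layer further down: spec.md is only written when absent.
```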