mirror of
https://github.com/github/spec-kit.git
synced 2026-03-17 19:03:08 +00:00
Compare commits
12 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2656ee1df7 | ||
|
|
69ee7a836e | ||
|
|
c883952b43 | ||
|
|
b9c1a1c7bb | ||
|
|
46bc65b1ce | ||
|
|
017e1c4c2f | ||
|
|
7562664fd1 | ||
|
|
976c9981a4 | ||
|
|
d3fc056743 | ||
|
|
58ce653908 | ||
|
|
82f8a13f83 | ||
|
|
0f1cbd74fe |
@@ -382,7 +382,7 @@ function Build-Variant {
|
|||||||
}
|
}
|
||||||
'qwen' {
|
'qwen' {
|
||||||
$cmdDir = Join-Path $baseDir ".qwen/commands"
|
$cmdDir = Join-Path $baseDir ".qwen/commands"
|
||||||
Generate-Commands -Agent 'qwen' -Extension 'toml' -ArgFormat '{{args}}' -OutputDir $cmdDir -ScriptVariant $Script
|
Generate-Commands -Agent 'qwen' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
|
||||||
if (Test-Path "agent_templates/qwen/QWEN.md") {
|
if (Test-Path "agent_templates/qwen/QWEN.md") {
|
||||||
Copy-Item -Path "agent_templates/qwen/QWEN.md" -Destination (Join-Path $baseDir "QWEN.md")
|
Copy-Item -Path "agent_templates/qwen/QWEN.md" -Destination (Join-Path $baseDir "QWEN.md")
|
||||||
}
|
}
|
||||||
@@ -442,7 +442,7 @@ function Build-Variant {
|
|||||||
if (Test-Path $tabnineTemplate) { Copy-Item $tabnineTemplate (Join-Path $baseDir 'TABNINE.md') }
|
if (Test-Path $tabnineTemplate) { Copy-Item $tabnineTemplate (Join-Path $baseDir 'TABNINE.md') }
|
||||||
}
|
}
|
||||||
'agy' {
|
'agy' {
|
||||||
$cmdDir = Join-Path $baseDir ".agent/workflows"
|
$cmdDir = Join-Path $baseDir ".agent/commands"
|
||||||
Generate-Commands -Agent 'agy' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
|
Generate-Commands -Agent 'agy' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
|
||||||
}
|
}
|
||||||
'vibe' {
|
'vibe' {
|
||||||
|
|||||||
@@ -240,7 +240,7 @@ build_variant() {
|
|||||||
generate_commands cursor-agent md "\$ARGUMENTS" "$base_dir/.cursor/commands" "$script" ;;
|
generate_commands cursor-agent md "\$ARGUMENTS" "$base_dir/.cursor/commands" "$script" ;;
|
||||||
qwen)
|
qwen)
|
||||||
mkdir -p "$base_dir/.qwen/commands"
|
mkdir -p "$base_dir/.qwen/commands"
|
||||||
generate_commands qwen toml "{{args}}" "$base_dir/.qwen/commands" "$script"
|
generate_commands qwen md "\$ARGUMENTS" "$base_dir/.qwen/commands" "$script"
|
||||||
[[ -f agent_templates/qwen/QWEN.md ]] && cp agent_templates/qwen/QWEN.md "$base_dir/QWEN.md" ;;
|
[[ -f agent_templates/qwen/QWEN.md ]] && cp agent_templates/qwen/QWEN.md "$base_dir/QWEN.md" ;;
|
||||||
opencode)
|
opencode)
|
||||||
mkdir -p "$base_dir/.opencode/command"
|
mkdir -p "$base_dir/.opencode/command"
|
||||||
@@ -280,8 +280,8 @@ build_variant() {
|
|||||||
mkdir -p "$base_dir/.kiro/prompts"
|
mkdir -p "$base_dir/.kiro/prompts"
|
||||||
generate_commands kiro-cli md "\$ARGUMENTS" "$base_dir/.kiro/prompts" "$script" ;;
|
generate_commands kiro-cli md "\$ARGUMENTS" "$base_dir/.kiro/prompts" "$script" ;;
|
||||||
agy)
|
agy)
|
||||||
mkdir -p "$base_dir/.agent/workflows"
|
mkdir -p "$base_dir/.agent/commands"
|
||||||
generate_commands agy md "\$ARGUMENTS" "$base_dir/.agent/workflows" "$script" ;;
|
generate_commands agy md "\$ARGUMENTS" "$base_dir/.agent/commands" "$script" ;;
|
||||||
bob)
|
bob)
|
||||||
mkdir -p "$base_dir/.bob/commands"
|
mkdir -p "$base_dir/.bob/commands"
|
||||||
generate_commands bob md "\$ARGUMENTS" "$base_dir/.bob/commands" "$script" ;;
|
generate_commands bob md "\$ARGUMENTS" "$base_dir/.bob/commands" "$script" ;;
|
||||||
|
|||||||
12
AGENTS.md
12
AGENTS.md
@@ -10,10 +10,6 @@ The toolkit supports multiple AI coding assistants, allowing teams to use their
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## General practices
|
|
||||||
|
|
||||||
- Any changes to `__init__.py` for the Specify CLI require a version rev in `pyproject.toml` and addition of entries to `CHANGELOG.md`.
|
|
||||||
|
|
||||||
## Adding New Agent Support
|
## Adding New Agent Support
|
||||||
|
|
||||||
This section explains how to add support for new AI agents/assistants to the Specify CLI. Use this guide as a reference when integrating new AI tools into the Spec-Driven Development workflow.
|
This section explains how to add support for new AI agents/assistants to the Specify CLI. Use this guide as a reference when integrating new AI tools into the Spec-Driven Development workflow.
|
||||||
@@ -35,7 +31,7 @@ Specify supports multiple AI agents by generating agent-specific command files a
|
|||||||
| **Gemini CLI** | `.gemini/commands/` | TOML | `gemini` | Google's Gemini CLI |
|
| **Gemini CLI** | `.gemini/commands/` | TOML | `gemini` | Google's Gemini CLI |
|
||||||
| **GitHub Copilot** | `.github/agents/` | Markdown | N/A (IDE-based) | GitHub Copilot in VS Code |
|
| **GitHub Copilot** | `.github/agents/` | Markdown | N/A (IDE-based) | GitHub Copilot in VS Code |
|
||||||
| **Cursor** | `.cursor/commands/` | Markdown | `cursor-agent` | Cursor CLI |
|
| **Cursor** | `.cursor/commands/` | Markdown | `cursor-agent` | Cursor CLI |
|
||||||
| **Qwen Code** | `.qwen/commands/` | TOML | `qwen` | Alibaba's Qwen Code CLI |
|
| **Qwen Code** | `.qwen/commands/` | Markdown | `qwen` | Alibaba's Qwen Code CLI |
|
||||||
| **opencode** | `.opencode/command/` | Markdown | `opencode` | opencode CLI |
|
| **opencode** | `.opencode/command/` | Markdown | `opencode` | opencode CLI |
|
||||||
| **Codex CLI** | `.codex/commands/` | Markdown | `codex` | Codex CLI |
|
| **Codex CLI** | `.codex/commands/` | Markdown | `codex` | Codex CLI |
|
||||||
| **Windsurf** | `.windsurf/workflows/` | Markdown | N/A (IDE-based) | Windsurf IDE workflows |
|
| **Windsurf** | `.windsurf/workflows/` | Markdown | N/A (IDE-based) | Windsurf IDE workflows |
|
||||||
@@ -88,7 +84,7 @@ This eliminates the need for special-case mappings throughout the codebase.
|
|||||||
- `folder`: Directory where agent-specific files are stored (relative to project root)
|
- `folder`: Directory where agent-specific files are stored (relative to project root)
|
||||||
- `commands_subdir`: Subdirectory name within the agent folder where command/prompt files are stored (default: `"commands"`)
|
- `commands_subdir`: Subdirectory name within the agent folder where command/prompt files are stored (default: `"commands"`)
|
||||||
- Most agents use `"commands"` (e.g., `.claude/commands/`)
|
- Most agents use `"commands"` (e.g., `.claude/commands/`)
|
||||||
- Some agents use alternative names: `"agents"` (copilot), `"workflows"` (windsurf, kilocode, agy), `"prompts"` (codex, kiro-cli), `"command"` (opencode - singular)
|
- Some agents use alternative names: `"agents"` (copilot), `"workflows"` (windsurf, kilocode), `"prompts"` (codex, kiro-cli), `"command"` (opencode - singular)
|
||||||
- This field enables `--ai-skills` to locate command templates correctly for skill generation
|
- This field enables `--ai-skills` to locate command templates correctly for skill generation
|
||||||
- `install_url`: Installation documentation URL (set to `None` for IDE-based agents)
|
- `install_url`: Installation documentation URL (set to `None` for IDE-based agents)
|
||||||
- `requires_cli`: Whether the agent requires a CLI tool check during initialization
|
- `requires_cli`: Whether the agent requires a CLI tool check during initialization
|
||||||
@@ -339,7 +335,7 @@ Work within integrated development environments:
|
|||||||
|
|
||||||
### Markdown Format
|
### Markdown Format
|
||||||
|
|
||||||
Used by: Claude, Cursor, opencode, Windsurf, Kiro CLI, Amp, SHAI, IBM Bob, Kimi Code
|
Used by: Claude, Cursor, opencode, Windsurf, Kiro CLI, Amp, SHAI, IBM Bob, Kimi Code, Qwen
|
||||||
|
|
||||||
**Standard format:**
|
**Standard format:**
|
||||||
|
|
||||||
@@ -364,7 +360,7 @@ Command content with {SCRIPT} and $ARGUMENTS placeholders.
|
|||||||
|
|
||||||
### TOML Format
|
### TOML Format
|
||||||
|
|
||||||
Used by: Gemini, Qwen, Tabnine
|
Used by: Gemini, Tabnine
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
description = "Command description"
|
description = "Command description"
|
||||||
|
|||||||
72
CHANGELOG.md
72
CHANGELOG.md
@@ -7,10 +7,21 @@ Recent changes to the Specify CLI and templates are documented here.
|
|||||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||||
|
|
||||||
## [0.2.1] - 2026-03-11
|
## [0.3.0] - 2026-03-13
|
||||||
|
|
||||||
### Changed
|
### Changed
|
||||||
|
|
||||||
|
- feat(presets): Pluggable preset system with catalog, resolver, and skills propagation (#1787)
|
||||||
|
- fix: match 'Last updated' timestamp with or without bold markers (#1836)
|
||||||
|
- Add specify doctor command for project health diagnostics (#1828)
|
||||||
|
- fix: harden bash scripts against shell injection and improve robustness (#1809)
|
||||||
|
- fix: clean up command templates (specify, analyze) (#1810)
|
||||||
|
- fix: migrate Qwen Code CLI from TOML to Markdown format (#1589) (#1730)
|
||||||
|
- fix(cli): deprecate explicit command support for agy (#1798) (#1808)
|
||||||
|
- Add /selftest.extension core extension to test other extensions (#1758)
|
||||||
|
- feat(extensions): Quality of life improvements for RFC-aligned catalog integration (#1776)
|
||||||
|
- Add Java brownfield walkthrough to community walkthroughs (#1820)
|
||||||
|
- chore: bump version to 0.2.1 (#1813)
|
||||||
- Added February 2026 newsletter (#1812)
|
- Added February 2026 newsletter (#1812)
|
||||||
- feat: add Kimi Code CLI agent support (#1790)
|
- feat: add Kimi Code CLI agent support (#1790)
|
||||||
- docs: fix broken links in quickstart guide (#1759) (#1797)
|
- docs: fix broken links in quickstart guide (#1759) (#1797)
|
||||||
@@ -56,8 +67,67 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||||||
|
|
||||||
### Added
|
### Added
|
||||||
|
|
||||||
|
- feat(presets): Pluggable preset system with preset catalog and template resolver
|
||||||
|
- Preset manifest (`preset.yml`) with validation for artifact, command, and script types
|
||||||
|
- `PresetManifest`, `PresetRegistry`, `PresetManager`, `PresetCatalog`, `PresetResolver` classes in `src/specify_cli/presets.py`
|
||||||
|
- CLI commands: `specify preset search`, `specify preset add`, `specify preset list`, `specify preset remove`, `specify preset resolve`, `specify preset info`
|
||||||
|
- CLI commands: `specify preset catalog list`, `specify preset catalog add`, `specify preset catalog remove` for multi-catalog management
|
||||||
|
- `PresetCatalogEntry` dataclass and multi-catalog support mirroring the extension catalog system
|
||||||
|
- `--preset` option for `specify init` to install presets during initialization
|
||||||
|
- Priority-based preset resolution: presets with lower priority number win (`--priority` flag)
|
||||||
|
- `resolve_template()` / `Resolve-Template` helpers in bash and PowerShell common scripts
|
||||||
|
- Template resolution priority stack: overrides → presets → extensions → core
|
||||||
|
- Preset catalog files (`presets/catalog.json`, `presets/catalog.community.json`)
|
||||||
|
- Preset scaffold directory (`presets/scaffold/`)
|
||||||
|
- Scripts updated to use template resolution instead of hardcoded paths
|
||||||
|
- feat(presets): Preset command overrides now propagate to agent skills when `--ai-skills` was used during init
|
||||||
|
- feat: `specify init` persists CLI options to `.specify/init-options.json` for downstream operations
|
||||||
- feat(extensions): support `.extensionignore` to exclude files/folders during `specify extension add` (#1781)
|
- feat(extensions): support `.extensionignore` to exclude files/folders during `specify extension add` (#1781)
|
||||||
|
|
||||||
|
## [0.2.1] - 2026-03-11
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- Added February 2026 newsletter (#1812)
|
||||||
|
- feat: add Kimi Code CLI agent support (#1790)
|
||||||
|
- docs: fix broken links in quickstart guide (#1759) (#1797)
|
||||||
|
- docs: add catalog cli help documentation (#1793) (#1794)
|
||||||
|
- fix: use quiet checkout to avoid exception on git checkout (#1792)
|
||||||
|
- feat(extensions): support .extensionignore to exclude files during install (#1781)
|
||||||
|
- feat: add Codex support for extension command registration (#1767)
|
||||||
|
- chore: bump version to 0.2.0 (#1786)
|
||||||
|
- fix: sync agent list comments with actual supported agents (#1785)
|
||||||
|
- feat(extensions): support multiple active catalogs simultaneously (#1720)
|
||||||
|
- Pavel/add tabnine cli support (#1503)
|
||||||
|
- Add Understanding extension to community catalog (#1778)
|
||||||
|
- Add ralph extension to community catalog (#1780)
|
||||||
|
- Update README with project initialization instructions (#1772)
|
||||||
|
- feat: add review extension to community catalog (#1775)
|
||||||
|
- Add fleet extension to community catalog (#1771)
|
||||||
|
- Integration of Mistral vibe support into speckit (#1725)
|
||||||
|
- fix: Remove duplicate options in specify.md (#1765)
|
||||||
|
- fix: use global branch numbering instead of per-short-name detection (#1757)
|
||||||
|
- Add Community Walkthroughs section to README (#1766)
|
||||||
|
- feat(extensions): add Jira Integration to community catalog (#1764)
|
||||||
|
- Add Azure DevOps Integration extension to community catalog (#1734)
|
||||||
|
- Fix docs: update Antigravity link and add initialization example (#1748)
|
||||||
|
- fix: wire after_tasks and after_implement hook events into command templates (#1702)
|
||||||
|
- make c ignores consistent with c++ (#1747)
|
||||||
|
- chore: bump version to 0.1.13 (#1746)
|
||||||
|
- feat: add kiro-cli and AGENT_CONFIG consistency coverage (#1690)
|
||||||
|
- feat: add verify extension to community catalog (#1726)
|
||||||
|
- Add Retrospective Extension to community catalog README table (#1741)
|
||||||
|
- fix(scripts): add empty description validation and branch checkout error handling (#1559)
|
||||||
|
- fix: correct Copilot extension command registration (#1724)
|
||||||
|
- fix(implement): remove Makefile from C ignore patterns (#1558)
|
||||||
|
- Add sync extension to community catalog (#1728)
|
||||||
|
- fix(checklist): clarify file handling behavior for append vs create (#1556)
|
||||||
|
- fix(clarify): correct conflicting question limit from 10 to 5 (#1557)
|
||||||
|
- chore: bump version to 0.1.12 (#1737)
|
||||||
|
- fix: use RELEASE_PAT so tag push triggers release workflow (#1736)
|
||||||
|
- fix: release-trigger uses release branch + PR instead of direct push to main (#1733)
|
||||||
|
- fix: Split release process to sync pyproject.toml version with git tags (#1732)
|
||||||
|
|
||||||
## [0.2.0] - 2026-03-09
|
## [0.2.0] - 2026-03-09
|
||||||
|
|
||||||
### Changed
|
### Changed
|
||||||
|
|||||||
@@ -154,7 +154,9 @@ See Spec-Driven Development in action across different scenarios with these comm
|
|||||||
|
|
||||||
- **[Greenfield Spring Boot + React platform](https://github.com/mnriem/spec-kit-spring-react-demo)** — Builds an LLM performance analytics platform (REST API, graphs, iteration tracking) from scratch using Spring Boot, embedded React, PostgreSQL, and Docker Compose, with a clarify step and a cross-artifact consistency analysis pass included.
|
- **[Greenfield Spring Boot + React platform](https://github.com/mnriem/spec-kit-spring-react-demo)** — Builds an LLM performance analytics platform (REST API, graphs, iteration tracking) from scratch using Spring Boot, embedded React, PostgreSQL, and Docker Compose, with a clarify step and a cross-artifact consistency analysis pass included.
|
||||||
|
|
||||||
- **[Brownfield ASP.NET CMS extension](https://github.com/mnriem/spec-kit-aspnet-brownfield-demo)** — Extends an existing open-source .NET CMS (CarrotCakeCMS-Core) with two new features — cross-platform Docker Compose infrastructure and a token-authenticated headless REST API — demonstrating how spec-kit fits into existing codebases without prior specs or a constitution.
|
- **[Brownfield ASP.NET CMS extension](https://github.com/mnriem/spec-kit-aspnet-brownfield-demo)** — Extends an existing open-source .NET CMS (CarrotCakeCMS-Core, ~307,000 lines of C#, Razor, SQL, JavaScript, and config files) with two new features — cross-platform Docker Compose infrastructure and a token-authenticated headless REST API — demonstrating how spec-kit fits into existing codebases without prior specs or a constitution.
|
||||||
|
|
||||||
|
- **[Brownfield Java runtime extension](https://github.com/mnriem/spec-kit-java-brownfield-demo)** — Extends an existing open-source Jakarta EE runtime (Piranha, ~420,000 lines of Java, XML, JSP, HTML, and config files across 180 Maven modules) with a password-protected Server Admin Console, demonstrating spec-kit on a large multi-module Java project with no prior specs or constitution.
|
||||||
|
|
||||||
## 🤖 Supported AI Agents
|
## 🤖 Supported AI Agents
|
||||||
|
|
||||||
@@ -181,7 +183,7 @@ See Spec-Driven Development in action across different scenarios with these comm
|
|||||||
| [Mistral Vibe](https://github.com/mistralai/mistral-vibe) | ✅ | |
|
| [Mistral Vibe](https://github.com/mistralai/mistral-vibe) | ✅ | |
|
||||||
| [Kimi Code](https://code.kimi.com/) | ✅ | |
|
| [Kimi Code](https://code.kimi.com/) | ✅ | |
|
||||||
| [Windsurf](https://windsurf.com/) | ✅ | |
|
| [Windsurf](https://windsurf.com/) | ✅ | |
|
||||||
| [Antigravity (agy)](https://antigravity.google/) | ✅ | |
|
| [Antigravity (agy)](https://antigravity.google/) | ✅ | Requires `--ai-skills` |
|
||||||
| Generic | ✅ | Bring your own agent — use `--ai generic --ai-commands-dir <path>` for unsupported agents |
|
| Generic | ✅ | Bring your own agent — use `--ai generic --ai-commands-dir <path>` for unsupported agents |
|
||||||
|
|
||||||
## 🔧 Specify CLI Reference
|
## 🔧 Specify CLI Reference
|
||||||
@@ -246,7 +248,7 @@ specify init my-project --ai vibe
|
|||||||
specify init my-project --ai bob
|
specify init my-project --ai bob
|
||||||
|
|
||||||
# Initialize with Antigravity support
|
# Initialize with Antigravity support
|
||||||
specify init my-project --ai agy
|
specify init my-project --ai agy --ai-skills
|
||||||
|
|
||||||
# Initialize with an unsupported agent (generic / bring your own agent)
|
# Initialize with an unsupported agent (generic / bring your own agent)
|
||||||
specify init my-project --ai generic --ai-commands-dir .myagent/commands/
|
specify init my-project --ai generic --ai-commands-dir .myagent/commands/
|
||||||
|
|||||||
@@ -76,6 +76,7 @@ The following community-contributed extensions are available in [`catalog.commun
|
|||||||
| Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) |
|
| Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) |
|
||||||
| Fleet Orchestrator | Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases | [spec-kit-fleet](https://github.com/sharathsatish/spec-kit-fleet) |
|
| Fleet Orchestrator | Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases | [spec-kit-fleet](https://github.com/sharathsatish/spec-kit-fleet) |
|
||||||
| Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
|
| Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
|
||||||
|
| Project Health Check | Diagnose a Spec Kit project and report health issues across structure, agents, features, scripts, extensions, and git | [spec-kit-doctor](https://github.com/KhawarHabibKhan/spec-kit-doctor) |
|
||||||
| Ralph Loop | Autonomous implementation loop using AI agent CLI | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |
|
| Ralph Loop | Autonomous implementation loop using AI agent CLI | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |
|
||||||
| Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
|
| Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
|
||||||
| Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
|
| Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
# RFC: Spec Kit Extension System
|
# RFC: Spec Kit Extension System
|
||||||
|
|
||||||
**Status**: Draft
|
**Status**: Implemented
|
||||||
**Author**: Stats Perform Engineering
|
**Author**: Stats Perform Engineering
|
||||||
**Created**: 2026-01-28
|
**Created**: 2026-01-28
|
||||||
**Updated**: 2026-01-28
|
**Updated**: 2026-03-11
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -24,8 +24,9 @@
|
|||||||
13. [Security Considerations](#security-considerations)
|
13. [Security Considerations](#security-considerations)
|
||||||
14. [Migration Strategy](#migration-strategy)
|
14. [Migration Strategy](#migration-strategy)
|
||||||
15. [Implementation Phases](#implementation-phases)
|
15. [Implementation Phases](#implementation-phases)
|
||||||
16. [Open Questions](#open-questions)
|
16. [Resolved Questions](#resolved-questions)
|
||||||
17. [Appendices](#appendices)
|
17. [Open Questions (Remaining)](#open-questions-remaining)
|
||||||
|
18. [Appendices](#appendices)
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -1504,203 +1505,225 @@ AI agent registers both names, so old scripts work.
|
|||||||
|
|
||||||
## Implementation Phases
|
## Implementation Phases
|
||||||
|
|
||||||
### Phase 1: Core Extension System (Week 1-2)
|
### Phase 1: Core Extension System ✅ COMPLETED
|
||||||
|
|
||||||
**Goal**: Basic extension infrastructure
|
**Goal**: Basic extension infrastructure
|
||||||
|
|
||||||
**Deliverables**:
|
**Deliverables**:
|
||||||
|
|
||||||
- [ ] Extension manifest schema (`extension.yml`)
|
- [x] Extension manifest schema (`extension.yml`)
|
||||||
- [ ] Extension directory structure
|
- [x] Extension directory structure
|
||||||
- [ ] CLI commands:
|
- [x] CLI commands:
|
||||||
- [ ] `specify extension list`
|
- [x] `specify extension list`
|
||||||
- [ ] `specify extension add` (from URL)
|
- [x] `specify extension add` (from URL and local `--dev`)
|
||||||
- [ ] `specify extension remove`
|
- [x] `specify extension remove`
|
||||||
- [ ] Extension registry (`.specify/extensions/.registry`)
|
- [x] Extension registry (`.specify/extensions/.registry`)
|
||||||
- [ ] Command registration (Claude only initially)
|
- [x] Command registration (Claude and 15+ other agents)
|
||||||
- [ ] Basic validation (manifest schema, compatibility)
|
- [x] Basic validation (manifest schema, compatibility)
|
||||||
- [ ] Documentation (extension development guide)
|
- [x] Documentation (extension development guide)
|
||||||
|
|
||||||
**Testing**:
|
**Testing**:
|
||||||
|
|
||||||
- [ ] Unit tests for manifest parsing
|
- [x] Unit tests for manifest parsing
|
||||||
- [ ] Integration test: Install dummy extension
|
- [x] Integration test: Install dummy extension
|
||||||
- [ ] Integration test: Register commands with Claude
|
- [x] Integration test: Register commands with Claude
|
||||||
|
|
||||||
### Phase 2: Jira Extension (Week 3)
|
### Phase 2: Jira Extension ✅ COMPLETED
|
||||||
|
|
||||||
**Goal**: First production extension
|
**Goal**: First production extension
|
||||||
|
|
||||||
**Deliverables**:
|
**Deliverables**:
|
||||||
|
|
||||||
- [ ] Create `spec-kit-jira` repository
|
- [x] Create `spec-kit-jira` repository
|
||||||
- [ ] Port Jira functionality to extension
|
- [x] Port Jira functionality to extension
|
||||||
- [ ] Create `jira-config.yml` template
|
- [x] Create `jira-config.yml` template
|
||||||
- [ ] Commands:
|
- [x] Commands:
|
||||||
- [ ] `specstoissues.md`
|
- [x] `specstoissues.md`
|
||||||
- [ ] `discover-fields.md`
|
- [x] `discover-fields.md`
|
||||||
- [ ] `sync-status.md`
|
- [x] `sync-status.md`
|
||||||
- [ ] Helper scripts
|
- [x] Helper scripts
|
||||||
- [ ] Documentation (README, configuration guide, examples)
|
- [x] Documentation (README, configuration guide, examples)
|
||||||
- [ ] Release v1.0.0
|
- [x] Release v3.0.0
|
||||||
|
|
||||||
**Testing**:
|
**Testing**:
|
||||||
|
|
||||||
- [ ] Test on `eng-msa-ts` project
|
- [x] Test on `eng-msa-ts` project
|
||||||
- [ ] Verify spec→Epic, phase→Story, task→Issue mapping
|
- [x] Verify spec→Epic, phase→Story, task→Issue mapping
|
||||||
- [ ] Test configuration loading and validation
|
- [x] Test configuration loading and validation
|
||||||
- [ ] Test custom field application
|
- [x] Test custom field application
|
||||||
|
|
||||||
### Phase 3: Extension Catalog (Week 4)
|
### Phase 3: Extension Catalog ✅ COMPLETED
|
||||||
|
|
||||||
**Goal**: Discovery and distribution
|
**Goal**: Discovery and distribution
|
||||||
|
|
||||||
**Deliverables**:
|
**Deliverables**:
|
||||||
|
|
||||||
- [ ] Central catalog (`extensions/catalog.json` in spec-kit repo)
|
- [x] Central catalog (`extensions/catalog.json` in spec-kit repo)
|
||||||
- [ ] Catalog fetch and parsing
|
- [x] Community catalog (`extensions/catalog.community.json`)
|
||||||
- [ ] CLI commands:
|
- [x] Catalog fetch and parsing with multi-catalog support
|
||||||
- [ ] `specify extension search`
|
- [x] CLI commands:
|
||||||
- [ ] `specify extension info`
|
- [x] `specify extension search`
|
||||||
- [ ] Catalog publishing process (GitHub Action)
|
- [x] `specify extension info`
|
||||||
- [ ] Documentation (how to publish extensions)
|
- [x] `specify extension catalog list`
|
||||||
|
- [x] `specify extension catalog add`
|
||||||
|
- [x] `specify extension catalog remove`
|
||||||
|
- [x] Documentation (how to publish extensions)
|
||||||
|
|
||||||
**Testing**:
|
**Testing**:
|
||||||
|
|
||||||
- [ ] Test catalog fetch
|
- [x] Test catalog fetch
|
||||||
- [ ] Test extension search/filtering
|
- [x] Test extension search/filtering
|
||||||
- [ ] Test catalog caching
|
- [x] Test catalog caching
|
||||||
|
- [x] Test multi-catalog merge with priority
|
||||||
|
|
||||||
### Phase 4: Advanced Features (Week 5-6)
|
### Phase 4: Advanced Features ✅ COMPLETED
|
||||||
|
|
||||||
**Goal**: Hooks, updates, multi-agent support
|
**Goal**: Hooks, updates, multi-agent support
|
||||||
|
|
||||||
**Deliverables**:
|
**Deliverables**:
|
||||||
|
|
||||||
- [ ] Hook system (`hooks` in extension.yml)
|
- [x] Hook system (`hooks` in extension.yml)
|
||||||
- [ ] Hook registration and execution
|
- [x] Hook registration and execution
|
||||||
- [ ] Project extensions config (`.specify/extensions.yml`)
|
- [x] Project extensions config (`.specify/extensions.yml`)
|
||||||
- [ ] CLI commands:
|
- [x] CLI commands:
|
||||||
- [ ] `specify extension update`
|
- [x] `specify extension update` (with atomic backup/restore)
|
||||||
- [ ] `specify extension enable/disable`
|
- [x] `specify extension enable/disable`
|
||||||
- [ ] Command registration for multiple agents (Gemini, Copilot)
|
- [x] Command registration for multiple agents (15+ agents including Claude, Copilot, Gemini, Cursor, etc.)
|
||||||
- [ ] Extension update notifications
|
- [x] Extension update notifications (version comparison)
|
||||||
- [ ] Configuration layer resolution (project, local, env)
|
- [x] Configuration layer resolution (project, local, env)
|
||||||
|
|
||||||
|
**Additional features implemented beyond original RFC**:
|
||||||
|
|
||||||
|
- [x] **Display name resolution**: All commands accept extension display names in addition to IDs
|
||||||
|
- [x] **Ambiguous name handling**: User-friendly tables when multiple extensions match a name
|
||||||
|
- [x] **Atomic update with rollback**: Full backup of extension dir, commands, hooks, and registry with automatic rollback on failure
|
||||||
|
- [x] **Pre-install ID validation**: Validates extension ID from ZIP before installing (security)
|
||||||
|
- [x] **Enabled state preservation**: Disabled extensions stay disabled after update
|
||||||
|
- [x] **Registry update/restore methods**: Clean API for enable/disable and rollback operations
|
||||||
|
- [x] **Catalog error fallback**: `extension info` falls back to local info when catalog unavailable
|
||||||
|
- [x] **`_install_allowed` flag**: Discovery-only catalogs can't be used for installation
|
||||||
|
- [x] **Cache invalidation**: Cache invalidated when `SPECKIT_CATALOG_URL` changes
|
||||||
|
|
||||||
**Testing**:
|
**Testing**:
|
||||||
|
|
||||||
- [ ] Test hooks in core commands
|
- [x] Test hooks in core commands
|
||||||
- [ ] Test extension updates (preserve config)
|
- [x] Test extension updates (preserve config)
|
||||||
- [ ] Test multi-agent registration
|
- [x] Test multi-agent registration
|
||||||
|
- [x] Test atomic rollback on update failure
|
||||||
|
- [x] Test enabled state preservation
|
||||||
|
- [x] Test display name resolution
|
||||||
|
|
||||||
### Phase 5: Polish & Documentation (Week 7)
|
### Phase 5: Polish & Documentation ✅ COMPLETED
|
||||||
|
|
||||||
**Goal**: Production ready
|
**Goal**: Production ready
|
||||||
|
|
||||||
**Deliverables**:
|
**Deliverables**:
|
||||||
|
|
||||||
- [ ] Comprehensive documentation:
|
- [x] Comprehensive documentation:
|
||||||
- [ ] User guide (installing/using extensions)
|
- [x] User guide (EXTENSION-USER-GUIDE.md)
|
||||||
- [ ] Extension development guide
|
- [x] Extension development guide (EXTENSION-DEV-GUIDE.md)
|
||||||
- [ ] Extension API reference
|
- [x] Extension API reference (EXTENSION-API-REFERENCE.md)
|
||||||
- [ ] Migration guide (core → extension)
|
- [x] Error messages and validation improvements
|
||||||
- [ ] Error messages and validation improvements
|
- [x] CLI help text updates
|
||||||
- [ ] CLI help text updates
|
|
||||||
- [ ] Example extension template (cookiecutter)
|
|
||||||
- [ ] Blog post / announcement
|
|
||||||
- [ ] Video tutorial
|
|
||||||
|
|
||||||
**Testing**:
|
**Testing**:
|
||||||
|
|
||||||
- [ ] End-to-end testing on multiple projects
|
- [x] End-to-end testing on multiple projects
|
||||||
- [ ] Community beta testing
|
- [x] 163 unit tests passing
|
||||||
- [ ] Performance testing (large projects)
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Open Questions
|
## Resolved Questions
|
||||||
|
|
||||||
### 1. Extension Namespace
|
The following questions from the original RFC have been resolved during implementation:
|
||||||
|
|
||||||
|
### 1. Extension Namespace ✅ RESOLVED
|
||||||
|
|
||||||
**Question**: Should extension commands use namespace prefix?
|
**Question**: Should extension commands use namespace prefix?
|
||||||
|
|
||||||
**Options**:
|
**Decision**: **Option C** - Both prefixed and aliases are supported. Commands use `speckit.{extension}.{command}` as canonical name, with optional aliases defined in manifest.
|
||||||
|
|
||||||
- A) Prefixed: `/speckit.jira.specstoissues` (explicit, avoids conflicts)
|
**Implementation**: The `aliases` field in `extension.yml` allows extensions to register additional command names.
|
||||||
- B) Short alias: `/jira.specstoissues` (shorter, less verbose)
|
|
||||||
- C) Both: Register both names, prefer prefixed in docs
|
|
||||||
|
|
||||||
**Recommendation**: C (both), prefixed is canonical
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### 2. Config File Location
|
### 2. Config File Location ✅ RESOLVED
|
||||||
|
|
||||||
**Question**: Where should extension configs live?
|
**Question**: Where should extension configs live?
|
||||||
|
|
||||||
**Options**:
|
**Decision**: **Option A** - Extension directory (`.specify/extensions/{ext-id}/{ext-id}-config.yml`). This keeps extensions self-contained and easier to manage.
|
||||||
|
|
||||||
- A) Extension directory: `.specify/extensions/jira/jira-config.yml` (encapsulated)
|
**Implementation**: Each extension has its own config file within its directory, with layered resolution (defaults → project → local → env vars).
|
||||||
- B) Root level: `.specify/jira-config.yml` (more visible)
|
|
||||||
- C) Unified: `.specify/extensions.yml` (all extension configs in one file)
|
|
||||||
|
|
||||||
**Recommendation**: A (extension directory), cleaner separation
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### 3. Command File Format
|
### 3. Command File Format ✅ RESOLVED
|
||||||
|
|
||||||
**Question**: Should extensions use universal format or agent-specific?
|
**Question**: Should extensions use universal format or agent-specific?
|
||||||
|
|
||||||
**Options**:
|
**Decision**: **Option A** - Universal Markdown format. Extensions write commands once, CLI converts to agent-specific format during registration.
|
||||||
|
|
||||||
- A) Universal Markdown: Extensions write once, CLI converts per-agent
|
**Implementation**: `CommandRegistrar` class handles conversion to 15+ agent formats (Claude, Copilot, Gemini, Cursor, etc.).
|
||||||
- B) Agent-specific: Extensions provide separate files for each agent
|
|
||||||
- C) Hybrid: Universal default, agent-specific overrides
|
|
||||||
|
|
||||||
**Recommendation**: A (universal), reduces duplication
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### 4. Hook Execution Model
|
### 4. Hook Execution Model ✅ RESOLVED
|
||||||
|
|
||||||
**Question**: How should hooks execute?
|
**Question**: How should hooks execute?
|
||||||
|
|
||||||
**Options**:
|
**Decision**: **Option A** - Hooks are registered in `.specify/extensions.yml` and executed by the AI agent when it sees the hook trigger. Hook state (enabled/disabled) is managed per-extension.
|
||||||
|
|
||||||
- A) AI agent interprets: Core commands output `EXECUTE_COMMAND: name`
|
**Implementation**: `HookExecutor` class manages hook registration and state in `extensions.yml`.
|
||||||
- B) CLI executes: Core commands call `specify extension hook after_tasks`
|
|
||||||
- C) Agent built-in: Extension system built into AI agent (Claude SDK)
|
|
||||||
|
|
||||||
**Recommendation**: A initially (simpler), move to C long-term
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### 5. Extension Distribution
|
### 5. Extension Distribution ✅ RESOLVED
|
||||||
|
|
||||||
**Question**: How should extensions be packaged?
|
**Question**: How should extensions be packaged?
|
||||||
|
|
||||||
**Options**:
|
**Decision**: **Option A** - ZIP archives downloaded from GitHub releases (via catalog `download_url`). Local development uses `--dev` flag with directory path.
|
||||||
|
|
||||||
- A) ZIP archives: Downloaded from GitHub releases
|
**Implementation**: `ExtensionManager.install_from_zip()` handles ZIP extraction and validation.
|
||||||
- B) Git repos: Cloned directly (`git clone`)
|
|
||||||
- C) Python packages: Installable via `uv tool install`
|
|
||||||
|
|
||||||
**Recommendation**: A (ZIP), simpler for non-Python extensions in future
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### 6. Multi-Version Support
|
### 6. Multi-Version Support ✅ RESOLVED
|
||||||
|
|
||||||
**Question**: Can multiple versions of same extension coexist?
|
**Question**: Can multiple versions of same extension coexist?
|
||||||
|
|
||||||
|
**Decision**: **Option A** - Single version only. Updates replace the existing version with atomic rollback on failure.
|
||||||
|
|
||||||
|
**Implementation**: `extension update` performs atomic backup/restore to ensure safe updates.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Open Questions (Remaining)
|
||||||
|
|
||||||
|
### 1. Sandboxing / Permissions (Future)
|
||||||
|
|
||||||
|
**Question**: Should extensions declare required permissions?
|
||||||
|
|
||||||
**Options**:
|
**Options**:
|
||||||
|
|
||||||
- A) Single version: Only one version installed at a time
|
- A) No sandboxing (current): Extensions run with same privileges as AI agent
|
||||||
- B) Multi-version: Side-by-side versions (`.specify/extensions/jira@1.0/`, `.specify/extensions/jira@2.0/`)
|
- B) Permission declarations: Extensions declare `filesystem:read`, `network:external`, etc.
|
||||||
- C) Per-branch: Different branches use different versions
|
- C) Opt-in sandboxing: Organizations can enable permission enforcement
|
||||||
|
|
||||||
**Recommendation**: A initially (simpler), consider B in future if needed
|
**Status**: Deferred to future version. Currently using trust-based model where users trust extension authors.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 2. Package Signatures (Future)
|
||||||
|
|
||||||
|
**Question**: Should extensions be cryptographically signed?
|
||||||
|
|
||||||
|
**Options**:
|
||||||
|
|
||||||
|
- A) No signatures (current): Trust based on catalog source
|
||||||
|
- B) GPG/Sigstore signatures: Verify package integrity
|
||||||
|
- C) Catalog-level verification: Catalog maintainers verify packages
|
||||||
|
|
||||||
|
**Status**: Deferred to future version. `checksum` field is available in catalog schema but not enforced.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"schema_version": "1.0",
|
"schema_version": "1.0",
|
||||||
"updated_at": "2026-03-09T00:00:00Z",
|
"updated_at": "2026-03-13T12:00:00Z",
|
||||||
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json",
|
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json",
|
||||||
"extensions": {
|
"extensions": {
|
||||||
"azure-devops": {
|
"azure-devops": {
|
||||||
@@ -74,6 +74,37 @@
|
|||||||
"created_at": "2026-02-22T00:00:00Z",
|
"created_at": "2026-02-22T00:00:00Z",
|
||||||
"updated_at": "2026-02-22T00:00:00Z"
|
"updated_at": "2026-02-22T00:00:00Z"
|
||||||
},
|
},
|
||||||
|
"doctor": {
|
||||||
|
"name": "Project Health Check",
|
||||||
|
"id": "doctor",
|
||||||
|
"description": "Diagnose a Spec Kit project and report health issues across structure, agents, features, scripts, extensions, and git.",
|
||||||
|
"author": "KhawarHabibKhan",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"download_url": "https://github.com/KhawarHabibKhan/spec-kit-doctor/archive/refs/tags/v1.0.0.zip",
|
||||||
|
"repository": "https://github.com/KhawarHabibKhan/spec-kit-doctor",
|
||||||
|
"homepage": "https://github.com/KhawarHabibKhan/spec-kit-doctor",
|
||||||
|
"documentation": "https://github.com/KhawarHabibKhan/spec-kit-doctor/blob/main/README.md",
|
||||||
|
"changelog": "https://github.com/KhawarHabibKhan/spec-kit-doctor/blob/main/CHANGELOG.md",
|
||||||
|
"license": "MIT",
|
||||||
|
"requires": {
|
||||||
|
"speckit_version": ">=0.1.0"
|
||||||
|
},
|
||||||
|
"provides": {
|
||||||
|
"commands": 1,
|
||||||
|
"hooks": 0
|
||||||
|
},
|
||||||
|
"tags": [
|
||||||
|
"diagnostics",
|
||||||
|
"health-check",
|
||||||
|
"validation",
|
||||||
|
"project-structure"
|
||||||
|
],
|
||||||
|
"verified": false,
|
||||||
|
"downloads": 0,
|
||||||
|
"stars": 0,
|
||||||
|
"created_at": "2026-03-13T00:00:00Z",
|
||||||
|
"updated_at": "2026-03-13T00:00:00Z"
|
||||||
|
},
|
||||||
"fleet": {
|
"fleet": {
|
||||||
"name": "Fleet Orchestrator",
|
"name": "Fleet Orchestrator",
|
||||||
"id": "fleet",
|
"id": "fleet",
|
||||||
|
|||||||
@@ -1,6 +1,21 @@
|
|||||||
{
|
{
|
||||||
"schema_version": "1.0",
|
"schema_version": "1.0",
|
||||||
"updated_at": "2026-02-03T00:00:00Z",
|
"updated_at": "2026-03-10T00:00:00Z",
|
||||||
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.json",
|
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.json",
|
||||||
"extensions": {}
|
"extensions": {
|
||||||
}
|
"selftest": {
|
||||||
|
"name": "Spec Kit Self-Test Utility",
|
||||||
|
"id": "selftest",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"description": "Verifies catalog extensions by programmatically walking through the discovery, installation, and registration lifecycle.",
|
||||||
|
"author": "spec-kit-core",
|
||||||
|
"repository": "https://github.com/github/spec-kit",
|
||||||
|
"download_url": "https://github.com/github/spec-kit/releases/download/selftest-v1.0.0/selftest.zip",
|
||||||
|
"tags": [
|
||||||
|
"testing",
|
||||||
|
"core",
|
||||||
|
"utility"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
69
extensions/selftest/commands/selftest.md
Normal file
69
extensions/selftest/commands/selftest.md
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
---
|
||||||
|
description: "Validate the lifecycle of an extension from the catalog."
|
||||||
|
---
|
||||||
|
|
||||||
|
# Extension Self-Test: `$ARGUMENTS`
|
||||||
|
|
||||||
|
This command drives a self-test simulating the developer experience with the `$ARGUMENTS` extension.
|
||||||
|
|
||||||
|
## Goal
|
||||||
|
|
||||||
|
Validate the end-to-end lifecycle (discovery, installation, registration) for the extension: `$ARGUMENTS`.
|
||||||
|
If `$ARGUMENTS` is empty, you must tell the user to provide an extension name, for example: `/speckit.selftest.extension linear`.
|
||||||
|
|
||||||
|
## Steps
|
||||||
|
|
||||||
|
### Step 1: Catalog Discovery Validation
|
||||||
|
|
||||||
|
Check if the extension exists in the Spec Kit catalog.
|
||||||
|
Execute this command and verify that it completes successfully and that the returned extension ID exactly matches `$ARGUMENTS`. If the command fails or the ID does not match `$ARGUMENTS`, fail the test.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
specify extension info "$ARGUMENTS"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Simulate Installation
|
||||||
|
|
||||||
|
First, try to add the extension to the current workspace configuration directly. If the catalog provides the extension as `install_allowed: false` (discovery-only), this step is *expected* to fail.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
specify extension add "$ARGUMENTS"
|
||||||
|
```
|
||||||
|
|
||||||
|
Then, simulate adding the extension by installing it from its catalog download URL, which should bypass the restriction.
|
||||||
|
Obtain the extension's `download_url` from the catalog metadata (for example, via a catalog info command or UI), then run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
specify extension add "$ARGUMENTS" --from "<download_url>"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Registration Verification
|
||||||
|
|
||||||
|
Once the `add` command completes, verify the installation by checking the project configuration.
|
||||||
|
Use terminal tools (like `cat`) to verify that the following file contains a record for `$ARGUMENTS`.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cat .specify/extensions/.registry/$ARGUMENTS.json
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Verification Report
|
||||||
|
|
||||||
|
Analyze the standard output of the three steps.
|
||||||
|
Generate a terminal-style test output format detailing the results of discovery, installation, and registration. Return this directly to the user.
|
||||||
|
|
||||||
|
Example output format:
|
||||||
|
```text
|
||||||
|
============================= test session starts ==============================
|
||||||
|
collected 3 items
|
||||||
|
|
||||||
|
test_selftest_discovery.py::test_catalog_search [PASS/FAIL]
|
||||||
|
Details: [Provide execution result of specify extension info]
|
||||||
|
|
||||||
|
test_selftest_installation.py::test_extension_add [PASS/FAIL]
|
||||||
|
Details: [Provide execution result of specify extension add]
|
||||||
|
|
||||||
|
test_selftest_registration.py::test_config_verification [PASS/FAIL]
|
||||||
|
Details: [Provide execution result of registry record verification]
|
||||||
|
|
||||||
|
============================== [X] passed in ... ==============================
|
||||||
|
```
|
||||||
16
extensions/selftest/extension.yml
Normal file
16
extensions/selftest/extension.yml
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
schema_version: "1.0"
|
||||||
|
extension:
|
||||||
|
id: selftest
|
||||||
|
name: Spec Kit Self-Test Utility
|
||||||
|
version: 1.0.0
|
||||||
|
description: Verifies catalog extensions by programmatically walking through the discovery, installation, and registration lifecycle.
|
||||||
|
author: spec-kit-core
|
||||||
|
repository: https://github.com/github/spec-kit
|
||||||
|
license: MIT
|
||||||
|
requires:
|
||||||
|
speckit_version: ">=0.2.0"
|
||||||
|
provides:
|
||||||
|
commands:
|
||||||
|
- name: speckit.selftest.extension
|
||||||
|
file: commands/selftest.md
|
||||||
|
description: Validate the lifecycle of an extension from the catalog.
|
||||||
157
presets/ARCHITECTURE.md
Normal file
157
presets/ARCHITECTURE.md
Normal file
@@ -0,0 +1,157 @@
|
|||||||
|
# Preset System Architecture
|
||||||
|
|
||||||
|
This document describes the internal architecture of the preset system — how template resolution, command registration, and catalog management work under the hood.
|
||||||
|
|
||||||
|
For usage instructions, see [README.md](README.md).
|
||||||
|
|
||||||
|
## Template Resolution
|
||||||
|
|
||||||
|
When Spec Kit needs a template (e.g. `spec-template`), the `PresetResolver` walks a priority stack and returns the first match:
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart TD
|
||||||
|
A["resolve_template('spec-template')"] --> B{Override exists?}
|
||||||
|
B -- Yes --> C[".specify/templates/overrides/spec-template.md"]
|
||||||
|
B -- No --> D{Preset provides it?}
|
||||||
|
D -- Yes --> E[".specify/presets/‹preset-id›/templates/spec-template.md"]
|
||||||
|
D -- No --> F{Extension provides it?}
|
||||||
|
F -- Yes --> G[".specify/extensions/‹ext-id›/templates/spec-template.md"]
|
||||||
|
F -- No --> H[".specify/templates/spec-template.md"]
|
||||||
|
|
||||||
|
E -- "multiple presets?" --> I["lowest priority number wins"]
|
||||||
|
I --> E
|
||||||
|
|
||||||
|
style C fill:#4caf50,color:#fff
|
||||||
|
style E fill:#2196f3,color:#fff
|
||||||
|
style G fill:#ff9800,color:#fff
|
||||||
|
style H fill:#9e9e9e,color:#fff
|
||||||
|
```
|
||||||
|
|
||||||
|
| Priority | Source | Path | Use case |
|
||||||
|
|----------|--------|------|----------|
|
||||||
|
| 1 (highest) | Override | `.specify/templates/overrides/` | One-off project-local tweaks |
|
||||||
|
| 2 | Preset | `.specify/presets/<id>/templates/` | Shareable, stackable customizations |
|
||||||
|
| 3 | Extension | `.specify/extensions/<id>/templates/` | Extension-provided templates |
|
||||||
|
| 4 (lowest) | Core | `.specify/templates/` | Shipped defaults |
|
||||||
|
|
||||||
|
When multiple presets are installed, they're sorted by their `priority` field (lower number = higher precedence). This is set via `--priority` on `specify preset add`.
|
||||||
|
|
||||||
|
The resolution is implemented three times to ensure consistency:
|
||||||
|
- **Python**: `PresetResolver` in `src/specify_cli/presets.py`
|
||||||
|
- **Bash**: `resolve_template()` in `scripts/bash/common.sh`
|
||||||
|
- **PowerShell**: `Resolve-Template` in `scripts/powershell/common.ps1`
|
||||||
|
|
||||||
|
## Command Registration
|
||||||
|
|
||||||
|
When a preset is installed with `type: "command"` entries, the `PresetManager` registers them into all detected agent directories using the shared `CommandRegistrar` from `src/specify_cli/agents.py`.
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart TD
|
||||||
|
A["specify preset add my-preset"] --> B{Preset has type: command?}
|
||||||
|
B -- No --> Z["done (templates only)"]
|
||||||
|
B -- Yes --> C{Extension command?}
|
||||||
|
C -- "speckit.myext.cmd\n(3+ dot segments)" --> D{Extension installed?}
|
||||||
|
D -- No --> E["skip (extension not active)"]
|
||||||
|
D -- Yes --> F["register command"]
|
||||||
|
C -- "speckit.specify\n(core command)" --> F
|
||||||
|
F --> G["detect agent directories"]
|
||||||
|
G --> H[".claude/commands/"]
|
||||||
|
G --> I[".gemini/commands/"]
|
||||||
|
G --> J[".github/agents/"]
|
||||||
|
G --> K["... (17+ agents)"]
|
||||||
|
H --> L["write .md (Markdown format)"]
|
||||||
|
I --> M["write .toml (TOML format)"]
|
||||||
|
J --> N["write .agent.md + .prompt.md"]
|
||||||
|
|
||||||
|
style E fill:#ff5722,color:#fff
|
||||||
|
style L fill:#4caf50,color:#fff
|
||||||
|
style M fill:#4caf50,color:#fff
|
||||||
|
style N fill:#4caf50,color:#fff
|
||||||
|
```
|
||||||
|
|
||||||
|
### Extension safety check
|
||||||
|
|
||||||
|
Command names follow the pattern `speckit.<ext-id>.<cmd-name>`. When a command has 3+ dot segments, the system extracts the extension ID and checks if `.specify/extensions/<ext-id>/` exists. If the extension isn't installed, the command is skipped — preventing orphan files referencing non-existent extensions.
|
||||||
|
|
||||||
|
Core commands (e.g. `speckit.specify`, with only 2 segments) are always registered.
|
||||||
|
|
||||||
|
### Agent format rendering
|
||||||
|
|
||||||
|
The `CommandRegistrar` renders commands differently per agent:
|
||||||
|
|
||||||
|
| Agent | Format | Extension | Arg placeholder |
|
||||||
|
|-------|--------|-----------|-----------------|
|
||||||
|
| Claude, Cursor, opencode, Windsurf, etc. | Markdown | `.md` | `$ARGUMENTS` |
|
||||||
|
| Copilot | Markdown | `.agent.md` + `.prompt.md` | `$ARGUMENTS` |
|
||||||
|
| Gemini, Tabnine | TOML | `.toml` | `{{args}}` |
|
||||||
|
|
||||||
|
### Cleanup on removal
|
||||||
|
|
||||||
|
When `specify preset remove` is called, the registered commands are read from the registry metadata and the corresponding files are deleted from each agent directory, including Copilot companion `.prompt.md` files.
|
||||||
|
|
||||||
|
## Catalog System
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart TD
|
||||||
|
A["specify preset search"] --> B["PresetCatalog.get_active_catalogs()"]
|
||||||
|
B --> C{SPECKIT_PRESET_CATALOG_URL set?}
|
||||||
|
C -- Yes --> D["single custom catalog"]
|
||||||
|
C -- No --> E{.specify/preset-catalogs.yml exists?}
|
||||||
|
E -- Yes --> F["project-level catalog stack"]
|
||||||
|
E -- No --> G{"~/.specify/preset-catalogs.yml exists?"}
|
||||||
|
G -- Yes --> H["user-level catalog stack"]
|
||||||
|
G -- No --> I["built-in defaults"]
|
||||||
|
I --> J["default (install allowed)"]
|
||||||
|
I --> K["community (discovery only)"]
|
||||||
|
|
||||||
|
style D fill:#ff9800,color:#fff
|
||||||
|
style F fill:#2196f3,color:#fff
|
||||||
|
style H fill:#2196f3,color:#fff
|
||||||
|
style J fill:#4caf50,color:#fff
|
||||||
|
style K fill:#9e9e9e,color:#fff
|
||||||
|
```
|
||||||
|
|
||||||
|
Catalogs are fetched with a 1-hour cache (per-URL, SHA256-hashed cache files). Each catalog entry has a `priority` (for merge ordering) and `install_allowed` flag.
|
||||||
|
|
||||||
|
## Repository Layout
|
||||||
|
|
||||||
|
```
|
||||||
|
presets/
|
||||||
|
├── ARCHITECTURE.md # This file
|
||||||
|
├── PUBLISHING.md # Guide for submitting presets to the catalog
|
||||||
|
├── README.md # User guide
|
||||||
|
├── catalog.json # Official preset catalog
|
||||||
|
├── catalog.community.json # Community preset catalog
|
||||||
|
├── scaffold/ # Scaffold for creating new presets
|
||||||
|
│ ├── preset.yml # Example manifest
|
||||||
|
│ ├── README.md # Guide for customizing the scaffold
|
||||||
|
│ ├── commands/
|
||||||
|
│ │ ├── speckit.specify.md # Core command override example
|
||||||
|
│ │ └── speckit.myext.myextcmd.md # Extension command override example
|
||||||
|
│ └── templates/
|
||||||
|
│ ├── spec-template.md # Core template override example
|
||||||
|
│ └── myext-template.md # Extension template override example
|
||||||
|
└── self-test/ # Self-test preset (overrides all core templates)
|
||||||
|
├── preset.yml
|
||||||
|
├── commands/
|
||||||
|
│ └── speckit.specify.md
|
||||||
|
└── templates/
|
||||||
|
├── spec-template.md
|
||||||
|
├── plan-template.md
|
||||||
|
├── tasks-template.md
|
||||||
|
├── checklist-template.md
|
||||||
|
├── constitution-template.md
|
||||||
|
└── agent-file-template.md
|
||||||
|
```
|
||||||
|
|
||||||
|
## Module Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
src/specify_cli/
|
||||||
|
├── agents.py # CommandRegistrar — shared infrastructure for writing
|
||||||
|
│ # command files to agent directories
|
||||||
|
├── presets.py # PresetManifest, PresetRegistry, PresetManager,
|
||||||
|
│ # PresetCatalog, PresetCatalogEntry, PresetResolver
|
||||||
|
└── __init__.py # CLI commands: specify preset list/add/remove/search/
|
||||||
|
# resolve/info, specify preset catalog list/add/remove
|
||||||
|
```
|
||||||
295
presets/PUBLISHING.md
Normal file
295
presets/PUBLISHING.md
Normal file
@@ -0,0 +1,295 @@
|
|||||||
|
# Preset Publishing Guide
|
||||||
|
|
||||||
|
This guide explains how to publish your preset to the Spec Kit preset catalog, making it discoverable by `specify preset search`.
|
||||||
|
|
||||||
|
## Table of Contents
|
||||||
|
|
||||||
|
1. [Prerequisites](#prerequisites)
|
||||||
|
2. [Prepare Your Preset](#prepare-your-preset)
|
||||||
|
3. [Submit to Catalog](#submit-to-catalog)
|
||||||
|
4. [Verification Process](#verification-process)
|
||||||
|
5. [Release Workflow](#release-workflow)
|
||||||
|
6. [Best Practices](#best-practices)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
Before publishing a preset, ensure you have:
|
||||||
|
|
||||||
|
1. **Valid Preset**: A working preset with a valid `preset.yml` manifest
|
||||||
|
2. **Git Repository**: Preset hosted on GitHub (or other public git hosting)
|
||||||
|
3. **Documentation**: README.md with description and usage instructions
|
||||||
|
4. **License**: Open source license file (MIT, Apache 2.0, etc.)
|
||||||
|
5. **Versioning**: Semantic versioning (e.g., 1.0.0)
|
||||||
|
6. **Testing**: Preset tested on real projects with `specify preset add --dev`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Prepare Your Preset
|
||||||
|
|
||||||
|
### 1. Preset Structure
|
||||||
|
|
||||||
|
Ensure your preset follows the standard structure:
|
||||||
|
|
||||||
|
```text
|
||||||
|
your-preset/
|
||||||
|
├── preset.yml # Required: Preset manifest
|
||||||
|
├── README.md # Required: Documentation
|
||||||
|
├── LICENSE # Required: License file
|
||||||
|
├── CHANGELOG.md # Recommended: Version history
|
||||||
|
│
|
||||||
|
├── templates/ # Template overrides
|
||||||
|
│ ├── spec-template.md
|
||||||
|
│ ├── plan-template.md
|
||||||
|
│ └── ...
|
||||||
|
│
|
||||||
|
└── commands/ # Command overrides (optional)
|
||||||
|
└── speckit.specify.md
|
||||||
|
```
|
||||||
|
|
||||||
|
Start from the [scaffold](scaffold/) if you're creating a new preset.
|
||||||
|
|
||||||
|
### 2. preset.yml Validation
|
||||||
|
|
||||||
|
Verify your manifest is valid:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
schema_version: "1.0"
|
||||||
|
|
||||||
|
preset:
|
||||||
|
id: "your-preset" # Unique lowercase-hyphenated ID
|
||||||
|
name: "Your Preset Name" # Human-readable name
|
||||||
|
version: "1.0.0" # Semantic version
|
||||||
|
description: "Brief description (one sentence)"
|
||||||
|
author: "Your Name or Organization"
|
||||||
|
repository: "https://github.com/your-org/spec-kit-preset-your-preset"
|
||||||
|
license: "MIT"
|
||||||
|
|
||||||
|
requires:
|
||||||
|
speckit_version: ">=0.1.0" # Required spec-kit version
|
||||||
|
|
||||||
|
provides:
|
||||||
|
templates:
|
||||||
|
- type: "template"
|
||||||
|
name: "spec-template"
|
||||||
|
file: "templates/spec-template.md"
|
||||||
|
description: "Custom spec template"
|
||||||
|
replaces: "spec-template"
|
||||||
|
|
||||||
|
tags: # 2-5 relevant tags
|
||||||
|
- "category"
|
||||||
|
- "workflow"
|
||||||
|
```
|
||||||
|
|
||||||
|
**Validation Checklist**:
|
||||||
|
|
||||||
|
- ✅ `id` is lowercase with hyphens only (no underscores, spaces, or special characters)
|
||||||
|
- ✅ `version` follows semantic versioning (X.Y.Z)
|
||||||
|
- ✅ `description` is concise (under 200 characters)
|
||||||
|
- ✅ `repository` URL is valid and public
|
||||||
|
- ✅ All template and command files exist in the preset directory
|
||||||
|
- ✅ Template names are lowercase with hyphens only
|
||||||
|
- ✅ Command names use dot notation (e.g. `speckit.specify`)
|
||||||
|
- ✅ Tags are lowercase and descriptive
|
||||||
|
|
||||||
|
### 3. Test Locally
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install from local directory
|
||||||
|
specify preset add --dev /path/to/your-preset
|
||||||
|
|
||||||
|
# Verify templates resolve from your preset
|
||||||
|
specify preset resolve spec-template
|
||||||
|
|
||||||
|
# Verify preset info
|
||||||
|
specify preset info your-preset
|
||||||
|
|
||||||
|
# List installed presets
|
||||||
|
specify preset list
|
||||||
|
|
||||||
|
# Remove when done testing
|
||||||
|
specify preset remove your-preset
|
||||||
|
```
|
||||||
|
|
||||||
|
If your preset includes command overrides, verify they appear in the agent directories:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check Claude commands (if using Claude)
|
||||||
|
ls .claude/commands/speckit.*.md
|
||||||
|
|
||||||
|
# Check Copilot commands (if using Copilot)
|
||||||
|
ls .github/agents/speckit.*.agent.md
|
||||||
|
|
||||||
|
# Check Gemini commands (if using Gemini)
|
||||||
|
ls .gemini/commands/speckit.*.toml
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Create GitHub Release
|
||||||
|
|
||||||
|
Create a GitHub release for your preset version:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Tag the release
|
||||||
|
git tag v1.0.0
|
||||||
|
git push origin v1.0.0
|
||||||
|
```
|
||||||
|
|
||||||
|
The release archive URL will be:
|
||||||
|
|
||||||
|
```text
|
||||||
|
https://github.com/your-org/spec-kit-preset-your-preset/archive/refs/tags/v1.0.0.zip
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Test Installation from Archive
|
||||||
|
|
||||||
|
```bash
|
||||||
|
specify preset add --from https://github.com/your-org/spec-kit-preset-your-preset/archive/refs/tags/v1.0.0.zip
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Submit to Catalog
|
||||||
|
|
||||||
|
### Understanding the Catalogs
|
||||||
|
|
||||||
|
Spec Kit uses a dual-catalog system:
|
||||||
|
|
||||||
|
- **`catalog.json`** — Official, verified presets (install allowed by default)
|
||||||
|
- **`catalog.community.json`** — Community-contributed presets (discovery only by default)
|
||||||
|
|
||||||
|
All community presets should be submitted to `catalog.community.json`.
|
||||||
|
|
||||||
|
### 1. Fork the spec-kit Repository
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/YOUR-USERNAME/spec-kit.git
|
||||||
|
cd spec-kit
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Add Preset to Community Catalog
|
||||||
|
|
||||||
|
Edit `presets/catalog.community.json` and add your preset.
|
||||||
|
|
||||||
|
> **⚠️ Entries must be sorted alphabetically by preset ID.** Insert your preset in the correct position within the `"presets"` object.
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"schema_version": "1.0",
|
||||||
|
"updated_at": "2026-03-10T00:00:00Z",
|
||||||
|
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/presets/catalog.community.json",
|
||||||
|
"presets": {
|
||||||
|
"your-preset": {
|
||||||
|
"name": "Your Preset Name",
|
||||||
|
"description": "Brief description of what your preset provides",
|
||||||
|
"author": "Your Name",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"download_url": "https://github.com/your-org/spec-kit-preset-your-preset/archive/refs/tags/v1.0.0.zip",
|
||||||
|
"repository": "https://github.com/your-org/spec-kit-preset-your-preset",
|
||||||
|
"license": "MIT",
|
||||||
|
"requires": {
|
||||||
|
"speckit_version": ">=0.1.0"
|
||||||
|
},
|
||||||
|
"provides": {
|
||||||
|
"templates": 3,
|
||||||
|
"commands": 1
|
||||||
|
},
|
||||||
|
"tags": [
|
||||||
|
"category",
|
||||||
|
"workflow"
|
||||||
|
],
|
||||||
|
"created_at": "2026-03-10T00:00:00Z",
|
||||||
|
"updated_at": "2026-03-10T00:00:00Z"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Submit Pull Request
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git checkout -b add-your-preset
|
||||||
|
git add presets/catalog.community.json
|
||||||
|
git commit -m "Add your-preset to community catalog
|
||||||
|
|
||||||
|
- Preset ID: your-preset
|
||||||
|
- Version: 1.0.0
|
||||||
|
- Author: Your Name
|
||||||
|
- Description: Brief description
|
||||||
|
"
|
||||||
|
git push origin add-your-preset
|
||||||
|
```
|
||||||
|
|
||||||
|
**Pull Request Checklist**:
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
## Preset Submission
|
||||||
|
|
||||||
|
**Preset Name**: Your Preset Name
|
||||||
|
**Preset ID**: your-preset
|
||||||
|
**Version**: 1.0.0
|
||||||
|
**Repository**: https://github.com/your-org/spec-kit-preset-your-preset
|
||||||
|
|
||||||
|
### Checklist
|
||||||
|
- [ ] Valid preset.yml manifest
|
||||||
|
- [ ] README.md with description and usage
|
||||||
|
- [ ] LICENSE file included
|
||||||
|
- [ ] GitHub release created
|
||||||
|
- [ ] Preset tested with `specify preset add --dev`
|
||||||
|
- [ ] Templates resolve correctly (`specify preset resolve`)
|
||||||
|
- [ ] Commands register to agent directories (if applicable)
|
||||||
|
- [ ] Commands match template sections (command + template are coherent)
|
||||||
|
- [ ] Added to presets/catalog.community.json
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Verification Process
|
||||||
|
|
||||||
|
After submission, maintainers will review:
|
||||||
|
|
||||||
|
1. **Manifest validation** — valid `preset.yml`, all files exist
|
||||||
|
2. **Template quality** — templates are useful and well-structured
|
||||||
|
3. **Command coherence** — commands reference sections that exist in templates
|
||||||
|
4. **Security** — no malicious content, safe file operations
|
||||||
|
5. **Documentation** — clear README explaining what the preset does
|
||||||
|
|
||||||
|
Once verified, `verified: true` is set and the preset appears in `specify preset search`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Release Workflow
|
||||||
|
|
||||||
|
When releasing a new version:
|
||||||
|
|
||||||
|
1. Update `version` in `preset.yml`
|
||||||
|
2. Update CHANGELOG.md
|
||||||
|
3. Tag and push: `git tag v1.1.0 && git push origin v1.1.0`
|
||||||
|
4. Submit PR to update `version` and `download_url` in `presets/catalog.community.json`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
### Template Design
|
||||||
|
|
||||||
|
- **Keep sections clear** — use headings and placeholder text the LLM can replace
|
||||||
|
- **Match commands to templates** — if your preset overrides a command, make sure it references the sections in your template
|
||||||
|
- **Document customization points** — use HTML comments to guide users on what to change
|
||||||
|
|
||||||
|
### Naming
|
||||||
|
|
||||||
|
- Preset IDs should be descriptive: `healthcare-compliance`, `enterprise-safe`, `startup-lean`
|
||||||
|
- Avoid generic names: `my-preset`, `custom`, `test`
|
||||||
|
|
||||||
|
### Stacking
|
||||||
|
|
||||||
|
- Design presets to work well when stacked with others
|
||||||
|
- Only override templates you need to change
|
||||||
|
- Document which templates and commands your preset modifies
|
||||||
|
|
||||||
|
### Command Overrides
|
||||||
|
|
||||||
|
- Only override commands when the workflow needs to change, not just the output format
|
||||||
|
- If you only need different template sections, a template override is sufficient
|
||||||
|
- Test command overrides with multiple agents (Claude, Gemini, Copilot)
|
||||||
115
presets/README.md
Normal file
115
presets/README.md
Normal file
@@ -0,0 +1,115 @@
|
|||||||
|
# Presets
|
||||||
|
|
||||||
|
Presets are stackable, priority-ordered collections of template and command overrides for Spec Kit. They let you customize both the artifacts produced by the Spec-Driven Development workflow (specs, plans, tasks, checklists, constitutions) and the commands that guide the LLM in creating them — without forking or modifying core files.
|
||||||
|
|
||||||
|
## How It Works
|
||||||
|
|
||||||
|
When Spec Kit needs a template (e.g. `spec-template`), it walks a resolution stack:
|
||||||
|
|
||||||
|
1. `.specify/templates/overrides/` — project-local one-off overrides
|
||||||
|
2. `.specify/presets/<preset-id>/templates/` — installed presets (sorted by priority)
|
||||||
|
3. `.specify/extensions/<ext-id>/templates/` — extension-provided templates
|
||||||
|
4. `.specify/templates/` — core templates shipped with Spec Kit
|
||||||
|
|
||||||
|
If no preset is installed, core templates are used — exactly the same behavior as before presets existed.
|
||||||
|
|
||||||
|
For detailed resolution and command registration flows, see [ARCHITECTURE.md](ARCHITECTURE.md).
|
||||||
|
|
||||||
|
## Command Overrides
|
||||||
|
|
||||||
|
Presets can also override the commands that guide the SDD workflow. Templates define *what* gets produced (specs, plans, constitutions); commands define *how* the LLM produces them (the step-by-step instructions).
|
||||||
|
|
||||||
|
When a preset includes `type: "command"` entries, the commands are automatically registered into all detected agent directories (`.claude/commands/`, `.gemini/commands/`, etc.) in the correct format (Markdown or TOML with appropriate argument placeholders). When the preset is removed, the registered commands are cleaned up.
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Search available presets
|
||||||
|
specify preset search
|
||||||
|
|
||||||
|
# Install a preset from the catalog
|
||||||
|
specify preset add healthcare-compliance
|
||||||
|
|
||||||
|
# Install from a local directory (for development)
|
||||||
|
specify preset add --dev ./my-preset
|
||||||
|
|
||||||
|
# Install with a specific priority (lower = higher precedence)
|
||||||
|
specify preset add healthcare-compliance --priority 5
|
||||||
|
|
||||||
|
# List installed presets
|
||||||
|
specify preset list
|
||||||
|
|
||||||
|
# See which template a name resolves to
|
||||||
|
specify preset resolve spec-template
|
||||||
|
|
||||||
|
# Get detailed info about a preset
|
||||||
|
specify preset info healthcare-compliance
|
||||||
|
|
||||||
|
# Remove a preset
|
||||||
|
specify preset remove healthcare-compliance
|
||||||
|
```
|
||||||
|
|
||||||
|
## Stacking Presets
|
||||||
|
|
||||||
|
Multiple presets can be installed simultaneously. The `--priority` flag controls which one wins when two presets provide the same template (lower number = higher precedence):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
specify preset add enterprise-safe --priority 10 # base layer
|
||||||
|
specify preset add healthcare-compliance --priority 5 # overrides enterprise-safe
|
||||||
|
specify preset add pm-workflow --priority 1 # overrides everything
|
||||||
|
```
|
||||||
|
|
||||||
|
Presets **override**, they don't merge. If two presets both provide `spec-template`, the one with the lowest priority number wins entirely.
|
||||||
|
|
||||||
|
## Catalog Management
|
||||||
|
|
||||||
|
Presets are discovered through catalogs. By default, Spec Kit uses the official and community catalogs:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# List active catalogs
|
||||||
|
specify preset catalog list
|
||||||
|
|
||||||
|
# Add a custom catalog
|
||||||
|
specify preset catalog add https://example.com/catalog.json --name my-org --install-allowed
|
||||||
|
|
||||||
|
# Remove a catalog
|
||||||
|
specify preset catalog remove my-org
|
||||||
|
```
|
||||||
|
|
||||||
|
## Creating a Preset
|
||||||
|
|
||||||
|
See [scaffold/](scaffold/) for a scaffold you can copy to create your own preset.
|
||||||
|
|
||||||
|
1. Copy `scaffold/` to a new directory
|
||||||
|
2. Edit `preset.yml` with your preset's metadata
|
||||||
|
3. Add or replace templates in `templates/`
|
||||||
|
4. Test locally with `specify preset add --dev .`
|
||||||
|
5. Verify with `specify preset resolve spec-template`
|
||||||
|
|
||||||
|
## Environment Variables
|
||||||
|
|
||||||
|
| Variable | Description |
|
||||||
|
|----------|-------------|
|
||||||
|
| `SPECKIT_PRESET_CATALOG_URL` | Override the catalog URL (replaces all defaults) |
|
||||||
|
|
||||||
|
## Configuration Files
|
||||||
|
|
||||||
|
| File | Scope | Description |
|
||||||
|
|------|-------|-------------|
|
||||||
|
| `.specify/preset-catalogs.yml` | Project | Custom catalog stack for this project |
|
||||||
|
| `~/.specify/preset-catalogs.yml` | User | Custom catalog stack for all projects |
|
||||||
|
|
||||||
|
## Future Considerations
|
||||||
|
|
||||||
|
The following enhancements are under consideration for future releases:
|
||||||
|
|
||||||
|
- **Composition strategies** — Allow presets to declare a `strategy` per template instead of the default `replace`:
|
||||||
|
|
||||||
|
| Type | `replace` | `prepend` | `append` | `wrap` |
|
||||||
|
|------|-----------|-----------|----------|--------|
|
||||||
|
| **template** | ✓ (default) | ✓ | ✓ | ✓ |
|
||||||
|
| **command** | ✓ (default) | ✓ | ✓ | ✓ |
|
||||||
|
| **script** | ✓ (default) | — | — | ✓ |
|
||||||
|
|
||||||
|
For artifacts and commands (which are LLM directives), `wrap` would inject preset content before and after the core template using a `{CORE_TEMPLATE}` placeholder. For scripts, `wrap` would run custom logic before/after the core script via a `$CORE_SCRIPT` variable.
|
||||||
|
- **Script overrides** — Enable presets to provide alternative versions of core scripts (e.g. `create-new-feature.sh`) for workflow customization. A `strategy: "wrap"` option could allow presets to run custom logic before/after the core script without fully replacing it.
|
||||||
6
presets/catalog.community.json
Normal file
6
presets/catalog.community.json
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
{
|
||||||
|
"schema_version": "1.0",
|
||||||
|
"updated_at": "2026-03-09T00:00:00Z",
|
||||||
|
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/presets/catalog.community.json",
|
||||||
|
"presets": {}
|
||||||
|
}
|
||||||
6
presets/catalog.json
Normal file
6
presets/catalog.json
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
{
|
||||||
|
"schema_version": "1.0",
|
||||||
|
"updated_at": "2026-03-10T00:00:00Z",
|
||||||
|
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/presets/catalog.json",
|
||||||
|
"presets": {}
|
||||||
|
}
|
||||||
46
presets/scaffold/README.md
Normal file
46
presets/scaffold/README.md
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
# My Preset
|
||||||
|
|
||||||
|
A custom preset for Spec Kit. Copy this directory and customize it to create your own.
|
||||||
|
|
||||||
|
## Templates Included
|
||||||
|
|
||||||
|
| Template | Type | Description |
|
||||||
|
|----------|------|-------------|
|
||||||
|
| `spec-template` | template | Custom feature specification template (overrides core and extensions) |
|
||||||
|
| `myext-template` | template | Override of the myext extension's report template |
|
||||||
|
| `speckit.specify` | command | Custom specification command (overrides core) |
|
||||||
|
| `speckit.myext.myextcmd` | command | Override of the myext extension's myextcmd command |
|
||||||
|
|
||||||
|
## Development
|
||||||
|
|
||||||
|
1. Copy this directory: `cp -r presets/scaffold my-preset`
|
||||||
|
2. Edit `preset.yml` — set your preset's ID, name, description, and templates
|
||||||
|
3. Add or modify templates in `templates/`
|
||||||
|
4. Test locally: `specify preset add --dev ./my-preset`
|
||||||
|
5. Verify resolution: `specify preset resolve spec-template`
|
||||||
|
6. Remove when done testing: `specify preset remove my-preset`
|
||||||
|
|
||||||
|
## Manifest Reference (`preset.yml`)
|
||||||
|
|
||||||
|
Required fields:
|
||||||
|
- `schema_version` — always `"1.0"`
|
||||||
|
- `preset.id` — lowercase alphanumeric with hyphens
|
||||||
|
- `preset.name` — human-readable name
|
||||||
|
- `preset.version` — semantic version (e.g. `1.0.0`)
|
||||||
|
- `preset.description` — brief description
|
||||||
|
- `requires.speckit_version` — version constraint (e.g. `>=0.1.0`)
|
||||||
|
- `provides.templates` — list of templates with `type`, `name`, and `file`
|
||||||
|
|
||||||
|
## Template Types
|
||||||
|
|
||||||
|
- **template** — Document scaffolds (spec-template.md, plan-template.md, tasks-template.md, etc.)
|
||||||
|
- **command** — AI agent workflow prompts (e.g. speckit.specify, speckit.plan)
|
||||||
|
- **script** — Custom scripts (reserved for future use)
|
||||||
|
|
||||||
|
## Publishing
|
||||||
|
|
||||||
|
See the [Preset Publishing Guide](../PUBLISHING.md) for details on submitting to the catalog.
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
MIT
|
||||||
20
presets/scaffold/commands/speckit.myext.myextcmd.md
Normal file
20
presets/scaffold/commands/speckit.myext.myextcmd.md
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
description: "Override of the myext extension's myextcmd command"
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- Preset override for speckit.myext.myextcmd -->
|
||||||
|
|
||||||
|
You are following a customized version of the myext extension's myextcmd command.
|
||||||
|
|
||||||
|
When executing this command:
|
||||||
|
|
||||||
|
1. Read the user's input from $ARGUMENTS
|
||||||
|
2. Follow the standard myextcmd workflow
|
||||||
|
3. Additionally, apply the following customizations from this preset:
|
||||||
|
- Add compliance checks before proceeding
|
||||||
|
- Include audit trail entries in the output
|
||||||
|
|
||||||
|
> CUSTOMIZE: Replace the instructions above with your own.
|
||||||
|
> This file overrides the command that the "myext" extension provides.
|
||||||
|
> When this preset is installed, all agents (Claude, Gemini, Copilot, etc.)
|
||||||
|
> will use this version instead of the extension's original.
|
||||||
23
presets/scaffold/commands/speckit.specify.md
Normal file
23
presets/scaffold/commands/speckit.specify.md
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
---
|
||||||
|
description: "Create a feature specification (preset override)"
|
||||||
|
scripts:
|
||||||
|
sh: scripts/bash/create-new-feature.sh "{ARGS}"
|
||||||
|
ps: scripts/powershell/create-new-feature.ps1 "{ARGS}"
|
||||||
|
---
|
||||||
|
|
||||||
|
## User Input
|
||||||
|
|
||||||
|
```text
|
||||||
|
$ARGUMENTS
|
||||||
|
```
|
||||||
|
|
||||||
|
Given the feature description above:
|
||||||
|
|
||||||
|
1. **Create the feature branch** by running the script:
|
||||||
|
- Bash: `{SCRIPT} --json --short-name "<short-name>" "<description>"`
|
||||||
|
- The JSON output contains BRANCH_NAME and SPEC_FILE paths.
|
||||||
|
|
||||||
|
2. **Read the spec-template** to see the sections you need to fill.
|
||||||
|
|
||||||
|
3. **Write the specification** to SPEC_FILE, replacing the placeholders in each section
|
||||||
|
(Overview, Requirements, Acceptance Criteria) with details from the user's description.
|
||||||
91
presets/scaffold/preset.yml
Normal file
91
presets/scaffold/preset.yml
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
schema_version: "1.0"
|
||||||
|
|
||||||
|
preset:
|
||||||
|
# CUSTOMIZE: Change 'my-preset' to your preset ID (lowercase, hyphen-separated)
|
||||||
|
id: "my-preset"
|
||||||
|
|
||||||
|
# CUSTOMIZE: Human-readable name for your preset
|
||||||
|
name: "My Preset"
|
||||||
|
|
||||||
|
# CUSTOMIZE: Update version when releasing (semantic versioning: X.Y.Z)
|
||||||
|
version: "1.0.0"
|
||||||
|
|
||||||
|
# CUSTOMIZE: Brief description (under 200 characters)
|
||||||
|
description: "Brief description of what your preset provides"
|
||||||
|
|
||||||
|
# CUSTOMIZE: Your name or organization name
|
||||||
|
author: "Your Name"
|
||||||
|
|
||||||
|
# CUSTOMIZE: GitHub repository URL (create before publishing)
|
||||||
|
repository: "https://github.com/your-org/spec-kit-preset-my-preset"
|
||||||
|
|
||||||
|
# REVIEW: License (MIT is recommended for open source)
|
||||||
|
license: "MIT"
|
||||||
|
|
||||||
|
# Requirements for this preset
|
||||||
|
requires:
|
||||||
|
# CUSTOMIZE: Minimum spec-kit version required
|
||||||
|
speckit_version: ">=0.1.0"
|
||||||
|
|
||||||
|
# Templates provided by this preset
|
||||||
|
provides:
|
||||||
|
templates:
|
||||||
|
# CUSTOMIZE: Define your template overrides
|
||||||
|
# Templates are document scaffolds (spec-template.md, plan-template.md, etc.)
|
||||||
|
- type: "template"
|
||||||
|
name: "spec-template"
|
||||||
|
file: "templates/spec-template.md"
|
||||||
|
description: "Custom feature specification template"
|
||||||
|
replaces: "spec-template" # Which core template this overrides (optional)
|
||||||
|
|
||||||
|
# ADD MORE TEMPLATES: Copy this block for each template
|
||||||
|
# - type: "template"
|
||||||
|
# name: "plan-template"
|
||||||
|
# file: "templates/plan-template.md"
|
||||||
|
# description: "Custom plan template"
|
||||||
|
# replaces: "plan-template"
|
||||||
|
|
||||||
|
# OVERRIDE EXTENSION TEMPLATES:
|
||||||
|
# Presets sit above extensions in the resolution stack, so you can
|
||||||
|
# override templates provided by any installed extension.
|
||||||
|
# For example, if the "myext" extension provides a spec-template,
|
||||||
|
# the preset's version above will take priority automatically.
|
||||||
|
|
||||||
|
# Override a template provided by the "myext" extension:
|
||||||
|
- type: "template"
|
||||||
|
name: "myext-template"
|
||||||
|
file: "templates/myext-template.md"
|
||||||
|
description: "Override myext's report template"
|
||||||
|
replaces: "myext-template"
|
||||||
|
|
||||||
|
# Command overrides (AI agent workflow prompts)
|
||||||
|
# Presets can override both core and extension commands.
|
||||||
|
# Commands are automatically registered into all detected agent
|
||||||
|
# directories (.claude/commands/, .gemini/commands/, etc.)
|
||||||
|
|
||||||
|
# Override a core command:
|
||||||
|
- type: "command"
|
||||||
|
name: "speckit.specify"
|
||||||
|
file: "commands/speckit.specify.md"
|
||||||
|
description: "Custom specification command"
|
||||||
|
replaces: "speckit.specify"
|
||||||
|
|
||||||
|
# Override an extension command (e.g. from the "myext" extension):
|
||||||
|
- type: "command"
|
||||||
|
name: "speckit.myext.myextcmd"
|
||||||
|
file: "commands/speckit.myext.myextcmd.md"
|
||||||
|
description: "Override myext's myextcmd command with custom workflow"
|
||||||
|
replaces: "speckit.myext.myextcmd"
|
||||||
|
|
||||||
|
# Script templates (reserved for future use)
|
||||||
|
# - type: "script"
|
||||||
|
# name: "create-new-feature"
|
||||||
|
# file: "scripts/bash/create-new-feature.sh"
|
||||||
|
# description: "Custom feature creation script"
|
||||||
|
# replaces: "create-new-feature"
|
||||||
|
|
||||||
|
# CUSTOMIZE: Add relevant tags (2-5 recommended)
|
||||||
|
# Used for discovery in catalog
|
||||||
|
tags:
|
||||||
|
- "example"
|
||||||
|
- "preset"
|
||||||
24
presets/scaffold/templates/myext-template.md
Normal file
24
presets/scaffold/templates/myext-template.md
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
# MyExt Report
|
||||||
|
|
||||||
|
> This template overrides the one provided by the "myext" extension.
|
||||||
|
> Customize it to match your needs.
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
Brief summary of the report.
|
||||||
|
|
||||||
|
## Details
|
||||||
|
|
||||||
|
- Detail 1
|
||||||
|
- Detail 2
|
||||||
|
|
||||||
|
## Actions
|
||||||
|
|
||||||
|
- [ ] Action 1
|
||||||
|
- [ ] Action 2
|
||||||
|
|
||||||
|
<!--
|
||||||
|
CUSTOMIZE: This template takes priority over the myext extension's
|
||||||
|
version of myext-template. The extension's original is still available
|
||||||
|
if you remove this preset.
|
||||||
|
-->
|
||||||
18
presets/scaffold/templates/spec-template.md
Normal file
18
presets/scaffold/templates/spec-template.md
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
# Feature Specification: [FEATURE NAME]
|
||||||
|
|
||||||
|
**Created**: [DATE]
|
||||||
|
**Status**: Draft
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
[Brief description of the feature]
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
- [ ] Requirement 1
|
||||||
|
- [ ] Requirement 2
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Criterion 1
|
||||||
|
- [ ] Criterion 2
|
||||||
15
presets/self-test/commands/speckit.specify.md
Normal file
15
presets/self-test/commands/speckit.specify.md
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
---
|
||||||
|
description: "Self-test override of the specify command"
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- preset:self-test -->
|
||||||
|
|
||||||
|
You are following the self-test preset's version of the specify command.
|
||||||
|
|
||||||
|
When creating a specification, follow this process:
|
||||||
|
|
||||||
|
1. Read the user's requirements from $ARGUMENTS
|
||||||
|
2. Create a specification document using the spec-template
|
||||||
|
3. Include all standard sections plus the self-test marker
|
||||||
|
|
||||||
|
> This command is provided by the self-test preset.
|
||||||
61
presets/self-test/preset.yml
Normal file
61
presets/self-test/preset.yml
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
schema_version: "1.0"
|
||||||
|
|
||||||
|
preset:
|
||||||
|
id: "self-test"
|
||||||
|
name: "Self-Test Preset"
|
||||||
|
version: "1.0.0"
|
||||||
|
description: "A preset that overrides all core templates for testing purposes"
|
||||||
|
author: "github"
|
||||||
|
repository: "https://github.com/github/spec-kit"
|
||||||
|
license: "MIT"
|
||||||
|
|
||||||
|
requires:
|
||||||
|
speckit_version: ">=0.1.0"
|
||||||
|
|
||||||
|
provides:
|
||||||
|
templates:
|
||||||
|
- type: "template"
|
||||||
|
name: "spec-template"
|
||||||
|
file: "templates/spec-template.md"
|
||||||
|
description: "Self-test spec template"
|
||||||
|
replaces: "spec-template"
|
||||||
|
|
||||||
|
- type: "template"
|
||||||
|
name: "plan-template"
|
||||||
|
file: "templates/plan-template.md"
|
||||||
|
description: "Self-test plan template"
|
||||||
|
replaces: "plan-template"
|
||||||
|
|
||||||
|
- type: "template"
|
||||||
|
name: "tasks-template"
|
||||||
|
file: "templates/tasks-template.md"
|
||||||
|
description: "Self-test tasks template"
|
||||||
|
replaces: "tasks-template"
|
||||||
|
|
||||||
|
- type: "template"
|
||||||
|
name: "checklist-template"
|
||||||
|
file: "templates/checklist-template.md"
|
||||||
|
description: "Self-test checklist template"
|
||||||
|
replaces: "checklist-template"
|
||||||
|
|
||||||
|
- type: "template"
|
||||||
|
name: "constitution-template"
|
||||||
|
file: "templates/constitution-template.md"
|
||||||
|
description: "Self-test constitution template"
|
||||||
|
replaces: "constitution-template"
|
||||||
|
|
||||||
|
- type: "template"
|
||||||
|
name: "agent-file-template"
|
||||||
|
file: "templates/agent-file-template.md"
|
||||||
|
description: "Self-test agent file template"
|
||||||
|
replaces: "agent-file-template"
|
||||||
|
|
||||||
|
- type: "command"
|
||||||
|
name: "speckit.specify"
|
||||||
|
file: "commands/speckit.specify.md"
|
||||||
|
description: "Self-test override of the specify command"
|
||||||
|
replaces: "speckit.specify"
|
||||||
|
|
||||||
|
tags:
|
||||||
|
- "testing"
|
||||||
|
- "self-test"
|
||||||
9
presets/self-test/templates/agent-file-template.md
Normal file
9
presets/self-test/templates/agent-file-template.md
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
# Agent File (Self-Test Preset)
|
||||||
|
|
||||||
|
<!-- preset:self-test -->
|
||||||
|
|
||||||
|
> This template is provided by the self-test preset.
|
||||||
|
|
||||||
|
## Agent Instructions
|
||||||
|
|
||||||
|
Follow these guidelines when working on this project.
|
||||||
15
presets/self-test/templates/checklist-template.md
Normal file
15
presets/self-test/templates/checklist-template.md
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# Checklist (Self-Test Preset)
|
||||||
|
|
||||||
|
<!-- preset:self-test -->
|
||||||
|
|
||||||
|
> This template is provided by the self-test preset.
|
||||||
|
|
||||||
|
## Pre-Implementation
|
||||||
|
|
||||||
|
- [ ] Spec reviewed
|
||||||
|
- [ ] Plan approved
|
||||||
|
|
||||||
|
## Post-Implementation
|
||||||
|
|
||||||
|
- [ ] Tests passing
|
||||||
|
- [ ] Documentation updated
|
||||||
15
presets/self-test/templates/constitution-template.md
Normal file
15
presets/self-test/templates/constitution-template.md
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# Constitution (Self-Test Preset)
|
||||||
|
|
||||||
|
<!-- preset:self-test -->
|
||||||
|
|
||||||
|
> This template is provided by the self-test preset.
|
||||||
|
|
||||||
|
## Principles
|
||||||
|
|
||||||
|
1. Principle 1
|
||||||
|
2. Principle 2
|
||||||
|
|
||||||
|
## Guidelines
|
||||||
|
|
||||||
|
- Guideline 1
|
||||||
|
- Guideline 2
|
||||||
22
presets/self-test/templates/plan-template.md
Normal file
22
presets/self-test/templates/plan-template.md
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
# Implementation Plan (Self-Test Preset)
|
||||||
|
|
||||||
|
<!-- preset:self-test -->
|
||||||
|
|
||||||
|
> This template is provided by the self-test preset.
|
||||||
|
|
||||||
|
## Approach
|
||||||
|
|
||||||
|
Describe the implementation approach.
|
||||||
|
|
||||||
|
## Steps
|
||||||
|
|
||||||
|
1. Step 1
|
||||||
|
2. Step 2
|
||||||
|
|
||||||
|
## Dependencies
|
||||||
|
|
||||||
|
- Dependency 1
|
||||||
|
|
||||||
|
## Risks
|
||||||
|
|
||||||
|
- Risk 1
|
||||||
23
presets/self-test/templates/spec-template.md
Normal file
23
presets/self-test/templates/spec-template.md
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
# Feature Specification (Self-Test Preset)
|
||||||
|
|
||||||
|
<!-- preset:self-test -->
|
||||||
|
|
||||||
|
> This template is provided by the self-test preset.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Brief description of the feature.
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
- Requirement 1
|
||||||
|
- Requirement 2
|
||||||
|
|
||||||
|
## Design
|
||||||
|
|
||||||
|
Describe the design approach.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Criterion 1
|
||||||
|
- [ ] Criterion 2
|
||||||
17
presets/self-test/templates/tasks-template.md
Normal file
17
presets/self-test/templates/tasks-template.md
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
# Tasks (Self-Test Preset)
|
||||||
|
|
||||||
|
<!-- preset:self-test -->
|
||||||
|
|
||||||
|
> This template is provided by the self-test preset.
|
||||||
|
|
||||||
|
## Task List
|
||||||
|
|
||||||
|
- [ ] Task 1
|
||||||
|
- [ ] Task 2
|
||||||
|
|
||||||
|
## Estimation
|
||||||
|
|
||||||
|
| Task | Estimate |
|
||||||
|
|------|----------|
|
||||||
|
| Task 1 | TBD |
|
||||||
|
| Task 2 | TBD |
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
[project]
|
[project]
|
||||||
name = "specify-cli"
|
name = "specify-cli"
|
||||||
version = "0.2.1"
|
version = "0.3.0"
|
||||||
description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)."
|
description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)."
|
||||||
requires-python = ">=3.11"
|
requires-python = ">=3.11"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
|||||||
@@ -79,15 +79,28 @@ SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|||||||
source "$SCRIPT_DIR/common.sh"
|
source "$SCRIPT_DIR/common.sh"
|
||||||
|
|
||||||
# Get feature paths and validate branch
|
# Get feature paths and validate branch
|
||||||
eval $(get_feature_paths)
|
_paths_output=$(get_feature_paths) || { echo "ERROR: Failed to resolve feature paths" >&2; exit 1; }
|
||||||
|
eval "$_paths_output"
|
||||||
|
unset _paths_output
|
||||||
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
|
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
|
||||||
|
|
||||||
# If paths-only mode, output paths and exit (support JSON + paths-only combined)
|
# If paths-only mode, output paths and exit (support JSON + paths-only combined)
|
||||||
if $PATHS_ONLY; then
|
if $PATHS_ONLY; then
|
||||||
if $JSON_MODE; then
|
if $JSON_MODE; then
|
||||||
# Minimal JSON paths payload (no validation performed)
|
# Minimal JSON paths payload (no validation performed)
|
||||||
printf '{"REPO_ROOT":"%s","BRANCH":"%s","FEATURE_DIR":"%s","FEATURE_SPEC":"%s","IMPL_PLAN":"%s","TASKS":"%s"}\n' \
|
if has_jq; then
|
||||||
"$REPO_ROOT" "$CURRENT_BRANCH" "$FEATURE_DIR" "$FEATURE_SPEC" "$IMPL_PLAN" "$TASKS"
|
jq -cn \
|
||||||
|
--arg repo_root "$REPO_ROOT" \
|
||||||
|
--arg branch "$CURRENT_BRANCH" \
|
||||||
|
--arg feature_dir "$FEATURE_DIR" \
|
||||||
|
--arg feature_spec "$FEATURE_SPEC" \
|
||||||
|
--arg impl_plan "$IMPL_PLAN" \
|
||||||
|
--arg tasks "$TASKS" \
|
||||||
|
'{REPO_ROOT:$repo_root,BRANCH:$branch,FEATURE_DIR:$feature_dir,FEATURE_SPEC:$feature_spec,IMPL_PLAN:$impl_plan,TASKS:$tasks}'
|
||||||
|
else
|
||||||
|
printf '{"REPO_ROOT":"%s","BRANCH":"%s","FEATURE_DIR":"%s","FEATURE_SPEC":"%s","IMPL_PLAN":"%s","TASKS":"%s"}\n' \
|
||||||
|
"$(json_escape "$REPO_ROOT")" "$(json_escape "$CURRENT_BRANCH")" "$(json_escape "$FEATURE_DIR")" "$(json_escape "$FEATURE_SPEC")" "$(json_escape "$IMPL_PLAN")" "$(json_escape "$TASKS")"
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
echo "REPO_ROOT: $REPO_ROOT"
|
echo "REPO_ROOT: $REPO_ROOT"
|
||||||
echo "BRANCH: $CURRENT_BRANCH"
|
echo "BRANCH: $CURRENT_BRANCH"
|
||||||
@@ -141,14 +154,25 @@ fi
|
|||||||
# Output results
|
# Output results
|
||||||
if $JSON_MODE; then
|
if $JSON_MODE; then
|
||||||
# Build JSON array of documents
|
# Build JSON array of documents
|
||||||
if [[ ${#docs[@]} -eq 0 ]]; then
|
if has_jq; then
|
||||||
json_docs="[]"
|
if [[ ${#docs[@]} -eq 0 ]]; then
|
||||||
|
json_docs="[]"
|
||||||
|
else
|
||||||
|
json_docs=$(printf '%s\n' "${docs[@]}" | jq -R . | jq -s .)
|
||||||
|
fi
|
||||||
|
jq -cn \
|
||||||
|
--arg feature_dir "$FEATURE_DIR" \
|
||||||
|
--argjson docs "$json_docs" \
|
||||||
|
'{FEATURE_DIR:$feature_dir,AVAILABLE_DOCS:$docs}'
|
||||||
else
|
else
|
||||||
json_docs=$(printf '"%s",' "${docs[@]}")
|
if [[ ${#docs[@]} -eq 0 ]]; then
|
||||||
json_docs="[${json_docs%,}]"
|
json_docs="[]"
|
||||||
|
else
|
||||||
|
json_docs=$(printf '"%s",' "${docs[@]}")
|
||||||
|
json_docs="[${json_docs%,}]"
|
||||||
|
fi
|
||||||
|
printf '{"FEATURE_DIR":"%s","AVAILABLE_DOCS":%s}\n' "$(json_escape "$FEATURE_DIR")" "$json_docs"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
printf '{"FEATURE_DIR":"%s","AVAILABLE_DOCS":%s}\n' "$FEATURE_DIR" "$json_docs"
|
|
||||||
else
|
else
|
||||||
# Text output
|
# Text output
|
||||||
echo "FEATURE_DIR:$FEATURE_DIR"
|
echo "FEATURE_DIR:$FEATURE_DIR"
|
||||||
|
|||||||
@@ -120,7 +120,7 @@ find_feature_dir_by_prefix() {
|
|||||||
# Multiple matches - this shouldn't happen with proper naming convention
|
# Multiple matches - this shouldn't happen with proper naming convention
|
||||||
echo "ERROR: Multiple spec directories found with prefix '$prefix': ${matches[*]}" >&2
|
echo "ERROR: Multiple spec directories found with prefix '$prefix': ${matches[*]}" >&2
|
||||||
echo "Please ensure only one spec directory exists per numeric prefix." >&2
|
echo "Please ensure only one spec directory exists per numeric prefix." >&2
|
||||||
echo "$specs_dir/$branch_name" # Return something to avoid breaking the script
|
return 1
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -134,23 +134,120 @@ get_feature_paths() {
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# Use prefix-based lookup to support multiple branches per spec
|
# Use prefix-based lookup to support multiple branches per spec
|
||||||
local feature_dir=$(find_feature_dir_by_prefix "$repo_root" "$current_branch")
|
local feature_dir
|
||||||
|
if ! feature_dir=$(find_feature_dir_by_prefix "$repo_root" "$current_branch"); then
|
||||||
|
echo "ERROR: Failed to resolve feature directory" >&2
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
cat <<EOF
|
# Use printf '%q' to safely quote values, preventing shell injection
|
||||||
REPO_ROOT='$repo_root'
|
# via crafted branch names or paths containing special characters
|
||||||
CURRENT_BRANCH='$current_branch'
|
printf 'REPO_ROOT=%q\n' "$repo_root"
|
||||||
HAS_GIT='$has_git_repo'
|
printf 'CURRENT_BRANCH=%q\n' "$current_branch"
|
||||||
FEATURE_DIR='$feature_dir'
|
printf 'HAS_GIT=%q\n' "$has_git_repo"
|
||||||
FEATURE_SPEC='$feature_dir/spec.md'
|
printf 'FEATURE_DIR=%q\n' "$feature_dir"
|
||||||
IMPL_PLAN='$feature_dir/plan.md'
|
printf 'FEATURE_SPEC=%q\n' "$feature_dir/spec.md"
|
||||||
TASKS='$feature_dir/tasks.md'
|
printf 'IMPL_PLAN=%q\n' "$feature_dir/plan.md"
|
||||||
RESEARCH='$feature_dir/research.md'
|
printf 'TASKS=%q\n' "$feature_dir/tasks.md"
|
||||||
DATA_MODEL='$feature_dir/data-model.md'
|
printf 'RESEARCH=%q\n' "$feature_dir/research.md"
|
||||||
QUICKSTART='$feature_dir/quickstart.md'
|
printf 'DATA_MODEL=%q\n' "$feature_dir/data-model.md"
|
||||||
CONTRACTS_DIR='$feature_dir/contracts'
|
printf 'QUICKSTART=%q\n' "$feature_dir/quickstart.md"
|
||||||
EOF
|
printf 'CONTRACTS_DIR=%q\n' "$feature_dir/contracts"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check if jq is available for safe JSON construction
|
||||||
|
has_jq() {
|
||||||
|
command -v jq >/dev/null 2>&1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Escape a string for safe embedding in a JSON value (fallback when jq is unavailable).
|
||||||
|
# Handles backslash, double-quote, and control characters (newline, tab, carriage return).
|
||||||
|
json_escape() {
|
||||||
|
local s="$1"
|
||||||
|
s="${s//\\/\\\\}"
|
||||||
|
s="${s//\"/\\\"}"
|
||||||
|
s="${s//$'\n'/\\n}"
|
||||||
|
s="${s//$'\t'/\\t}"
|
||||||
|
s="${s//$'\r'/\\r}"
|
||||||
|
printf '%s' "$s"
|
||||||
}
|
}
|
||||||
|
|
||||||
check_file() { [[ -f "$1" ]] && echo " ✓ $2" || echo " ✗ $2"; }
|
check_file() { [[ -f "$1" ]] && echo " ✓ $2" || echo " ✗ $2"; }
|
||||||
check_dir() { [[ -d "$1" && -n $(ls -A "$1" 2>/dev/null) ]] && echo " ✓ $2" || echo " ✗ $2"; }
|
check_dir() { [[ -d "$1" && -n $(ls -A "$1" 2>/dev/null) ]] && echo " ✓ $2" || echo " ✗ $2"; }
|
||||||
|
|
||||||
|
# Resolve a template name to a file path using the priority stack:
|
||||||
|
# 1. .specify/templates/overrides/
|
||||||
|
# 2. .specify/presets/<preset-id>/templates/ (sorted by priority from .registry)
|
||||||
|
# 3. .specify/extensions/<ext-id>/templates/
|
||||||
|
# 4. .specify/templates/ (core)
|
||||||
|
resolve_template() {
|
||||||
|
local template_name="$1"
|
||||||
|
local repo_root="$2"
|
||||||
|
local base="$repo_root/.specify/templates"
|
||||||
|
|
||||||
|
# Priority 1: Project overrides
|
||||||
|
local override="$base/overrides/${template_name}.md"
|
||||||
|
[ -f "$override" ] && echo "$override" && return 0
|
||||||
|
|
||||||
|
# Priority 2: Installed presets (sorted by priority from .registry)
|
||||||
|
local presets_dir="$repo_root/.specify/presets"
|
||||||
|
if [ -d "$presets_dir" ]; then
|
||||||
|
local registry_file="$presets_dir/.registry"
|
||||||
|
if [ -f "$registry_file" ] && command -v python3 >/dev/null 2>&1; then
|
||||||
|
# Read preset IDs sorted by priority (lower number = higher precedence)
|
||||||
|
local sorted_presets
|
||||||
|
sorted_presets=$(SPECKIT_REGISTRY="$registry_file" python3 -c "
|
||||||
|
import json, sys, os
|
||||||
|
try:
|
||||||
|
with open(os.environ['SPECKIT_REGISTRY']) as f:
|
||||||
|
data = json.load(f)
|
||||||
|
presets = data.get('presets', {})
|
||||||
|
for pid, meta in sorted(presets.items(), key=lambda x: x[1].get('priority', 10)):
|
||||||
|
print(pid)
|
||||||
|
except Exception:
|
||||||
|
sys.exit(1)
|
||||||
|
" 2>/dev/null)
|
||||||
|
if [ $? -eq 0 ] && [ -n "$sorted_presets" ]; then
|
||||||
|
while IFS= read -r preset_id; do
|
||||||
|
local candidate="$presets_dir/$preset_id/templates/${template_name}.md"
|
||||||
|
[ -f "$candidate" ] && echo "$candidate" && return 0
|
||||||
|
done <<< "$sorted_presets"
|
||||||
|
else
|
||||||
|
# python3 returned empty list — fall through to directory scan
|
||||||
|
for preset in "$presets_dir"/*/; do
|
||||||
|
[ -d "$preset" ] || continue
|
||||||
|
local candidate="$preset/templates/${template_name}.md"
|
||||||
|
[ -f "$candidate" ] && echo "$candidate" && return 0
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
# Fallback: alphabetical directory order (no python3 available)
|
||||||
|
for preset in "$presets_dir"/*/; do
|
||||||
|
[ -d "$preset" ] || continue
|
||||||
|
local candidate="$preset/templates/${template_name}.md"
|
||||||
|
[ -f "$candidate" ] && echo "$candidate" && return 0
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Priority 3: Extension-provided templates
|
||||||
|
local ext_dir="$repo_root/.specify/extensions"
|
||||||
|
if [ -d "$ext_dir" ]; then
|
||||||
|
for ext in "$ext_dir"/*/; do
|
||||||
|
[ -d "$ext" ] || continue
|
||||||
|
# Skip hidden directories (e.g. .backup, .cache)
|
||||||
|
case "$(basename "$ext")" in .*) continue;; esac
|
||||||
|
local candidate="$ext/templates/${template_name}.md"
|
||||||
|
[ -f "$candidate" ] && echo "$candidate" && return 0
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Priority 4: Core templates
|
||||||
|
local core="$base/${template_name}.md"
|
||||||
|
[ -f "$core" ] && echo "$core" && return 0
|
||||||
|
|
||||||
|
# Return success with empty output so callers using set -e don't abort;
|
||||||
|
# callers check [ -n "$TEMPLATE" ] to detect "not found".
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -162,10 +162,22 @@ clean_branch_name() {
|
|||||||
echo "$name" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//'
|
echo "$name" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//'
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Escape a string for safe embedding in a JSON value (fallback when jq is unavailable).
|
||||||
|
json_escape() {
|
||||||
|
local s="$1"
|
||||||
|
s="${s//\\/\\\\}"
|
||||||
|
s="${s//\"/\\\"}"
|
||||||
|
s="${s//$'\n'/\\n}"
|
||||||
|
s="${s//$'\t'/\\t}"
|
||||||
|
s="${s//$'\r'/\\r}"
|
||||||
|
printf '%s' "$s"
|
||||||
|
}
|
||||||
|
|
||||||
# Resolve repository root. Prefer git information when available, but fall back
|
# Resolve repository root. Prefer git information when available, but fall back
|
||||||
# to searching for repository markers so the workflow still functions in repositories that
|
# to searching for repository markers so the workflow still functions in repositories that
|
||||||
# were initialised with --no-git.
|
# were initialised with --no-git.
|
||||||
SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
|
source "$SCRIPT_DIR/common.sh"
|
||||||
|
|
||||||
if git rev-parse --show-toplevel >/dev/null 2>&1; then
|
if git rev-parse --show-toplevel >/dev/null 2>&1; then
|
||||||
REPO_ROOT=$(git rev-parse --show-toplevel)
|
REPO_ROOT=$(git rev-parse --show-toplevel)
|
||||||
@@ -296,18 +308,26 @@ fi
|
|||||||
FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
|
FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
|
||||||
mkdir -p "$FEATURE_DIR"
|
mkdir -p "$FEATURE_DIR"
|
||||||
|
|
||||||
TEMPLATE="$REPO_ROOT/.specify/templates/spec-template.md"
|
TEMPLATE=$(resolve_template "spec-template" "$REPO_ROOT")
|
||||||
SPEC_FILE="$FEATURE_DIR/spec.md"
|
SPEC_FILE="$FEATURE_DIR/spec.md"
|
||||||
if [ -f "$TEMPLATE" ]; then cp "$TEMPLATE" "$SPEC_FILE"; else touch "$SPEC_FILE"; fi
|
if [ -n "$TEMPLATE" ] && [ -f "$TEMPLATE" ]; then cp "$TEMPLATE" "$SPEC_FILE"; else touch "$SPEC_FILE"; fi
|
||||||
|
|
||||||
# Set the SPECIFY_FEATURE environment variable for the current session
|
# Inform the user how to persist the feature variable in their own shell
|
||||||
export SPECIFY_FEATURE="$BRANCH_NAME"
|
printf '# To persist: export SPECIFY_FEATURE=%q\n' "$BRANCH_NAME" >&2
|
||||||
|
|
||||||
if $JSON_MODE; then
|
if $JSON_MODE; then
|
||||||
printf '{"BRANCH_NAME":"%s","SPEC_FILE":"%s","FEATURE_NUM":"%s"}\n' "$BRANCH_NAME" "$SPEC_FILE" "$FEATURE_NUM"
|
if command -v jq >/dev/null 2>&1; then
|
||||||
|
jq -cn \
|
||||||
|
--arg branch_name "$BRANCH_NAME" \
|
||||||
|
--arg spec_file "$SPEC_FILE" \
|
||||||
|
--arg feature_num "$FEATURE_NUM" \
|
||||||
|
'{BRANCH_NAME:$branch_name,SPEC_FILE:$spec_file,FEATURE_NUM:$feature_num}'
|
||||||
|
else
|
||||||
|
printf '{"BRANCH_NAME":"%s","SPEC_FILE":"%s","FEATURE_NUM":"%s"}\n' "$(json_escape "$BRANCH_NAME")" "$(json_escape "$SPEC_FILE")" "$(json_escape "$FEATURE_NUM")"
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
echo "BRANCH_NAME: $BRANCH_NAME"
|
echo "BRANCH_NAME: $BRANCH_NAME"
|
||||||
echo "SPEC_FILE: $SPEC_FILE"
|
echo "SPEC_FILE: $SPEC_FILE"
|
||||||
echo "FEATURE_NUM: $FEATURE_NUM"
|
echo "FEATURE_NUM: $FEATURE_NUM"
|
||||||
echo "SPECIFY_FEATURE environment variable set to: $BRANCH_NAME"
|
printf '# To persist in your shell: export SPECIFY_FEATURE=%q\n' "$BRANCH_NAME"
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -28,7 +28,9 @@ SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|||||||
source "$SCRIPT_DIR/common.sh"
|
source "$SCRIPT_DIR/common.sh"
|
||||||
|
|
||||||
# Get all paths and variables from common functions
|
# Get all paths and variables from common functions
|
||||||
eval $(get_feature_paths)
|
_paths_output=$(get_feature_paths) || { echo "ERROR: Failed to resolve feature paths" >&2; exit 1; }
|
||||||
|
eval "$_paths_output"
|
||||||
|
unset _paths_output
|
||||||
|
|
||||||
# Check if we're on a proper feature branch (only for git repos)
|
# Check if we're on a proper feature branch (only for git repos)
|
||||||
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
|
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
|
||||||
@@ -37,20 +39,30 @@ check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
|
|||||||
mkdir -p "$FEATURE_DIR"
|
mkdir -p "$FEATURE_DIR"
|
||||||
|
|
||||||
# Copy plan template if it exists
|
# Copy plan template if it exists
|
||||||
TEMPLATE="$REPO_ROOT/.specify/templates/plan-template.md"
|
TEMPLATE=$(resolve_template "plan-template" "$REPO_ROOT")
|
||||||
if [[ -f "$TEMPLATE" ]]; then
|
if [[ -n "$TEMPLATE" ]] && [[ -f "$TEMPLATE" ]]; then
|
||||||
cp "$TEMPLATE" "$IMPL_PLAN"
|
cp "$TEMPLATE" "$IMPL_PLAN"
|
||||||
echo "Copied plan template to $IMPL_PLAN"
|
echo "Copied plan template to $IMPL_PLAN"
|
||||||
else
|
else
|
||||||
echo "Warning: Plan template not found at $TEMPLATE"
|
echo "Warning: Plan template not found"
|
||||||
# Create a basic plan file if template doesn't exist
|
# Create a basic plan file if template doesn't exist
|
||||||
touch "$IMPL_PLAN"
|
touch "$IMPL_PLAN"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Output results
|
# Output results
|
||||||
if $JSON_MODE; then
|
if $JSON_MODE; then
|
||||||
printf '{"FEATURE_SPEC":"%s","IMPL_PLAN":"%s","SPECS_DIR":"%s","BRANCH":"%s","HAS_GIT":"%s"}\n' \
|
if has_jq; then
|
||||||
"$FEATURE_SPEC" "$IMPL_PLAN" "$FEATURE_DIR" "$CURRENT_BRANCH" "$HAS_GIT"
|
jq -cn \
|
||||||
|
--arg feature_spec "$FEATURE_SPEC" \
|
||||||
|
--arg impl_plan "$IMPL_PLAN" \
|
||||||
|
--arg specs_dir "$FEATURE_DIR" \
|
||||||
|
--arg branch "$CURRENT_BRANCH" \
|
||||||
|
--arg has_git "$HAS_GIT" \
|
||||||
|
'{FEATURE_SPEC:$feature_spec,IMPL_PLAN:$impl_plan,SPECS_DIR:$specs_dir,BRANCH:$branch,HAS_GIT:$has_git}'
|
||||||
|
else
|
||||||
|
printf '{"FEATURE_SPEC":"%s","IMPL_PLAN":"%s","SPECS_DIR":"%s","BRANCH":"%s","HAS_GIT":"%s"}\n' \
|
||||||
|
"$(json_escape "$FEATURE_SPEC")" "$(json_escape "$IMPL_PLAN")" "$(json_escape "$FEATURE_DIR")" "$(json_escape "$CURRENT_BRANCH")" "$(json_escape "$HAS_GIT")"
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
echo "FEATURE_SPEC: $FEATURE_SPEC"
|
echo "FEATURE_SPEC: $FEATURE_SPEC"
|
||||||
echo "IMPL_PLAN: $IMPL_PLAN"
|
echo "IMPL_PLAN: $IMPL_PLAN"
|
||||||
|
|||||||
@@ -53,7 +53,9 @@ SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|||||||
source "$SCRIPT_DIR/common.sh"
|
source "$SCRIPT_DIR/common.sh"
|
||||||
|
|
||||||
# Get all paths and variables from common functions
|
# Get all paths and variables from common functions
|
||||||
eval $(get_feature_paths)
|
_paths_output=$(get_feature_paths) || { echo "ERROR: Failed to resolve feature paths" >&2; exit 1; }
|
||||||
|
eval "$_paths_output"
|
||||||
|
unset _paths_output
|
||||||
|
|
||||||
NEW_PLAN="$IMPL_PLAN" # Alias for compatibility with existing code
|
NEW_PLAN="$IMPL_PLAN" # Alias for compatibility with existing code
|
||||||
AGENT_TYPE="${1:-}"
|
AGENT_TYPE="${1:-}"
|
||||||
@@ -71,12 +73,14 @@ AUGGIE_FILE="$REPO_ROOT/.augment/rules/specify-rules.md"
|
|||||||
ROO_FILE="$REPO_ROOT/.roo/rules/specify-rules.md"
|
ROO_FILE="$REPO_ROOT/.roo/rules/specify-rules.md"
|
||||||
CODEBUDDY_FILE="$REPO_ROOT/CODEBUDDY.md"
|
CODEBUDDY_FILE="$REPO_ROOT/CODEBUDDY.md"
|
||||||
QODER_FILE="$REPO_ROOT/QODER.md"
|
QODER_FILE="$REPO_ROOT/QODER.md"
|
||||||
AMP_FILE="$REPO_ROOT/AGENTS.md"
|
# AMP, Kiro CLI, and IBM Bob all share AGENTS.md — use AGENTS_FILE to avoid
|
||||||
|
# updating the same file multiple times.
|
||||||
|
AMP_FILE="$AGENTS_FILE"
|
||||||
SHAI_FILE="$REPO_ROOT/SHAI.md"
|
SHAI_FILE="$REPO_ROOT/SHAI.md"
|
||||||
TABNINE_FILE="$REPO_ROOT/TABNINE.md"
|
TABNINE_FILE="$REPO_ROOT/TABNINE.md"
|
||||||
KIRO_FILE="$REPO_ROOT/AGENTS.md"
|
KIRO_FILE="$AGENTS_FILE"
|
||||||
AGY_FILE="$REPO_ROOT/.agent/rules/specify-rules.md"
|
AGY_FILE="$REPO_ROOT/.agent/rules/specify-rules.md"
|
||||||
BOB_FILE="$REPO_ROOT/AGENTS.md"
|
BOB_FILE="$AGENTS_FILE"
|
||||||
VIBE_FILE="$REPO_ROOT/.vibe/agents/specify-agents.md"
|
VIBE_FILE="$REPO_ROOT/.vibe/agents/specify-agents.md"
|
||||||
KIMI_FILE="$REPO_ROOT/KIMI.md"
|
KIMI_FILE="$REPO_ROOT/KIMI.md"
|
||||||
|
|
||||||
@@ -112,6 +116,8 @@ log_warning() {
|
|||||||
# Cleanup function for temporary files
|
# Cleanup function for temporary files
|
||||||
cleanup() {
|
cleanup() {
|
||||||
local exit_code=$?
|
local exit_code=$?
|
||||||
|
# Disarm traps to prevent re-entrant loop
|
||||||
|
trap - EXIT INT TERM
|
||||||
rm -f /tmp/agent_update_*_$$
|
rm -f /tmp/agent_update_*_$$
|
||||||
rm -f /tmp/manual_additions_$$
|
rm -f /tmp/manual_additions_$$
|
||||||
exit $exit_code
|
exit $exit_code
|
||||||
@@ -476,7 +482,7 @@ update_existing_agent_file() {
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# Update timestamp
|
# Update timestamp
|
||||||
if [[ "$line" =~ \*\*Last\ updated\*\*:.*[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] ]]; then
|
if [[ "$line" =~ (\*\*)?Last\ updated(\*\*)?:.*[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] ]]; then
|
||||||
echo "$line" | sed "s/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]/$current_date/" >> "$temp_file"
|
echo "$line" | sed "s/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]/$current_date/" >> "$temp_file"
|
||||||
else
|
else
|
||||||
echo "$line" >> "$temp_file"
|
echo "$line" >> "$temp_file"
|
||||||
@@ -607,67 +613,67 @@ update_specific_agent() {
|
|||||||
|
|
||||||
case "$agent_type" in
|
case "$agent_type" in
|
||||||
claude)
|
claude)
|
||||||
update_agent_file "$CLAUDE_FILE" "Claude Code"
|
update_agent_file "$CLAUDE_FILE" "Claude Code" || return 1
|
||||||
;;
|
;;
|
||||||
gemini)
|
gemini)
|
||||||
update_agent_file "$GEMINI_FILE" "Gemini CLI"
|
update_agent_file "$GEMINI_FILE" "Gemini CLI" || return 1
|
||||||
;;
|
;;
|
||||||
copilot)
|
copilot)
|
||||||
update_agent_file "$COPILOT_FILE" "GitHub Copilot"
|
update_agent_file "$COPILOT_FILE" "GitHub Copilot" || return 1
|
||||||
;;
|
;;
|
||||||
cursor-agent)
|
cursor-agent)
|
||||||
update_agent_file "$CURSOR_FILE" "Cursor IDE"
|
update_agent_file "$CURSOR_FILE" "Cursor IDE" || return 1
|
||||||
;;
|
;;
|
||||||
qwen)
|
qwen)
|
||||||
update_agent_file "$QWEN_FILE" "Qwen Code"
|
update_agent_file "$QWEN_FILE" "Qwen Code" || return 1
|
||||||
;;
|
;;
|
||||||
opencode)
|
opencode)
|
||||||
update_agent_file "$AGENTS_FILE" "opencode"
|
update_agent_file "$AGENTS_FILE" "opencode" || return 1
|
||||||
;;
|
;;
|
||||||
codex)
|
codex)
|
||||||
update_agent_file "$AGENTS_FILE" "Codex CLI"
|
update_agent_file "$AGENTS_FILE" "Codex CLI" || return 1
|
||||||
;;
|
;;
|
||||||
windsurf)
|
windsurf)
|
||||||
update_agent_file "$WINDSURF_FILE" "Windsurf"
|
update_agent_file "$WINDSURF_FILE" "Windsurf" || return 1
|
||||||
;;
|
;;
|
||||||
kilocode)
|
kilocode)
|
||||||
update_agent_file "$KILOCODE_FILE" "Kilo Code"
|
update_agent_file "$KILOCODE_FILE" "Kilo Code" || return 1
|
||||||
;;
|
;;
|
||||||
auggie)
|
auggie)
|
||||||
update_agent_file "$AUGGIE_FILE" "Auggie CLI"
|
update_agent_file "$AUGGIE_FILE" "Auggie CLI" || return 1
|
||||||
;;
|
;;
|
||||||
roo)
|
roo)
|
||||||
update_agent_file "$ROO_FILE" "Roo Code"
|
update_agent_file "$ROO_FILE" "Roo Code" || return 1
|
||||||
;;
|
;;
|
||||||
codebuddy)
|
codebuddy)
|
||||||
update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI"
|
update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI" || return 1
|
||||||
;;
|
;;
|
||||||
qodercli)
|
qodercli)
|
||||||
update_agent_file "$QODER_FILE" "Qoder CLI"
|
update_agent_file "$QODER_FILE" "Qoder CLI" || return 1
|
||||||
;;
|
;;
|
||||||
amp)
|
amp)
|
||||||
update_agent_file "$AMP_FILE" "Amp"
|
update_agent_file "$AMP_FILE" "Amp" || return 1
|
||||||
;;
|
;;
|
||||||
shai)
|
shai)
|
||||||
update_agent_file "$SHAI_FILE" "SHAI"
|
update_agent_file "$SHAI_FILE" "SHAI" || return 1
|
||||||
;;
|
;;
|
||||||
tabnine)
|
tabnine)
|
||||||
update_agent_file "$TABNINE_FILE" "Tabnine CLI"
|
update_agent_file "$TABNINE_FILE" "Tabnine CLI" || return 1
|
||||||
;;
|
;;
|
||||||
kiro-cli)
|
kiro-cli)
|
||||||
update_agent_file "$KIRO_FILE" "Kiro CLI"
|
update_agent_file "$KIRO_FILE" "Kiro CLI" || return 1
|
||||||
;;
|
;;
|
||||||
agy)
|
agy)
|
||||||
update_agent_file "$AGY_FILE" "Antigravity"
|
update_agent_file "$AGY_FILE" "Antigravity" || return 1
|
||||||
;;
|
;;
|
||||||
bob)
|
bob)
|
||||||
update_agent_file "$BOB_FILE" "IBM Bob"
|
update_agent_file "$BOB_FILE" "IBM Bob" || return 1
|
||||||
;;
|
;;
|
||||||
vibe)
|
vibe)
|
||||||
update_agent_file "$VIBE_FILE" "Mistral Vibe"
|
update_agent_file "$VIBE_FILE" "Mistral Vibe" || return 1
|
||||||
;;
|
;;
|
||||||
kimi)
|
kimi)
|
||||||
update_agent_file "$KIMI_FILE" "Kimi Code"
|
update_agent_file "$KIMI_FILE" "Kimi Code" || return 1
|
||||||
;;
|
;;
|
||||||
generic)
|
generic)
|
||||||
log_info "Generic agent: no predefined context file. Use the agent-specific update script for your agent."
|
log_info "Generic agent: no predefined context file. Use the agent-specific update script for your agent."
|
||||||
@@ -682,106 +688,53 @@ update_specific_agent() {
|
|||||||
|
|
||||||
update_all_existing_agents() {
|
update_all_existing_agents() {
|
||||||
local found_agent=false
|
local found_agent=false
|
||||||
|
local _updated_paths=()
|
||||||
# Check each possible agent file and update if it exists
|
|
||||||
if [[ -f "$CLAUDE_FILE" ]]; then
|
|
||||||
update_agent_file "$CLAUDE_FILE" "Claude Code"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$GEMINI_FILE" ]]; then
|
|
||||||
update_agent_file "$GEMINI_FILE" "Gemini CLI"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$COPILOT_FILE" ]]; then
|
|
||||||
update_agent_file "$COPILOT_FILE" "GitHub Copilot"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$CURSOR_FILE" ]]; then
|
|
||||||
update_agent_file "$CURSOR_FILE" "Cursor IDE"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$QWEN_FILE" ]]; then
|
|
||||||
update_agent_file "$QWEN_FILE" "Qwen Code"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$AGENTS_FILE" ]]; then
|
|
||||||
update_agent_file "$AGENTS_FILE" "Codex/opencode"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$WINDSURF_FILE" ]]; then
|
|
||||||
update_agent_file "$WINDSURF_FILE" "Windsurf"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$KILOCODE_FILE" ]]; then
|
|
||||||
update_agent_file "$KILOCODE_FILE" "Kilo Code"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$AUGGIE_FILE" ]]; then
|
# Helper: skip non-existent files and files already updated (dedup by
|
||||||
update_agent_file "$AUGGIE_FILE" "Auggie CLI"
|
# realpath so that variables pointing to the same file — e.g. AMP_FILE,
|
||||||
|
# KIRO_FILE, BOB_FILE all resolving to AGENTS_FILE — are only written once).
|
||||||
|
# Uses a linear array instead of associative array for bash 3.2 compatibility.
|
||||||
|
update_if_new() {
|
||||||
|
local file="$1" name="$2"
|
||||||
|
[[ -f "$file" ]] || return 0
|
||||||
|
local real_path
|
||||||
|
real_path=$(realpath "$file" 2>/dev/null || echo "$file")
|
||||||
|
local p
|
||||||
|
if [[ ${#_updated_paths[@]} -gt 0 ]]; then
|
||||||
|
for p in "${_updated_paths[@]}"; do
|
||||||
|
[[ "$p" == "$real_path" ]] && return 0
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
update_agent_file "$file" "$name" || return 1
|
||||||
|
_updated_paths+=("$real_path")
|
||||||
found_agent=true
|
found_agent=true
|
||||||
fi
|
}
|
||||||
|
|
||||||
if [[ -f "$ROO_FILE" ]]; then
|
|
||||||
update_agent_file "$ROO_FILE" "Roo Code"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$CODEBUDDY_FILE" ]]; then
|
update_if_new "$CLAUDE_FILE" "Claude Code"
|
||||||
update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI"
|
update_if_new "$GEMINI_FILE" "Gemini CLI"
|
||||||
found_agent=true
|
update_if_new "$COPILOT_FILE" "GitHub Copilot"
|
||||||
fi
|
update_if_new "$CURSOR_FILE" "Cursor IDE"
|
||||||
|
update_if_new "$QWEN_FILE" "Qwen Code"
|
||||||
|
update_if_new "$AGENTS_FILE" "Codex/opencode"
|
||||||
|
update_if_new "$AMP_FILE" "Amp"
|
||||||
|
update_if_new "$KIRO_FILE" "Kiro CLI"
|
||||||
|
update_if_new "$BOB_FILE" "IBM Bob"
|
||||||
|
update_if_new "$WINDSURF_FILE" "Windsurf"
|
||||||
|
update_if_new "$KILOCODE_FILE" "Kilo Code"
|
||||||
|
update_if_new "$AUGGIE_FILE" "Auggie CLI"
|
||||||
|
update_if_new "$ROO_FILE" "Roo Code"
|
||||||
|
update_if_new "$CODEBUDDY_FILE" "CodeBuddy CLI"
|
||||||
|
update_if_new "$SHAI_FILE" "SHAI"
|
||||||
|
update_if_new "$TABNINE_FILE" "Tabnine CLI"
|
||||||
|
update_if_new "$QODER_FILE" "Qoder CLI"
|
||||||
|
update_if_new "$AGY_FILE" "Antigravity"
|
||||||
|
update_if_new "$VIBE_FILE" "Mistral Vibe"
|
||||||
|
update_if_new "$KIMI_FILE" "Kimi Code"
|
||||||
|
|
||||||
if [[ -f "$SHAI_FILE" ]]; then
|
|
||||||
update_agent_file "$SHAI_FILE" "SHAI"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$TABNINE_FILE" ]]; then
|
|
||||||
update_agent_file "$TABNINE_FILE" "Tabnine CLI"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$QODER_FILE" ]]; then
|
|
||||||
update_agent_file "$QODER_FILE" "Qoder CLI"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$KIRO_FILE" ]]; then
|
|
||||||
update_agent_file "$KIRO_FILE" "Kiro CLI"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$AGY_FILE" ]]; then
|
|
||||||
update_agent_file "$AGY_FILE" "Antigravity"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
if [[ -f "$BOB_FILE" ]]; then
|
|
||||||
update_agent_file "$BOB_FILE" "IBM Bob"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$VIBE_FILE" ]]; then
|
|
||||||
update_agent_file "$VIBE_FILE" "Mistral Vibe"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$KIMI_FILE" ]]; then
|
|
||||||
update_agent_file "$KIMI_FILE" "Kimi Code"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
# If no agent files exist, create a default Claude file
|
# If no agent files exist, create a default Claude file
|
||||||
if [[ "$found_agent" == false ]]; then
|
if [[ "$found_agent" == false ]]; then
|
||||||
log_info "No existing agent files found, creating default Claude file..."
|
log_info "No existing agent files found, creating default Claude file..."
|
||||||
update_agent_file "$CLAUDE_FILE" "Claude Code"
|
update_agent_file "$CLAUDE_FILE" "Claude Code" || return 1
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
print_summary() {
|
print_summary() {
|
||||||
|
|||||||
@@ -135,3 +135,70 @@ function Test-DirHasFiles {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Resolve a template name to a file path using the priority stack:
|
||||||
|
# 1. .specify/templates/overrides/
|
||||||
|
# 2. .specify/presets/<preset-id>/templates/ (sorted by priority from .registry)
|
||||||
|
# 3. .specify/extensions/<ext-id>/templates/
|
||||||
|
# 4. .specify/templates/ (core)
|
||||||
|
function Resolve-Template {
|
||||||
|
param(
|
||||||
|
[Parameter(Mandatory=$true)][string]$TemplateName,
|
||||||
|
[Parameter(Mandatory=$true)][string]$RepoRoot
|
||||||
|
)
|
||||||
|
|
||||||
|
$base = Join-Path $RepoRoot '.specify/templates'
|
||||||
|
|
||||||
|
# Priority 1: Project overrides
|
||||||
|
$override = Join-Path $base "overrides/$TemplateName.md"
|
||||||
|
if (Test-Path $override) { return $override }
|
||||||
|
|
||||||
|
# Priority 2: Installed presets (sorted by priority from .registry)
|
||||||
|
$presetsDir = Join-Path $RepoRoot '.specify/presets'
|
||||||
|
if (Test-Path $presetsDir) {
|
||||||
|
$registryFile = Join-Path $presetsDir '.registry'
|
||||||
|
$sortedPresets = @()
|
||||||
|
if (Test-Path $registryFile) {
|
||||||
|
try {
|
||||||
|
$registryData = Get-Content $registryFile -Raw | ConvertFrom-Json
|
||||||
|
$presets = $registryData.presets
|
||||||
|
if ($presets) {
|
||||||
|
$sortedPresets = $presets.PSObject.Properties |
|
||||||
|
Sort-Object { if ($null -ne $_.Value.priority) { $_.Value.priority } else { 10 } } |
|
||||||
|
ForEach-Object { $_.Name }
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
# Fallback: alphabetical directory order
|
||||||
|
$sortedPresets = @()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ($sortedPresets.Count -gt 0) {
|
||||||
|
foreach ($presetId in $sortedPresets) {
|
||||||
|
$candidate = Join-Path $presetsDir "$presetId/templates/$TemplateName.md"
|
||||||
|
if (Test-Path $candidate) { return $candidate }
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
# Fallback: alphabetical directory order
|
||||||
|
foreach ($preset in Get-ChildItem -Path $presetsDir -Directory -ErrorAction SilentlyContinue | Where-Object { $_.Name -notlike '.*' }) {
|
||||||
|
$candidate = Join-Path $preset.FullName "templates/$TemplateName.md"
|
||||||
|
if (Test-Path $candidate) { return $candidate }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Priority 3: Extension-provided templates
|
||||||
|
$extDir = Join-Path $RepoRoot '.specify/extensions'
|
||||||
|
if (Test-Path $extDir) {
|
||||||
|
foreach ($ext in Get-ChildItem -Path $extDir -Directory -ErrorAction SilentlyContinue | Where-Object { $_.Name -notlike '.*' } | Sort-Object Name) {
|
||||||
|
$candidate = Join-Path $ext.FullName "templates/$TemplateName.md"
|
||||||
|
if (Test-Path $candidate) { return $candidate }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Priority 4: Core templates
|
||||||
|
$core = Join-Path $base "$TemplateName.md"
|
||||||
|
if (Test-Path $core) { return $core }
|
||||||
|
|
||||||
|
return $null
|
||||||
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -141,6 +141,9 @@ if (-not $fallbackRoot) {
|
|||||||
exit 1
|
exit 1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Load common functions (includes Resolve-Template)
|
||||||
|
. "$PSScriptRoot/common.ps1"
|
||||||
|
|
||||||
try {
|
try {
|
||||||
$repoRoot = git rev-parse --show-toplevel 2>$null
|
$repoRoot = git rev-parse --show-toplevel 2>$null
|
||||||
if ($LASTEXITCODE -eq 0) {
|
if ($LASTEXITCODE -eq 0) {
|
||||||
@@ -276,9 +279,9 @@ if ($hasGit) {
|
|||||||
$featureDir = Join-Path $specsDir $branchName
|
$featureDir = Join-Path $specsDir $branchName
|
||||||
New-Item -ItemType Directory -Path $featureDir -Force | Out-Null
|
New-Item -ItemType Directory -Path $featureDir -Force | Out-Null
|
||||||
|
|
||||||
$template = Join-Path $repoRoot '.specify/templates/spec-template.md'
|
$template = Resolve-Template -TemplateName 'spec-template' -RepoRoot $repoRoot
|
||||||
$specFile = Join-Path $featureDir 'spec.md'
|
$specFile = Join-Path $featureDir 'spec.md'
|
||||||
if (Test-Path $template) {
|
if ($template -and (Test-Path $template)) {
|
||||||
Copy-Item $template $specFile -Force
|
Copy-Item $template $specFile -Force
|
||||||
} else {
|
} else {
|
||||||
New-Item -ItemType File -Path $specFile | Out-Null
|
New-Item -ItemType File -Path $specFile | Out-Null
|
||||||
|
|||||||
@@ -32,12 +32,12 @@ if (-not (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH -HasGit $paths.HAS_GI
|
|||||||
New-Item -ItemType Directory -Path $paths.FEATURE_DIR -Force | Out-Null
|
New-Item -ItemType Directory -Path $paths.FEATURE_DIR -Force | Out-Null
|
||||||
|
|
||||||
# Copy plan template if it exists, otherwise note it or create empty file
|
# Copy plan template if it exists, otherwise note it or create empty file
|
||||||
$template = Join-Path $paths.REPO_ROOT '.specify/templates/plan-template.md'
|
$template = Resolve-Template -TemplateName 'plan-template' -RepoRoot $paths.REPO_ROOT
|
||||||
if (Test-Path $template) {
|
if ($template -and (Test-Path $template)) {
|
||||||
Copy-Item $template $paths.IMPL_PLAN -Force
|
Copy-Item $template $paths.IMPL_PLAN -Force
|
||||||
Write-Output "Copied plan template to $($paths.IMPL_PLAN)"
|
Write-Output "Copied plan template to $($paths.IMPL_PLAN)"
|
||||||
} else {
|
} else {
|
||||||
Write-Warning "Plan template not found at $template"
|
Write-Warning "Plan template not found"
|
||||||
# Create a basic plan file if template doesn't exist
|
# Create a basic plan file if template doesn't exist
|
||||||
New-Item -ItemType File -Path $paths.IMPL_PLAN -Force | Out-Null
|
New-Item -ItemType File -Path $paths.IMPL_PLAN -Force | Out-Null
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -331,7 +331,7 @@ function Update-ExistingAgentFile {
|
|||||||
if ($existingChanges -lt 2) { $output.Add($line); $existingChanges++ }
|
if ($existingChanges -lt 2) { $output.Add($line); $existingChanges++ }
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if ($line -match '\*\*Last updated\*\*: .*\d{4}-\d{2}-\d{2}') {
|
if ($line -match '(\*\*)?Last updated(\*\*)?: .*\d{4}-\d{2}-\d{2}') {
|
||||||
$output.Add(($line -replace '\d{4}-\d{2}-\d{2}',$Date.ToString('yyyy-MM-dd')))
|
$output.Add(($line -replace '\d{4}-\d{2}-\d{2}',$Date.ToString('yyyy-MM-dd')))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
422
src/specify_cli/agents.py
Normal file
422
src/specify_cli/agents.py
Normal file
@@ -0,0 +1,422 @@
|
|||||||
|
"""
|
||||||
|
Agent Command Registrar for Spec Kit
|
||||||
|
|
||||||
|
Shared infrastructure for registering commands with AI agents.
|
||||||
|
Used by both the extension system and the preset system to write
|
||||||
|
command files into agent-specific directories in the correct format.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Any
|
||||||
|
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
|
||||||
|
class CommandRegistrar:
|
||||||
|
"""Handles registration of commands with AI agents.
|
||||||
|
|
||||||
|
Supports writing command files in Markdown or TOML format to the
|
||||||
|
appropriate agent directory, with correct argument placeholders
|
||||||
|
and companion files (e.g. Copilot .prompt.md).
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Agent configurations with directory, format, and argument placeholder
|
||||||
|
AGENT_CONFIGS = {
|
||||||
|
"claude": {
|
||||||
|
"dir": ".claude/commands",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": ".md"
|
||||||
|
},
|
||||||
|
"gemini": {
|
||||||
|
"dir": ".gemini/commands",
|
||||||
|
"format": "toml",
|
||||||
|
"args": "{{args}}",
|
||||||
|
"extension": ".toml"
|
||||||
|
},
|
||||||
|
"copilot": {
|
||||||
|
"dir": ".github/agents",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": ".agent.md"
|
||||||
|
},
|
||||||
|
"cursor": {
|
||||||
|
"dir": ".cursor/commands",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": ".md"
|
||||||
|
},
|
||||||
|
"qwen": {
|
||||||
|
"dir": ".qwen/commands",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": ".md"
|
||||||
|
},
|
||||||
|
"opencode": {
|
||||||
|
"dir": ".opencode/command",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": ".md"
|
||||||
|
},
|
||||||
|
"codex": {
|
||||||
|
"dir": ".codex/prompts",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": ".md"
|
||||||
|
},
|
||||||
|
"windsurf": {
|
||||||
|
"dir": ".windsurf/workflows",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": ".md"
|
||||||
|
},
|
||||||
|
"kilocode": {
|
||||||
|
"dir": ".kilocode/workflows",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": ".md"
|
||||||
|
},
|
||||||
|
"auggie": {
|
||||||
|
"dir": ".augment/commands",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": ".md"
|
||||||
|
},
|
||||||
|
"roo": {
|
||||||
|
"dir": ".roo/commands",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": ".md"
|
||||||
|
},
|
||||||
|
"codebuddy": {
|
||||||
|
"dir": ".codebuddy/commands",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": ".md"
|
||||||
|
},
|
||||||
|
"qodercli": {
|
||||||
|
"dir": ".qoder/commands",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": ".md"
|
||||||
|
},
|
||||||
|
"kiro-cli": {
|
||||||
|
"dir": ".kiro/prompts",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": ".md"
|
||||||
|
},
|
||||||
|
"amp": {
|
||||||
|
"dir": ".agents/commands",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": ".md"
|
||||||
|
},
|
||||||
|
"shai": {
|
||||||
|
"dir": ".shai/commands",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": ".md"
|
||||||
|
},
|
||||||
|
"tabnine": {
|
||||||
|
"dir": ".tabnine/agent/commands",
|
||||||
|
"format": "toml",
|
||||||
|
"args": "{{args}}",
|
||||||
|
"extension": ".toml"
|
||||||
|
},
|
||||||
|
"bob": {
|
||||||
|
"dir": ".bob/commands",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": ".md"
|
||||||
|
},
|
||||||
|
"kimi": {
|
||||||
|
"dir": ".kimi/skills",
|
||||||
|
"format": "markdown",
|
||||||
|
"args": "$ARGUMENTS",
|
||||||
|
"extension": "/SKILL.md"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def parse_frontmatter(content: str) -> tuple[dict, str]:
|
||||||
|
"""Parse YAML frontmatter from Markdown content.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
content: Markdown content with YAML frontmatter
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (frontmatter_dict, body_content)
|
||||||
|
"""
|
||||||
|
if not content.startswith("---"):
|
||||||
|
return {}, content
|
||||||
|
|
||||||
|
# Find second ---
|
||||||
|
end_marker = content.find("---", 3)
|
||||||
|
if end_marker == -1:
|
||||||
|
return {}, content
|
||||||
|
|
||||||
|
frontmatter_str = content[3:end_marker].strip()
|
||||||
|
body = content[end_marker + 3:].strip()
|
||||||
|
|
||||||
|
try:
|
||||||
|
frontmatter = yaml.safe_load(frontmatter_str) or {}
|
||||||
|
except yaml.YAMLError:
|
||||||
|
frontmatter = {}
|
||||||
|
|
||||||
|
return frontmatter, body
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def render_frontmatter(fm: dict) -> str:
|
||||||
|
"""Render frontmatter dictionary as YAML.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
fm: Frontmatter dictionary
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
YAML-formatted frontmatter with delimiters
|
||||||
|
"""
|
||||||
|
if not fm:
|
||||||
|
return ""
|
||||||
|
|
||||||
|
yaml_str = yaml.dump(fm, default_flow_style=False, sort_keys=False)
|
||||||
|
return f"---\n{yaml_str}---\n"
|
||||||
|
|
||||||
|
def _adjust_script_paths(self, frontmatter: dict) -> dict:
|
||||||
|
"""Adjust script paths from extension-relative to repo-relative.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
frontmatter: Frontmatter dictionary
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Modified frontmatter with adjusted paths
|
||||||
|
"""
|
||||||
|
if "scripts" in frontmatter:
|
||||||
|
for key in frontmatter["scripts"]:
|
||||||
|
script_path = frontmatter["scripts"][key]
|
||||||
|
if script_path.startswith("../../scripts/"):
|
||||||
|
frontmatter["scripts"][key] = f".specify/scripts/{script_path[14:]}"
|
||||||
|
return frontmatter
|
||||||
|
|
||||||
|
def render_markdown_command(
|
||||||
|
self,
|
||||||
|
frontmatter: dict,
|
||||||
|
body: str,
|
||||||
|
source_id: str,
|
||||||
|
context_note: str = None
|
||||||
|
) -> str:
|
||||||
|
"""Render command in Markdown format.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
frontmatter: Command frontmatter
|
||||||
|
body: Command body content
|
||||||
|
source_id: Source identifier (extension or preset ID)
|
||||||
|
context_note: Custom context comment (default: <!-- Source: {source_id} -->)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Formatted Markdown command file content
|
||||||
|
"""
|
||||||
|
if context_note is None:
|
||||||
|
context_note = f"\n<!-- Source: {source_id} -->\n"
|
||||||
|
return self.render_frontmatter(frontmatter) + "\n" + context_note + body
|
||||||
|
|
||||||
|
def render_toml_command(
|
||||||
|
self,
|
||||||
|
frontmatter: dict,
|
||||||
|
body: str,
|
||||||
|
source_id: str
|
||||||
|
) -> str:
|
||||||
|
"""Render command in TOML format.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
frontmatter: Command frontmatter
|
||||||
|
body: Command body content
|
||||||
|
source_id: Source identifier (extension or preset ID)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Formatted TOML command file content
|
||||||
|
"""
|
||||||
|
toml_lines = []
|
||||||
|
|
||||||
|
if "description" in frontmatter:
|
||||||
|
desc = frontmatter["description"].replace('"', '\\"')
|
||||||
|
toml_lines.append(f'description = "{desc}"')
|
||||||
|
toml_lines.append("")
|
||||||
|
|
||||||
|
toml_lines.append(f"# Source: {source_id}")
|
||||||
|
toml_lines.append("")
|
||||||
|
|
||||||
|
toml_lines.append('prompt = """')
|
||||||
|
toml_lines.append(body)
|
||||||
|
toml_lines.append('"""')
|
||||||
|
|
||||||
|
return "\n".join(toml_lines)
|
||||||
|
|
||||||
|
def _convert_argument_placeholder(self, content: str, from_placeholder: str, to_placeholder: str) -> str:
|
||||||
|
"""Convert argument placeholder format.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
content: Command content
|
||||||
|
from_placeholder: Source placeholder (e.g., "$ARGUMENTS")
|
||||||
|
to_placeholder: Target placeholder (e.g., "{{args}}")
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Content with converted placeholders
|
||||||
|
"""
|
||||||
|
return content.replace(from_placeholder, to_placeholder)
|
||||||
|
|
||||||
|
def register_commands(
|
||||||
|
self,
|
||||||
|
agent_name: str,
|
||||||
|
commands: List[Dict[str, Any]],
|
||||||
|
source_id: str,
|
||||||
|
source_dir: Path,
|
||||||
|
project_root: Path,
|
||||||
|
context_note: str = None
|
||||||
|
) -> List[str]:
|
||||||
|
"""Register commands for a specific agent.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
agent_name: Agent name (claude, gemini, copilot, etc.)
|
||||||
|
commands: List of command info dicts with 'name', 'file', and optional 'aliases'
|
||||||
|
source_id: Identifier of the source (extension or preset ID)
|
||||||
|
source_dir: Directory containing command source files
|
||||||
|
project_root: Path to project root
|
||||||
|
context_note: Custom context comment for markdown output
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of registered command names
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If agent is not supported
|
||||||
|
"""
|
||||||
|
if agent_name not in self.AGENT_CONFIGS:
|
||||||
|
raise ValueError(f"Unsupported agent: {agent_name}")
|
||||||
|
|
||||||
|
agent_config = self.AGENT_CONFIGS[agent_name]
|
||||||
|
commands_dir = project_root / agent_config["dir"]
|
||||||
|
commands_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
registered = []
|
||||||
|
|
||||||
|
for cmd_info in commands:
|
||||||
|
cmd_name = cmd_info["name"]
|
||||||
|
cmd_file = cmd_info["file"]
|
||||||
|
|
||||||
|
source_file = source_dir / cmd_file
|
||||||
|
if not source_file.exists():
|
||||||
|
continue
|
||||||
|
|
||||||
|
content = source_file.read_text(encoding="utf-8")
|
||||||
|
frontmatter, body = self.parse_frontmatter(content)
|
||||||
|
|
||||||
|
frontmatter = self._adjust_script_paths(frontmatter)
|
||||||
|
|
||||||
|
body = self._convert_argument_placeholder(
|
||||||
|
body, "$ARGUMENTS", agent_config["args"]
|
||||||
|
)
|
||||||
|
|
||||||
|
if agent_config["format"] == "markdown":
|
||||||
|
output = self.render_markdown_command(frontmatter, body, source_id, context_note)
|
||||||
|
elif agent_config["format"] == "toml":
|
||||||
|
output = self.render_toml_command(frontmatter, body, source_id)
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unsupported format: {agent_config['format']}")
|
||||||
|
|
||||||
|
dest_file = commands_dir / f"{cmd_name}{agent_config['extension']}"
|
||||||
|
dest_file.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
dest_file.write_text(output, encoding="utf-8")
|
||||||
|
|
||||||
|
if agent_name == "copilot":
|
||||||
|
self.write_copilot_prompt(project_root, cmd_name)
|
||||||
|
|
||||||
|
registered.append(cmd_name)
|
||||||
|
|
||||||
|
for alias in cmd_info.get("aliases", []):
|
||||||
|
alias_file = commands_dir / f"{alias}{agent_config['extension']}"
|
||||||
|
alias_file.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
alias_file.write_text(output, encoding="utf-8")
|
||||||
|
if agent_name == "copilot":
|
||||||
|
self.write_copilot_prompt(project_root, alias)
|
||||||
|
registered.append(alias)
|
||||||
|
|
||||||
|
return registered
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def write_copilot_prompt(project_root: Path, cmd_name: str) -> None:
|
||||||
|
"""Generate a companion .prompt.md file for a Copilot agent command.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
project_root: Path to project root
|
||||||
|
cmd_name: Command name (e.g. 'speckit.my-ext.example')
|
||||||
|
"""
|
||||||
|
prompts_dir = project_root / ".github" / "prompts"
|
||||||
|
prompts_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
prompt_file = prompts_dir / f"{cmd_name}.prompt.md"
|
||||||
|
prompt_file.write_text(f"---\nagent: {cmd_name}\n---\n", encoding="utf-8")
|
||||||
|
|
||||||
|
def register_commands_for_all_agents(
|
||||||
|
self,
|
||||||
|
commands: List[Dict[str, Any]],
|
||||||
|
source_id: str,
|
||||||
|
source_dir: Path,
|
||||||
|
project_root: Path,
|
||||||
|
context_note: str = None
|
||||||
|
) -> Dict[str, List[str]]:
|
||||||
|
"""Register commands for all detected agents in the project.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
commands: List of command info dicts
|
||||||
|
source_id: Identifier of the source (extension or preset ID)
|
||||||
|
source_dir: Directory containing command source files
|
||||||
|
project_root: Path to project root
|
||||||
|
context_note: Custom context comment for markdown output
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dictionary mapping agent names to list of registered commands
|
||||||
|
"""
|
||||||
|
results = {}
|
||||||
|
|
||||||
|
for agent_name, agent_config in self.AGENT_CONFIGS.items():
|
||||||
|
agent_dir = project_root / agent_config["dir"].split("/")[0]
|
||||||
|
|
||||||
|
if agent_dir.exists():
|
||||||
|
try:
|
||||||
|
registered = self.register_commands(
|
||||||
|
agent_name, commands, source_id, source_dir, project_root,
|
||||||
|
context_note=context_note
|
||||||
|
)
|
||||||
|
if registered:
|
||||||
|
results[agent_name] = registered
|
||||||
|
except ValueError:
|
||||||
|
continue
|
||||||
|
|
||||||
|
return results
|
||||||
|
|
||||||
|
def unregister_commands(
|
||||||
|
self,
|
||||||
|
registered_commands: Dict[str, List[str]],
|
||||||
|
project_root: Path
|
||||||
|
) -> None:
|
||||||
|
"""Remove previously registered command files from agent directories.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
registered_commands: Dict mapping agent names to command name lists
|
||||||
|
project_root: Path to project root
|
||||||
|
"""
|
||||||
|
for agent_name, cmd_names in registered_commands.items():
|
||||||
|
if agent_name not in self.AGENT_CONFIGS:
|
||||||
|
continue
|
||||||
|
|
||||||
|
agent_config = self.AGENT_CONFIGS[agent_name]
|
||||||
|
commands_dir = project_root / agent_config["dir"]
|
||||||
|
|
||||||
|
for cmd_name in cmd_names:
|
||||||
|
cmd_file = commands_dir / f"{cmd_name}{agent_config['extension']}"
|
||||||
|
if cmd_file.exists():
|
||||||
|
cmd_file.unlink()
|
||||||
|
|
||||||
|
if agent_name == "copilot":
|
||||||
|
prompt_file = project_root / ".github" / "prompts" / f"{cmd_name}.prompt.md"
|
||||||
|
if prompt_file.exists():
|
||||||
|
prompt_file.unlink()
|
||||||
@@ -12,6 +12,7 @@ import os
|
|||||||
import tempfile
|
import tempfile
|
||||||
import zipfile
|
import zipfile
|
||||||
import shutil
|
import shutil
|
||||||
|
import copy
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Optional, Dict, List, Any, Callable, Set
|
from typing import Optional, Dict, List, Any, Callable, Set
|
||||||
@@ -228,6 +229,54 @@ class ExtensionRegistry:
|
|||||||
}
|
}
|
||||||
self._save()
|
self._save()
|
||||||
|
|
||||||
|
def update(self, extension_id: str, metadata: dict):
|
||||||
|
"""Update extension metadata in registry, merging with existing entry.
|
||||||
|
|
||||||
|
Merges the provided metadata with the existing entry, preserving any
|
||||||
|
fields not specified in the new metadata. The installed_at timestamp
|
||||||
|
is always preserved from the original entry.
|
||||||
|
|
||||||
|
Use this method instead of add() when updating existing extension
|
||||||
|
metadata (e.g., enabling/disabling) to preserve the original
|
||||||
|
installation timestamp and other existing fields.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
extension_id: Extension ID
|
||||||
|
metadata: Extension metadata fields to update (merged with existing)
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
KeyError: If extension is not installed
|
||||||
|
"""
|
||||||
|
if extension_id not in self.data["extensions"]:
|
||||||
|
raise KeyError(f"Extension '{extension_id}' is not installed")
|
||||||
|
# Merge new metadata with existing, preserving original installed_at
|
||||||
|
existing = self.data["extensions"][extension_id]
|
||||||
|
# Merge: existing fields preserved, new fields override
|
||||||
|
merged = {**existing, **metadata}
|
||||||
|
# Always preserve original installed_at based on key existence, not truthiness,
|
||||||
|
# to handle cases where the field exists but may be falsy (legacy/corruption)
|
||||||
|
if "installed_at" in existing:
|
||||||
|
merged["installed_at"] = existing["installed_at"]
|
||||||
|
else:
|
||||||
|
# If not present in existing, explicitly remove from merged if caller provided it
|
||||||
|
merged.pop("installed_at", None)
|
||||||
|
self.data["extensions"][extension_id] = merged
|
||||||
|
self._save()
|
||||||
|
|
||||||
|
def restore(self, extension_id: str, metadata: dict):
|
||||||
|
"""Restore extension metadata to registry without modifying timestamps.
|
||||||
|
|
||||||
|
Use this method for rollback scenarios where you have a complete backup
|
||||||
|
of the registry entry (including installed_at) and want to restore it
|
||||||
|
exactly as it was.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
extension_id: Extension ID
|
||||||
|
metadata: Complete extension metadata including installed_at
|
||||||
|
"""
|
||||||
|
self.data["extensions"][extension_id] = dict(metadata)
|
||||||
|
self._save()
|
||||||
|
|
||||||
def remove(self, extension_id: str):
|
def remove(self, extension_id: str):
|
||||||
"""Remove extension from registry.
|
"""Remove extension from registry.
|
||||||
|
|
||||||
@@ -241,21 +290,28 @@ class ExtensionRegistry:
|
|||||||
def get(self, extension_id: str) -> Optional[dict]:
|
def get(self, extension_id: str) -> Optional[dict]:
|
||||||
"""Get extension metadata from registry.
|
"""Get extension metadata from registry.
|
||||||
|
|
||||||
|
Returns a deep copy to prevent callers from accidentally mutating
|
||||||
|
nested internal registry state without going through the write path.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
extension_id: Extension ID
|
extension_id: Extension ID
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Extension metadata or None if not found
|
Deep copy of extension metadata, or None if not found
|
||||||
"""
|
"""
|
||||||
return self.data["extensions"].get(extension_id)
|
entry = self.data["extensions"].get(extension_id)
|
||||||
|
return copy.deepcopy(entry) if entry is not None else None
|
||||||
|
|
||||||
def list(self) -> Dict[str, dict]:
|
def list(self) -> Dict[str, dict]:
|
||||||
"""Get all installed extensions.
|
"""Get all installed extensions.
|
||||||
|
|
||||||
|
Returns a deep copy of the extensions mapping to prevent callers
|
||||||
|
from accidentally mutating nested internal registry state.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Dictionary of extension_id -> metadata
|
Dictionary of extension_id -> metadata (deep copies)
|
||||||
"""
|
"""
|
||||||
return self.data["extensions"]
|
return copy.deepcopy(self.data["extensions"])
|
||||||
|
|
||||||
def is_installed(self, extension_id: str) -> bool:
|
def is_installed(self, extension_id: str) -> bool:
|
||||||
"""Check if extension is installed.
|
"""Check if extension is installed.
|
||||||
@@ -522,23 +578,7 @@ class ExtensionManager:
|
|||||||
# Unregister commands from all AI agents
|
# Unregister commands from all AI agents
|
||||||
if registered_commands:
|
if registered_commands:
|
||||||
registrar = CommandRegistrar()
|
registrar = CommandRegistrar()
|
||||||
for agent_name, cmd_names in registered_commands.items():
|
registrar.unregister_commands(registered_commands, self.project_root)
|
||||||
if agent_name not in registrar.AGENT_CONFIGS:
|
|
||||||
continue
|
|
||||||
|
|
||||||
agent_config = registrar.AGENT_CONFIGS[agent_name]
|
|
||||||
commands_dir = self.project_root / agent_config["dir"]
|
|
||||||
|
|
||||||
for cmd_name in cmd_names:
|
|
||||||
cmd_file = commands_dir / f"{cmd_name}{agent_config['extension']}"
|
|
||||||
if cmd_file.exists():
|
|
||||||
cmd_file.unlink()
|
|
||||||
|
|
||||||
# Also remove companion .prompt.md for Copilot
|
|
||||||
if agent_name == "copilot":
|
|
||||||
prompt_file = self.project_root / ".github" / "prompts" / f"{cmd_name}.prompt.md"
|
|
||||||
if prompt_file.exists():
|
|
||||||
prompt_file.unlink()
|
|
||||||
|
|
||||||
if keep_config:
|
if keep_config:
|
||||||
# Preserve config files, only remove non-config files
|
# Preserve config files, only remove non-config files
|
||||||
@@ -600,7 +640,7 @@ class ExtensionManager:
|
|||||||
result.append({
|
result.append({
|
||||||
"id": ext_id,
|
"id": ext_id,
|
||||||
"name": manifest.name,
|
"name": manifest.name,
|
||||||
"version": metadata["version"],
|
"version": metadata.get("version", "unknown"),
|
||||||
"description": manifest.description,
|
"description": manifest.description,
|
||||||
"enabled": metadata.get("enabled", True),
|
"enabled": metadata.get("enabled", True),
|
||||||
"installed_at": metadata.get("installed_at"),
|
"installed_at": metadata.get("installed_at"),
|
||||||
@@ -662,255 +702,47 @@ def version_satisfies(current: str, required: str) -> bool:
|
|||||||
|
|
||||||
|
|
||||||
class CommandRegistrar:
|
class CommandRegistrar:
|
||||||
"""Handles registration of extension commands with AI agents."""
|
"""Handles registration of extension commands with AI agents.
|
||||||
|
|
||||||
# Agent configurations with directory, format, and argument placeholder
|
This is a backward-compatible wrapper around the shared CommandRegistrar
|
||||||
AGENT_CONFIGS = {
|
in agents.py. Extension-specific methods accept ExtensionManifest objects
|
||||||
"claude": {
|
and delegate to the generic API.
|
||||||
"dir": ".claude/commands",
|
"""
|
||||||
"format": "markdown",
|
|
||||||
"args": "$ARGUMENTS",
|
|
||||||
"extension": ".md"
|
|
||||||
},
|
|
||||||
"gemini": {
|
|
||||||
"dir": ".gemini/commands",
|
|
||||||
"format": "toml",
|
|
||||||
"args": "{{args}}",
|
|
||||||
"extension": ".toml"
|
|
||||||
},
|
|
||||||
"copilot": {
|
|
||||||
"dir": ".github/agents",
|
|
||||||
"format": "markdown",
|
|
||||||
"args": "$ARGUMENTS",
|
|
||||||
"extension": ".agent.md"
|
|
||||||
},
|
|
||||||
"cursor": {
|
|
||||||
"dir": ".cursor/commands",
|
|
||||||
"format": "markdown",
|
|
||||||
"args": "$ARGUMENTS",
|
|
||||||
"extension": ".md"
|
|
||||||
},
|
|
||||||
"qwen": {
|
|
||||||
"dir": ".qwen/commands",
|
|
||||||
"format": "toml",
|
|
||||||
"args": "{{args}}",
|
|
||||||
"extension": ".toml"
|
|
||||||
},
|
|
||||||
"opencode": {
|
|
||||||
"dir": ".opencode/command",
|
|
||||||
"format": "markdown",
|
|
||||||
"args": "$ARGUMENTS",
|
|
||||||
"extension": ".md"
|
|
||||||
},
|
|
||||||
"codex": {
|
|
||||||
"dir": ".codex/prompts",
|
|
||||||
"format": "markdown",
|
|
||||||
"args": "$ARGUMENTS",
|
|
||||||
"extension": ".md"
|
|
||||||
},
|
|
||||||
"windsurf": {
|
|
||||||
"dir": ".windsurf/workflows",
|
|
||||||
"format": "markdown",
|
|
||||||
"args": "$ARGUMENTS",
|
|
||||||
"extension": ".md"
|
|
||||||
},
|
|
||||||
"kilocode": {
|
|
||||||
"dir": ".kilocode/rules",
|
|
||||||
"format": "markdown",
|
|
||||||
"args": "$ARGUMENTS",
|
|
||||||
"extension": ".md"
|
|
||||||
},
|
|
||||||
"auggie": {
|
|
||||||
"dir": ".augment/rules",
|
|
||||||
"format": "markdown",
|
|
||||||
"args": "$ARGUMENTS",
|
|
||||||
"extension": ".md"
|
|
||||||
},
|
|
||||||
"roo": {
|
|
||||||
"dir": ".roo/commands",
|
|
||||||
"format": "markdown",
|
|
||||||
"args": "$ARGUMENTS",
|
|
||||||
"extension": ".md"
|
|
||||||
},
|
|
||||||
"codebuddy": {
|
|
||||||
"dir": ".codebuddy/commands",
|
|
||||||
"format": "markdown",
|
|
||||||
"args": "$ARGUMENTS",
|
|
||||||
"extension": ".md"
|
|
||||||
},
|
|
||||||
"qodercli": {
|
|
||||||
"dir": ".qoder/commands",
|
|
||||||
"format": "markdown",
|
|
||||||
"args": "$ARGUMENTS",
|
|
||||||
"extension": ".md"
|
|
||||||
},
|
|
||||||
"kiro-cli": {
|
|
||||||
"dir": ".kiro/prompts",
|
|
||||||
"format": "markdown",
|
|
||||||
"args": "$ARGUMENTS",
|
|
||||||
"extension": ".md"
|
|
||||||
},
|
|
||||||
"amp": {
|
|
||||||
"dir": ".agents/commands",
|
|
||||||
"format": "markdown",
|
|
||||||
"args": "$ARGUMENTS",
|
|
||||||
"extension": ".md"
|
|
||||||
},
|
|
||||||
"shai": {
|
|
||||||
"dir": ".shai/commands",
|
|
||||||
"format": "markdown",
|
|
||||||
"args": "$ARGUMENTS",
|
|
||||||
"extension": ".md"
|
|
||||||
},
|
|
||||||
"tabnine": {
|
|
||||||
"dir": ".tabnine/agent/commands",
|
|
||||||
"format": "toml",
|
|
||||||
"args": "{{args}}",
|
|
||||||
"extension": ".toml"
|
|
||||||
},
|
|
||||||
"bob": {
|
|
||||||
"dir": ".bob/commands",
|
|
||||||
"format": "markdown",
|
|
||||||
"args": "$ARGUMENTS",
|
|
||||||
"extension": ".md"
|
|
||||||
},
|
|
||||||
"kimi": {
|
|
||||||
"dir": ".kimi/skills",
|
|
||||||
"format": "markdown",
|
|
||||||
"args": "$ARGUMENTS",
|
|
||||||
"extension": "/SKILL.md"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
# Re-export AGENT_CONFIGS at class level for direct attribute access
|
||||||
|
from .agents import CommandRegistrar as _AgentRegistrar
|
||||||
|
AGENT_CONFIGS = _AgentRegistrar.AGENT_CONFIGS
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
from .agents import CommandRegistrar as _Registrar
|
||||||
|
self._registrar = _Registrar()
|
||||||
|
|
||||||
|
# Delegate static/utility methods
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def parse_frontmatter(content: str) -> tuple[dict, str]:
|
def parse_frontmatter(content: str) -> tuple[dict, str]:
|
||||||
"""Parse YAML frontmatter from Markdown content.
|
from .agents import CommandRegistrar as _Registrar
|
||||||
|
return _Registrar.parse_frontmatter(content)
|
||||||
Args:
|
|
||||||
content: Markdown content with YAML frontmatter
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Tuple of (frontmatter_dict, body_content)
|
|
||||||
"""
|
|
||||||
if not content.startswith("---"):
|
|
||||||
return {}, content
|
|
||||||
|
|
||||||
# Find second ---
|
|
||||||
end_marker = content.find("---", 3)
|
|
||||||
if end_marker == -1:
|
|
||||||
return {}, content
|
|
||||||
|
|
||||||
frontmatter_str = content[3:end_marker].strip()
|
|
||||||
body = content[end_marker + 3:].strip()
|
|
||||||
|
|
||||||
try:
|
|
||||||
frontmatter = yaml.safe_load(frontmatter_str) or {}
|
|
||||||
except yaml.YAMLError:
|
|
||||||
frontmatter = {}
|
|
||||||
|
|
||||||
return frontmatter, body
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def render_frontmatter(fm: dict) -> str:
|
def render_frontmatter(fm: dict) -> str:
|
||||||
"""Render frontmatter dictionary as YAML.
|
from .agents import CommandRegistrar as _Registrar
|
||||||
|
return _Registrar.render_frontmatter(fm)
|
||||||
|
|
||||||
Args:
|
@staticmethod
|
||||||
fm: Frontmatter dictionary
|
def _write_copilot_prompt(project_root, cmd_name: str) -> None:
|
||||||
|
from .agents import CommandRegistrar as _Registrar
|
||||||
|
_Registrar.write_copilot_prompt(project_root, cmd_name)
|
||||||
|
|
||||||
Returns:
|
def _render_markdown_command(self, frontmatter, body, ext_id):
|
||||||
YAML-formatted frontmatter with delimiters
|
# Preserve extension-specific comment format for backward compatibility
|
||||||
"""
|
|
||||||
if not fm:
|
|
||||||
return ""
|
|
||||||
|
|
||||||
yaml_str = yaml.dump(fm, default_flow_style=False, sort_keys=False)
|
|
||||||
return f"---\n{yaml_str}---\n"
|
|
||||||
|
|
||||||
def _adjust_script_paths(self, frontmatter: dict) -> dict:
|
|
||||||
"""Adjust script paths from extension-relative to repo-relative.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
frontmatter: Frontmatter dictionary
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Modified frontmatter with adjusted paths
|
|
||||||
"""
|
|
||||||
if "scripts" in frontmatter:
|
|
||||||
for key in frontmatter["scripts"]:
|
|
||||||
script_path = frontmatter["scripts"][key]
|
|
||||||
if script_path.startswith("../../scripts/"):
|
|
||||||
frontmatter["scripts"][key] = f".specify/scripts/{script_path[14:]}"
|
|
||||||
return frontmatter
|
|
||||||
|
|
||||||
def _render_markdown_command(
|
|
||||||
self,
|
|
||||||
frontmatter: dict,
|
|
||||||
body: str,
|
|
||||||
ext_id: str
|
|
||||||
) -> str:
|
|
||||||
"""Render command in Markdown format.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
frontmatter: Command frontmatter
|
|
||||||
body: Command body content
|
|
||||||
ext_id: Extension ID
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Formatted Markdown command file content
|
|
||||||
"""
|
|
||||||
context_note = f"\n<!-- Extension: {ext_id} -->\n<!-- Config: .specify/extensions/{ext_id}/ -->\n"
|
context_note = f"\n<!-- Extension: {ext_id} -->\n<!-- Config: .specify/extensions/{ext_id}/ -->\n"
|
||||||
return self.render_frontmatter(frontmatter) + "\n" + context_note + body
|
return self._registrar.render_frontmatter(frontmatter) + "\n" + context_note + body
|
||||||
|
|
||||||
def _render_toml_command(
|
def _render_toml_command(self, frontmatter, body, ext_id):
|
||||||
self,
|
# Preserve extension-specific context comments for backward compatibility
|
||||||
frontmatter: dict,
|
base = self._registrar.render_toml_command(frontmatter, body, ext_id)
|
||||||
body: str,
|
context_lines = f"# Extension: {ext_id}\n# Config: .specify/extensions/{ext_id}/\n"
|
||||||
ext_id: str
|
return base.rstrip("\n") + "\n" + context_lines
|
||||||
) -> str:
|
|
||||||
"""Render command in TOML format.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
frontmatter: Command frontmatter
|
|
||||||
body: Command body content
|
|
||||||
ext_id: Extension ID
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Formatted TOML command file content
|
|
||||||
"""
|
|
||||||
# TOML format for Gemini/Qwen
|
|
||||||
toml_lines = []
|
|
||||||
|
|
||||||
# Add description if present
|
|
||||||
if "description" in frontmatter:
|
|
||||||
# Escape quotes in description
|
|
||||||
desc = frontmatter["description"].replace('"', '\\"')
|
|
||||||
toml_lines.append(f'description = "{desc}"')
|
|
||||||
toml_lines.append("")
|
|
||||||
|
|
||||||
# Add extension context as comments
|
|
||||||
toml_lines.append(f"# Extension: {ext_id}")
|
|
||||||
toml_lines.append(f"# Config: .specify/extensions/{ext_id}/")
|
|
||||||
toml_lines.append("")
|
|
||||||
|
|
||||||
# Add prompt content
|
|
||||||
toml_lines.append('prompt = """')
|
|
||||||
toml_lines.append(body)
|
|
||||||
toml_lines.append('"""')
|
|
||||||
|
|
||||||
return "\n".join(toml_lines)
|
|
||||||
|
|
||||||
def _convert_argument_placeholder(self, content: str, from_placeholder: str, to_placeholder: str) -> str:
|
|
||||||
"""Convert argument placeholder format.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
content: Command content
|
|
||||||
from_placeholder: Source placeholder (e.g., "$ARGUMENTS")
|
|
||||||
to_placeholder: Target placeholder (e.g., "{{args}}")
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Content with converted placeholders
|
|
||||||
"""
|
|
||||||
return content.replace(from_placeholder, to_placeholder)
|
|
||||||
|
|
||||||
def register_commands_for_agent(
|
def register_commands_for_agent(
|
||||||
self,
|
self,
|
||||||
@@ -919,96 +751,14 @@ class CommandRegistrar:
|
|||||||
extension_dir: Path,
|
extension_dir: Path,
|
||||||
project_root: Path
|
project_root: Path
|
||||||
) -> List[str]:
|
) -> List[str]:
|
||||||
"""Register extension commands for a specific agent.
|
"""Register extension commands for a specific agent."""
|
||||||
|
|
||||||
Args:
|
|
||||||
agent_name: Agent name (claude, gemini, copilot, etc.)
|
|
||||||
manifest: Extension manifest
|
|
||||||
extension_dir: Path to extension directory
|
|
||||||
project_root: Path to project root
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
List of registered command names
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
ExtensionError: If agent is not supported
|
|
||||||
"""
|
|
||||||
if agent_name not in self.AGENT_CONFIGS:
|
if agent_name not in self.AGENT_CONFIGS:
|
||||||
raise ExtensionError(f"Unsupported agent: {agent_name}")
|
raise ExtensionError(f"Unsupported agent: {agent_name}")
|
||||||
|
context_note = f"\n<!-- Extension: {manifest.id} -->\n<!-- Config: .specify/extensions/{manifest.id}/ -->\n"
|
||||||
agent_config = self.AGENT_CONFIGS[agent_name]
|
return self._registrar.register_commands(
|
||||||
commands_dir = project_root / agent_config["dir"]
|
agent_name, manifest.commands, manifest.id, extension_dir, project_root,
|
||||||
commands_dir.mkdir(parents=True, exist_ok=True)
|
context_note=context_note
|
||||||
|
)
|
||||||
registered = []
|
|
||||||
|
|
||||||
for cmd_info in manifest.commands:
|
|
||||||
cmd_name = cmd_info["name"]
|
|
||||||
cmd_file = cmd_info["file"]
|
|
||||||
|
|
||||||
# Read source command file
|
|
||||||
source_file = extension_dir / cmd_file
|
|
||||||
if not source_file.exists():
|
|
||||||
continue
|
|
||||||
|
|
||||||
content = source_file.read_text()
|
|
||||||
frontmatter, body = self.parse_frontmatter(content)
|
|
||||||
|
|
||||||
# Adjust script paths
|
|
||||||
frontmatter = self._adjust_script_paths(frontmatter)
|
|
||||||
|
|
||||||
# Convert argument placeholders
|
|
||||||
body = self._convert_argument_placeholder(
|
|
||||||
body, "$ARGUMENTS", agent_config["args"]
|
|
||||||
)
|
|
||||||
|
|
||||||
# Render in agent-specific format
|
|
||||||
if agent_config["format"] == "markdown":
|
|
||||||
output = self._render_markdown_command(frontmatter, body, manifest.id)
|
|
||||||
elif agent_config["format"] == "toml":
|
|
||||||
output = self._render_toml_command(frontmatter, body, manifest.id)
|
|
||||||
else:
|
|
||||||
raise ExtensionError(f"Unsupported format: {agent_config['format']}")
|
|
||||||
|
|
||||||
# Write command file
|
|
||||||
dest_file = commands_dir / f"{cmd_name}{agent_config['extension']}"
|
|
||||||
dest_file.parent.mkdir(parents=True, exist_ok=True)
|
|
||||||
dest_file.write_text(output)
|
|
||||||
|
|
||||||
# Generate companion .prompt.md for Copilot agents
|
|
||||||
if agent_name == "copilot":
|
|
||||||
self._write_copilot_prompt(project_root, cmd_name)
|
|
||||||
|
|
||||||
registered.append(cmd_name)
|
|
||||||
|
|
||||||
# Register aliases
|
|
||||||
for alias in cmd_info.get("aliases", []):
|
|
||||||
alias_file = commands_dir / f"{alias}{agent_config['extension']}"
|
|
||||||
alias_file.parent.mkdir(parents=True, exist_ok=True)
|
|
||||||
alias_file.write_text(output)
|
|
||||||
# Generate companion .prompt.md for alias too
|
|
||||||
if agent_name == "copilot":
|
|
||||||
self._write_copilot_prompt(project_root, alias)
|
|
||||||
registered.append(alias)
|
|
||||||
|
|
||||||
return registered
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _write_copilot_prompt(project_root: Path, cmd_name: str) -> None:
|
|
||||||
"""Generate a companion .prompt.md file for a Copilot agent command.
|
|
||||||
|
|
||||||
Copilot requires a .prompt.md file in .github/prompts/ that references
|
|
||||||
the corresponding .agent.md file in .github/agents/ via an ``agent:``
|
|
||||||
frontmatter field.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
project_root: Path to project root
|
|
||||||
cmd_name: Command name (used as the file stem, e.g. 'speckit.my-ext.example')
|
|
||||||
"""
|
|
||||||
prompts_dir = project_root / ".github" / "prompts"
|
|
||||||
prompts_dir.mkdir(parents=True, exist_ok=True)
|
|
||||||
prompt_file = prompts_dir / f"{cmd_name}.prompt.md"
|
|
||||||
prompt_file.write_text(f"---\nagent: {cmd_name}\n---\n")
|
|
||||||
|
|
||||||
def register_commands_for_all_agents(
|
def register_commands_for_all_agents(
|
||||||
self,
|
self,
|
||||||
@@ -1016,35 +766,20 @@ class CommandRegistrar:
|
|||||||
extension_dir: Path,
|
extension_dir: Path,
|
||||||
project_root: Path
|
project_root: Path
|
||||||
) -> Dict[str, List[str]]:
|
) -> Dict[str, List[str]]:
|
||||||
"""Register extension commands for all detected agents.
|
"""Register extension commands for all detected agents."""
|
||||||
|
context_note = f"\n<!-- Extension: {manifest.id} -->\n<!-- Config: .specify/extensions/{manifest.id}/ -->\n"
|
||||||
|
return self._registrar.register_commands_for_all_agents(
|
||||||
|
manifest.commands, manifest.id, extension_dir, project_root,
|
||||||
|
context_note=context_note
|
||||||
|
)
|
||||||
|
|
||||||
Args:
|
def unregister_commands(
|
||||||
manifest: Extension manifest
|
self,
|
||||||
extension_dir: Path to extension directory
|
registered_commands: Dict[str, List[str]],
|
||||||
project_root: Path to project root
|
project_root: Path
|
||||||
|
) -> None:
|
||||||
Returns:
|
"""Remove previously registered command files from agent directories."""
|
||||||
Dictionary mapping agent names to list of registered commands
|
self._registrar.unregister_commands(registered_commands, project_root)
|
||||||
"""
|
|
||||||
results = {}
|
|
||||||
|
|
||||||
# Detect which agents are present in the project
|
|
||||||
for agent_name, agent_config in self.AGENT_CONFIGS.items():
|
|
||||||
agent_dir = project_root / agent_config["dir"].split("/")[0]
|
|
||||||
|
|
||||||
# Register if agent directory exists
|
|
||||||
if agent_dir.exists():
|
|
||||||
try:
|
|
||||||
registered = self.register_commands_for_agent(
|
|
||||||
agent_name, manifest, extension_dir, project_root
|
|
||||||
)
|
|
||||||
if registered:
|
|
||||||
results[agent_name] = registered
|
|
||||||
except ExtensionError:
|
|
||||||
# Skip agent on error
|
|
||||||
continue
|
|
||||||
|
|
||||||
return results
|
|
||||||
|
|
||||||
def register_commands_for_claude(
|
def register_commands_for_claude(
|
||||||
self,
|
self,
|
||||||
@@ -1052,16 +787,7 @@ class CommandRegistrar:
|
|||||||
extension_dir: Path,
|
extension_dir: Path,
|
||||||
project_root: Path
|
project_root: Path
|
||||||
) -> List[str]:
|
) -> List[str]:
|
||||||
"""Register extension commands for Claude Code agent.
|
"""Register extension commands for Claude Code agent."""
|
||||||
|
|
||||||
Args:
|
|
||||||
manifest: Extension manifest
|
|
||||||
extension_dir: Path to extension directory
|
|
||||||
project_root: Path to project root
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
List of registered command names
|
|
||||||
"""
|
|
||||||
return self.register_commands_for_agent("claude", manifest, extension_dir, project_root)
|
return self.register_commands_for_agent("claude", manifest, extension_dir, project_root)
|
||||||
|
|
||||||
|
|
||||||
@@ -1112,12 +838,13 @@ class ExtensionCatalog:
|
|||||||
config_path: Path to extension-catalogs.yml
|
config_path: Path to extension-catalogs.yml
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Ordered list of CatalogEntry objects, or None if file doesn't exist
|
Ordered list of CatalogEntry objects, or None if file doesn't exist.
|
||||||
or contains no valid catalog entries.
|
|
||||||
|
|
||||||
Raises:
|
Raises:
|
||||||
ValidationError: If any catalog entry has an invalid URL,
|
ValidationError: If any catalog entry has an invalid URL,
|
||||||
the file cannot be parsed, or a priority value is invalid.
|
the file cannot be parsed, a priority value is invalid,
|
||||||
|
or the file exists but contains no valid catalog entries
|
||||||
|
(fail-closed for security).
|
||||||
"""
|
"""
|
||||||
if not config_path.exists():
|
if not config_path.exists():
|
||||||
return None
|
return None
|
||||||
@@ -1129,12 +856,17 @@ class ExtensionCatalog:
|
|||||||
)
|
)
|
||||||
catalogs_data = data.get("catalogs", [])
|
catalogs_data = data.get("catalogs", [])
|
||||||
if not catalogs_data:
|
if not catalogs_data:
|
||||||
return None
|
# File exists but has no catalogs key or empty list - fail closed
|
||||||
|
raise ValidationError(
|
||||||
|
f"Catalog config {config_path} exists but contains no 'catalogs' entries. "
|
||||||
|
f"Remove the file to use built-in defaults, or add valid catalog entries."
|
||||||
|
)
|
||||||
if not isinstance(catalogs_data, list):
|
if not isinstance(catalogs_data, list):
|
||||||
raise ValidationError(
|
raise ValidationError(
|
||||||
f"Invalid catalog config: 'catalogs' must be a list, got {type(catalogs_data).__name__}"
|
f"Invalid catalog config: 'catalogs' must be a list, got {type(catalogs_data).__name__}"
|
||||||
)
|
)
|
||||||
entries: List[CatalogEntry] = []
|
entries: List[CatalogEntry] = []
|
||||||
|
skipped_entries: List[int] = []
|
||||||
for idx, item in enumerate(catalogs_data):
|
for idx, item in enumerate(catalogs_data):
|
||||||
if not isinstance(item, dict):
|
if not isinstance(item, dict):
|
||||||
raise ValidationError(
|
raise ValidationError(
|
||||||
@@ -1142,6 +874,7 @@ class ExtensionCatalog:
|
|||||||
)
|
)
|
||||||
url = str(item.get("url", "")).strip()
|
url = str(item.get("url", "")).strip()
|
||||||
if not url:
|
if not url:
|
||||||
|
skipped_entries.append(idx)
|
||||||
continue
|
continue
|
||||||
self._validate_catalog_url(url)
|
self._validate_catalog_url(url)
|
||||||
try:
|
try:
|
||||||
@@ -1164,7 +897,14 @@ class ExtensionCatalog:
|
|||||||
description=str(item.get("description", "")),
|
description=str(item.get("description", "")),
|
||||||
))
|
))
|
||||||
entries.sort(key=lambda e: e.priority)
|
entries.sort(key=lambda e: e.priority)
|
||||||
return entries if entries else None
|
if not entries:
|
||||||
|
# All entries were invalid (missing URLs) - fail closed for security
|
||||||
|
raise ValidationError(
|
||||||
|
f"Catalog config {config_path} contains {len(catalogs_data)} entries but none have valid URLs "
|
||||||
|
f"(entries at indices {skipped_entries} were skipped). "
|
||||||
|
f"Each catalog entry must have a 'url' field."
|
||||||
|
)
|
||||||
|
return entries
|
||||||
|
|
||||||
def get_active_catalogs(self) -> List[CatalogEntry]:
|
def get_active_catalogs(self) -> List[CatalogEntry]:
|
||||||
"""Get the ordered list of active catalogs.
|
"""Get the ordered list of active catalogs.
|
||||||
|
|||||||
1530
src/specify_cli/presets.py
Normal file
1530
src/specify_cli/presets.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -129,7 +129,7 @@ Given that feature description, do this:
|
|||||||
|
|
||||||
c. **Handle Validation Results**:
|
c. **Handle Validation Results**:
|
||||||
|
|
||||||
- **If all items pass**: Mark checklist complete and proceed to step 6
|
- **If all items pass**: Mark checklist complete and proceed to step 7
|
||||||
|
|
||||||
- **If items fail (excluding [NEEDS CLARIFICATION])**:
|
- **If items fail (excluding [NEEDS CLARIFICATION])**:
|
||||||
1. List the failing items and specific issues
|
1. List the failing items and specific issues
|
||||||
@@ -178,8 +178,6 @@ Given that feature description, do this:
|
|||||||
|
|
||||||
**NOTE:** The script creates and checks out the new branch and initializes the spec file before writing.
|
**NOTE:** The script creates and checks out the new branch and initializes the spec file before writing.
|
||||||
|
|
||||||
## General Guidelines
|
|
||||||
|
|
||||||
## Quick Guidelines
|
## Quick Guidelines
|
||||||
|
|
||||||
- Focus on **WHAT** users need and **WHY**.
|
- Focus on **WHAT** users need and **WHY**.
|
||||||
|
|||||||
@@ -62,7 +62,14 @@ class TestAgentConfigConsistency:
|
|||||||
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
|
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
|
||||||
|
|
||||||
assert re.search(r"'shai'\s*\{.*?\.shai/commands", ps_text, re.S) is not None
|
assert re.search(r"'shai'\s*\{.*?\.shai/commands", ps_text, re.S) is not None
|
||||||
assert re.search(r"'agy'\s*\{.*?\.agent/workflows", ps_text, re.S) is not None
|
assert re.search(r"'agy'\s*\{.*?\.agent/commands", ps_text, re.S) is not None
|
||||||
|
|
||||||
|
def test_release_sh_switch_has_shai_and_agy_generation(self):
|
||||||
|
"""Bash release builder must generate files for shai and agy agents."""
|
||||||
|
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
|
||||||
|
|
||||||
|
assert re.search(r"shai\)\s*\n.*?\.shai/commands", sh_text, re.S) is not None
|
||||||
|
assert re.search(r"agy\)\s*\n.*?\.agent/commands", sh_text, re.S) is not None
|
||||||
|
|
||||||
def test_init_ai_help_includes_roo_and_kiro_alias(self):
|
def test_init_ai_help_includes_roo_and_kiro_alias(self):
|
||||||
"""CLI help text for --ai should stay in sync with agent config and alias guidance."""
|
"""CLI help text for --ai should stay in sync with agent config and alias guidance."""
|
||||||
|
|||||||
@@ -132,6 +132,16 @@ def commands_dir_gemini(project_dir):
|
|||||||
return cmd_dir
|
return cmd_dir
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def commands_dir_qwen(project_dir):
|
||||||
|
"""Create a populated .qwen/commands directory (Markdown format)."""
|
||||||
|
cmd_dir = project_dir / ".qwen" / "commands"
|
||||||
|
cmd_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
for name in ["speckit.specify.md", "speckit.plan.md", "speckit.tasks.md"]:
|
||||||
|
(cmd_dir / name).write_text(f"# {name}\nContent here\n")
|
||||||
|
return cmd_dir
|
||||||
|
|
||||||
|
|
||||||
# ===== _get_skills_dir Tests =====
|
# ===== _get_skills_dir Tests =====
|
||||||
|
|
||||||
class TestGetSkillsDir:
|
class TestGetSkillsDir:
|
||||||
@@ -390,6 +400,28 @@ class TestInstallAiSkills:
|
|||||||
# .toml commands should be untouched
|
# .toml commands should be untouched
|
||||||
assert (cmds_dir / "speckit.specify.toml").exists()
|
assert (cmds_dir / "speckit.specify.toml").exists()
|
||||||
|
|
||||||
|
def test_qwen_md_commands_dir_installs_skills(self, project_dir):
|
||||||
|
"""Qwen now uses Markdown format; skills should install directly from .qwen/commands/."""
|
||||||
|
cmds_dir = project_dir / ".qwen" / "commands"
|
||||||
|
cmds_dir.mkdir(parents=True)
|
||||||
|
(cmds_dir / "speckit.specify.md").write_text(
|
||||||
|
"---\ndescription: Create or update the feature specification.\n---\n\n# Specify\n\nBody.\n"
|
||||||
|
)
|
||||||
|
(cmds_dir / "speckit.plan.md").write_text(
|
||||||
|
"---\ndescription: Generate implementation plan.\n---\n\n# Plan\n\nBody.\n"
|
||||||
|
)
|
||||||
|
|
||||||
|
result = install_ai_skills(project_dir, "qwen")
|
||||||
|
|
||||||
|
assert result is True
|
||||||
|
skills_dir = project_dir / ".qwen" / "skills"
|
||||||
|
assert skills_dir.exists()
|
||||||
|
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
|
||||||
|
assert len(skill_dirs) >= 1
|
||||||
|
# .md commands should be untouched
|
||||||
|
assert (cmds_dir / "speckit.specify.md").exists()
|
||||||
|
assert (cmds_dir / "speckit.plan.md").exists()
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent_key", [k for k in AGENT_CONFIG.keys() if k != "generic"])
|
@pytest.mark.parametrize("agent_key", [k for k in AGENT_CONFIG.keys() if k != "generic"])
|
||||||
def test_skills_install_for_all_agents(self, temp_dir, agent_key):
|
def test_skills_install_for_all_agents(self, temp_dir, agent_key):
|
||||||
"""install_ai_skills should produce skills for every configured agent."""
|
"""install_ai_skills should produce skills for every configured agent."""
|
||||||
@@ -446,6 +478,15 @@ class TestCommandCoexistence:
|
|||||||
remaining = list(commands_dir_gemini.glob("speckit.*"))
|
remaining = list(commands_dir_gemini.glob("speckit.*"))
|
||||||
assert len(remaining) == 3
|
assert len(remaining) == 3
|
||||||
|
|
||||||
|
def test_existing_commands_preserved_qwen(self, project_dir, templates_dir, commands_dir_qwen):
|
||||||
|
"""install_ai_skills must NOT remove pre-existing .qwen/commands files."""
|
||||||
|
assert len(list(commands_dir_qwen.glob("speckit.*"))) == 3
|
||||||
|
|
||||||
|
install_ai_skills(project_dir, "qwen")
|
||||||
|
|
||||||
|
remaining = list(commands_dir_qwen.glob("speckit.*"))
|
||||||
|
assert len(remaining) == 3
|
||||||
|
|
||||||
def test_commands_dir_not_removed(self, project_dir, templates_dir, commands_dir_claude):
|
def test_commands_dir_not_removed(self, project_dir, templates_dir, commands_dir_claude):
|
||||||
"""install_ai_skills must not remove the commands directory."""
|
"""install_ai_skills must not remove the commands directory."""
|
||||||
install_ai_skills(project_dir, "claude")
|
install_ai_skills(project_dir, "claude")
|
||||||
@@ -661,6 +702,59 @@ class TestCliValidation:
|
|||||||
assert "Usage:" in result.output
|
assert "Usage:" in result.output
|
||||||
assert "--ai" in result.output
|
assert "--ai" in result.output
|
||||||
|
|
||||||
|
def test_agy_without_ai_skills_fails(self):
|
||||||
|
"""--ai agy without --ai-skills should fail with exit code 1."""
|
||||||
|
from typer.testing import CliRunner
|
||||||
|
|
||||||
|
runner = CliRunner()
|
||||||
|
result = runner.invoke(app, ["init", "test-proj", "--ai", "agy"])
|
||||||
|
|
||||||
|
assert result.exit_code == 1
|
||||||
|
assert "Explicit command support was deprecated in Antigravity version 1.20.5." in result.output
|
||||||
|
assert "--ai-skills" in result.output
|
||||||
|
|
||||||
|
def test_interactive_agy_without_ai_skills_prompts_skills(self, monkeypatch):
|
||||||
|
"""Interactive selector returning agy without --ai-skills should automatically enable --ai-skills."""
|
||||||
|
from typer.testing import CliRunner
|
||||||
|
|
||||||
|
# Mock select_with_arrows to simulate the user picking 'agy' for AI,
|
||||||
|
# and return a deterministic default for any other prompts to avoid
|
||||||
|
# calling the real interactive implementation.
|
||||||
|
def _fake_select_with_arrows(*args, **kwargs):
|
||||||
|
options = kwargs.get("options")
|
||||||
|
if options is None and len(args) >= 1:
|
||||||
|
options = args[0]
|
||||||
|
|
||||||
|
# If the options include 'agy', simulate selecting it.
|
||||||
|
if isinstance(options, dict) and "agy" in options:
|
||||||
|
return "agy"
|
||||||
|
if isinstance(options, (list, tuple)) and "agy" in options:
|
||||||
|
return "agy"
|
||||||
|
|
||||||
|
# For any other prompt, return a deterministic, non-interactive default:
|
||||||
|
# pick the first option if available.
|
||||||
|
if isinstance(options, dict) and options:
|
||||||
|
return next(iter(options.keys()))
|
||||||
|
if isinstance(options, (list, tuple)) and options:
|
||||||
|
return options[0]
|
||||||
|
|
||||||
|
# If no options are provided, fall back to None (should not occur in normal use).
|
||||||
|
return None
|
||||||
|
|
||||||
|
monkeypatch.setattr("specify_cli.select_with_arrows", _fake_select_with_arrows)
|
||||||
|
|
||||||
|
# Mock download_and_extract_template to prevent real HTTP downloads during testing
|
||||||
|
monkeypatch.setattr("specify_cli.download_and_extract_template", lambda *args, **kwargs: None)
|
||||||
|
# We need to bypass the `git init` step, wait, it has `--no-git` by default in tests maybe?
|
||||||
|
runner = CliRunner()
|
||||||
|
# Create temp dir to avoid directory already exists errors or whatever
|
||||||
|
with runner.isolated_filesystem():
|
||||||
|
result = runner.invoke(app, ["init", "test-proj", "--no-git"])
|
||||||
|
|
||||||
|
# Interactive selection should NOT raise the deprecation error!
|
||||||
|
assert result.exit_code == 0
|
||||||
|
assert "Explicit command support was deprecated" not in result.output
|
||||||
|
|
||||||
def test_ai_skills_flag_appears_in_help(self):
|
def test_ai_skills_flag_appears_in_help(self):
|
||||||
"""--ai-skills should appear in init --help output."""
|
"""--ai-skills should appear in init --help output."""
|
||||||
from typer.testing import CliRunner
|
from typer.testing import CliRunner
|
||||||
|
|||||||
@@ -277,6 +277,135 @@ class TestExtensionRegistry:
|
|||||||
assert registry2.is_installed("test-ext")
|
assert registry2.is_installed("test-ext")
|
||||||
assert registry2.get("test-ext")["version"] == "1.0.0"
|
assert registry2.get("test-ext")["version"] == "1.0.0"
|
||||||
|
|
||||||
|
def test_update_preserves_installed_at(self, temp_dir):
|
||||||
|
"""Test that update() preserves the original installed_at timestamp."""
|
||||||
|
extensions_dir = temp_dir / "extensions"
|
||||||
|
extensions_dir.mkdir()
|
||||||
|
|
||||||
|
registry = ExtensionRegistry(extensions_dir)
|
||||||
|
registry.add("test-ext", {"version": "1.0.0", "enabled": True})
|
||||||
|
|
||||||
|
# Get original installed_at
|
||||||
|
original_data = registry.get("test-ext")
|
||||||
|
original_installed_at = original_data["installed_at"]
|
||||||
|
|
||||||
|
# Update with new metadata
|
||||||
|
registry.update("test-ext", {"version": "2.0.0", "enabled": False})
|
||||||
|
|
||||||
|
# Verify installed_at is preserved
|
||||||
|
updated_data = registry.get("test-ext")
|
||||||
|
assert updated_data["installed_at"] == original_installed_at
|
||||||
|
assert updated_data["version"] == "2.0.0"
|
||||||
|
assert updated_data["enabled"] is False
|
||||||
|
|
||||||
|
def test_update_merges_with_existing(self, temp_dir):
|
||||||
|
"""Test that update() merges new metadata with existing fields."""
|
||||||
|
extensions_dir = temp_dir / "extensions"
|
||||||
|
extensions_dir.mkdir()
|
||||||
|
|
||||||
|
registry = ExtensionRegistry(extensions_dir)
|
||||||
|
registry.add("test-ext", {
|
||||||
|
"version": "1.0.0",
|
||||||
|
"enabled": True,
|
||||||
|
"registered_commands": {"claude": ["cmd1", "cmd2"]},
|
||||||
|
})
|
||||||
|
|
||||||
|
# Update with partial metadata (only enabled field)
|
||||||
|
registry.update("test-ext", {"enabled": False})
|
||||||
|
|
||||||
|
# Verify existing fields are preserved
|
||||||
|
updated_data = registry.get("test-ext")
|
||||||
|
assert updated_data["enabled"] is False
|
||||||
|
assert updated_data["version"] == "1.0.0" # Preserved
|
||||||
|
assert updated_data["registered_commands"] == {"claude": ["cmd1", "cmd2"]} # Preserved
|
||||||
|
|
||||||
|
def test_update_raises_for_missing_extension(self, temp_dir):
|
||||||
|
"""Test that update() raises KeyError for non-installed extension."""
|
||||||
|
extensions_dir = temp_dir / "extensions"
|
||||||
|
extensions_dir.mkdir()
|
||||||
|
|
||||||
|
registry = ExtensionRegistry(extensions_dir)
|
||||||
|
|
||||||
|
with pytest.raises(KeyError, match="not installed"):
|
||||||
|
registry.update("nonexistent-ext", {"enabled": False})
|
||||||
|
|
||||||
|
def test_restore_overwrites_completely(self, temp_dir):
|
||||||
|
"""Test that restore() overwrites the registry entry completely."""
|
||||||
|
extensions_dir = temp_dir / "extensions"
|
||||||
|
extensions_dir.mkdir()
|
||||||
|
|
||||||
|
registry = ExtensionRegistry(extensions_dir)
|
||||||
|
registry.add("test-ext", {"version": "2.0.0", "enabled": True})
|
||||||
|
|
||||||
|
# Restore with complete backup data
|
||||||
|
backup_data = {
|
||||||
|
"version": "1.0.0",
|
||||||
|
"enabled": False,
|
||||||
|
"installed_at": "2024-01-01T00:00:00+00:00",
|
||||||
|
"registered_commands": {"claude": ["old-cmd"]},
|
||||||
|
}
|
||||||
|
registry.restore("test-ext", backup_data)
|
||||||
|
|
||||||
|
# Verify entry is exactly as restored
|
||||||
|
restored_data = registry.get("test-ext")
|
||||||
|
assert restored_data == backup_data
|
||||||
|
|
||||||
|
def test_restore_can_recreate_removed_entry(self, temp_dir):
|
||||||
|
"""Test that restore() can recreate an entry after remove()."""
|
||||||
|
extensions_dir = temp_dir / "extensions"
|
||||||
|
extensions_dir.mkdir()
|
||||||
|
|
||||||
|
registry = ExtensionRegistry(extensions_dir)
|
||||||
|
registry.add("test-ext", {"version": "1.0.0"})
|
||||||
|
|
||||||
|
# Save backup and remove
|
||||||
|
backup = registry.get("test-ext").copy()
|
||||||
|
registry.remove("test-ext")
|
||||||
|
assert not registry.is_installed("test-ext")
|
||||||
|
|
||||||
|
# Restore should recreate the entry
|
||||||
|
registry.restore("test-ext", backup)
|
||||||
|
assert registry.is_installed("test-ext")
|
||||||
|
assert registry.get("test-ext")["version"] == "1.0.0"
|
||||||
|
|
||||||
|
def test_get_returns_deep_copy(self, temp_dir):
|
||||||
|
"""Test that get() returns deep copies for nested structures."""
|
||||||
|
extensions_dir = temp_dir / "extensions"
|
||||||
|
extensions_dir.mkdir()
|
||||||
|
|
||||||
|
registry = ExtensionRegistry(extensions_dir)
|
||||||
|
metadata = {
|
||||||
|
"version": "1.0.0",
|
||||||
|
"registered_commands": {"claude": ["cmd1"]},
|
||||||
|
}
|
||||||
|
registry.add("test-ext", metadata)
|
||||||
|
|
||||||
|
fetched = registry.get("test-ext")
|
||||||
|
fetched["registered_commands"]["claude"].append("cmd2")
|
||||||
|
|
||||||
|
# Internal registry must remain unchanged.
|
||||||
|
internal = registry.data["extensions"]["test-ext"]
|
||||||
|
assert internal["registered_commands"] == {"claude": ["cmd1"]}
|
||||||
|
|
||||||
|
def test_list_returns_deep_copy(self, temp_dir):
|
||||||
|
"""Test that list() returns deep copies for nested structures."""
|
||||||
|
extensions_dir = temp_dir / "extensions"
|
||||||
|
extensions_dir.mkdir()
|
||||||
|
|
||||||
|
registry = ExtensionRegistry(extensions_dir)
|
||||||
|
metadata = {
|
||||||
|
"version": "1.0.0",
|
||||||
|
"registered_commands": {"claude": ["cmd1"]},
|
||||||
|
}
|
||||||
|
registry.add("test-ext", metadata)
|
||||||
|
|
||||||
|
listed = registry.list()
|
||||||
|
listed["test-ext"]["registered_commands"]["claude"].append("cmd2")
|
||||||
|
|
||||||
|
# Internal registry must remain unchanged.
|
||||||
|
internal = registry.data["extensions"]["test-ext"]
|
||||||
|
assert internal["registered_commands"] == {"claude": ["cmd1"]}
|
||||||
|
|
||||||
|
|
||||||
# ===== ExtensionManager Tests =====
|
# ===== ExtensionManager Tests =====
|
||||||
|
|
||||||
@@ -412,6 +541,15 @@ class TestCommandRegistrar:
|
|||||||
assert "codex" in CommandRegistrar.AGENT_CONFIGS
|
assert "codex" in CommandRegistrar.AGENT_CONFIGS
|
||||||
assert CommandRegistrar.AGENT_CONFIGS["codex"]["dir"] == ".codex/prompts"
|
assert CommandRegistrar.AGENT_CONFIGS["codex"]["dir"] == ".codex/prompts"
|
||||||
|
|
||||||
|
def test_qwen_agent_config_is_markdown(self):
|
||||||
|
"""Qwen should use Markdown format with $ARGUMENTS (not TOML)."""
|
||||||
|
assert "qwen" in CommandRegistrar.AGENT_CONFIGS
|
||||||
|
cfg = CommandRegistrar.AGENT_CONFIGS["qwen"]
|
||||||
|
assert cfg["dir"] == ".qwen/commands"
|
||||||
|
assert cfg["format"] == "markdown"
|
||||||
|
assert cfg["args"] == "$ARGUMENTS"
|
||||||
|
assert cfg["extension"] == ".md"
|
||||||
|
|
||||||
def test_parse_frontmatter_valid(self):
|
def test_parse_frontmatter_valid(self):
|
||||||
"""Test parsing valid YAML frontmatter."""
|
"""Test parsing valid YAML frontmatter."""
|
||||||
content = """---
|
content = """---
|
||||||
@@ -1402,8 +1540,8 @@ class TestCatalogStack:
|
|||||||
with pytest.raises(ValidationError, match="HTTPS"):
|
with pytest.raises(ValidationError, match="HTTPS"):
|
||||||
catalog.get_active_catalogs()
|
catalog.get_active_catalogs()
|
||||||
|
|
||||||
def test_empty_project_config_falls_back_to_defaults(self, temp_dir):
|
def test_empty_project_config_raises_error(self, temp_dir):
|
||||||
"""Empty catalogs list in config falls back to default stack."""
|
"""Empty catalogs list in config raises ValidationError (fail-closed for security)."""
|
||||||
import yaml as yaml_module
|
import yaml as yaml_module
|
||||||
|
|
||||||
project_dir = self._make_project(temp_dir)
|
project_dir = self._make_project(temp_dir)
|
||||||
@@ -1412,11 +1550,32 @@ class TestCatalogStack:
|
|||||||
yaml_module.dump({"catalogs": []}, f)
|
yaml_module.dump({"catalogs": []}, f)
|
||||||
|
|
||||||
catalog = ExtensionCatalog(project_dir)
|
catalog = ExtensionCatalog(project_dir)
|
||||||
entries = catalog.get_active_catalogs()
|
|
||||||
|
|
||||||
# Falls back to default stack
|
# Fail-closed: empty config should raise, not fall back to defaults
|
||||||
assert len(entries) == 2
|
with pytest.raises(ValidationError) as exc_info:
|
||||||
assert entries[0].url == ExtensionCatalog.DEFAULT_CATALOG_URL
|
catalog.get_active_catalogs()
|
||||||
|
assert "contains no 'catalogs' entries" in str(exc_info.value)
|
||||||
|
|
||||||
|
def test_catalog_entries_without_urls_raises_error(self, temp_dir):
|
||||||
|
"""Catalog entries without URLs raise ValidationError (fail-closed for security)."""
|
||||||
|
import yaml as yaml_module
|
||||||
|
|
||||||
|
project_dir = self._make_project(temp_dir)
|
||||||
|
config_path = project_dir / ".specify" / "extension-catalogs.yml"
|
||||||
|
with open(config_path, "w") as f:
|
||||||
|
yaml_module.dump({
|
||||||
|
"catalogs": [
|
||||||
|
{"name": "no-url-catalog", "priority": 1},
|
||||||
|
{"name": "another-no-url", "description": "Also missing URL"},
|
||||||
|
]
|
||||||
|
}, f)
|
||||||
|
|
||||||
|
catalog = ExtensionCatalog(project_dir)
|
||||||
|
|
||||||
|
# Fail-closed: entries without URLs should raise, not fall back to defaults
|
||||||
|
with pytest.raises(ValidationError) as exc_info:
|
||||||
|
catalog.get_active_catalogs()
|
||||||
|
assert "none have valid URLs" in str(exc_info.value)
|
||||||
|
|
||||||
# --- _load_catalog_config ---
|
# --- _load_catalog_config ---
|
||||||
|
|
||||||
@@ -1943,3 +2102,238 @@ class TestExtensionIgnore:
|
|||||||
assert not (dest / "docs" / "guide.md").exists()
|
assert not (dest / "docs" / "guide.md").exists()
|
||||||
assert not (dest / "docs" / "internal.md").exists()
|
assert not (dest / "docs" / "internal.md").exists()
|
||||||
assert (dest / "docs" / "api.md").exists()
|
assert (dest / "docs" / "api.md").exists()
|
||||||
|
|
||||||
|
|
||||||
|
class TestExtensionAddCLI:
|
||||||
|
"""CLI integration tests for extension add command."""
|
||||||
|
|
||||||
|
def test_add_by_display_name_uses_resolved_id_for_download(self, tmp_path):
|
||||||
|
"""extension add by display name should use resolved ID for download_extension()."""
|
||||||
|
from typer.testing import CliRunner
|
||||||
|
from unittest.mock import patch, MagicMock
|
||||||
|
from specify_cli import app
|
||||||
|
|
||||||
|
runner = CliRunner()
|
||||||
|
|
||||||
|
# Create project structure
|
||||||
|
project_dir = tmp_path / "test-project"
|
||||||
|
project_dir.mkdir()
|
||||||
|
(project_dir / ".specify").mkdir()
|
||||||
|
(project_dir / ".specify" / "extensions").mkdir(parents=True)
|
||||||
|
|
||||||
|
# Mock catalog that returns extension by display name
|
||||||
|
mock_catalog = MagicMock()
|
||||||
|
mock_catalog.get_extension_info.return_value = None # ID lookup fails
|
||||||
|
mock_catalog.search.return_value = [
|
||||||
|
{
|
||||||
|
"id": "acme-jira-integration",
|
||||||
|
"name": "Jira Integration",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"description": "Jira integration extension",
|
||||||
|
"_install_allowed": True,
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
# Track what ID was passed to download_extension
|
||||||
|
download_called_with = []
|
||||||
|
def mock_download(extension_id):
|
||||||
|
download_called_with.append(extension_id)
|
||||||
|
# Return a path that will fail install (we just want to verify the ID)
|
||||||
|
raise ExtensionError("Mock download - checking ID was resolved")
|
||||||
|
|
||||||
|
mock_catalog.download_extension.side_effect = mock_download
|
||||||
|
|
||||||
|
with patch("specify_cli.extensions.ExtensionCatalog", return_value=mock_catalog), \
|
||||||
|
patch.object(Path, "cwd", return_value=project_dir):
|
||||||
|
result = runner.invoke(
|
||||||
|
app,
|
||||||
|
["extension", "add", "Jira Integration"],
|
||||||
|
catch_exceptions=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
assert result.exit_code != 0, (
|
||||||
|
f"Expected non-zero exit code since mock download raises, got {result.exit_code}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Verify download_extension was called with the resolved ID, not the display name
|
||||||
|
assert len(download_called_with) == 1
|
||||||
|
assert download_called_with[0] == "acme-jira-integration", (
|
||||||
|
f"Expected download_extension to be called with resolved ID 'acme-jira-integration', "
|
||||||
|
f"but was called with '{download_called_with[0]}'"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class TestExtensionUpdateCLI:
|
||||||
|
"""CLI integration tests for extension update command."""
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _create_extension_source(base_dir: Path, version: str, include_config: bool = False) -> Path:
|
||||||
|
"""Create a minimal extension source directory for install tests."""
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
ext_dir = base_dir / f"test-ext-{version}"
|
||||||
|
ext_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
manifest = {
|
||||||
|
"schema_version": "1.0",
|
||||||
|
"extension": {
|
||||||
|
"id": "test-ext",
|
||||||
|
"name": "Test Extension",
|
||||||
|
"version": version,
|
||||||
|
"description": "A test extension",
|
||||||
|
},
|
||||||
|
"requires": {"speckit_version": ">=0.1.0"},
|
||||||
|
"provides": {
|
||||||
|
"commands": [
|
||||||
|
{
|
||||||
|
"name": "speckit.test.hello",
|
||||||
|
"file": "commands/hello.md",
|
||||||
|
"description": "Test command",
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"hooks": {
|
||||||
|
"after_tasks": {
|
||||||
|
"command": "speckit.test.hello",
|
||||||
|
"optional": True,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
(ext_dir / "extension.yml").write_text(yaml.dump(manifest, sort_keys=False))
|
||||||
|
commands_dir = ext_dir / "commands"
|
||||||
|
commands_dir.mkdir(exist_ok=True)
|
||||||
|
(commands_dir / "hello.md").write_text("---\ndescription: Test\n---\n\n$ARGUMENTS\n")
|
||||||
|
if include_config:
|
||||||
|
(ext_dir / "linear-config.yml").write_text("custom: true\nvalue: original\n")
|
||||||
|
return ext_dir
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _create_catalog_zip(zip_path: Path, version: str):
|
||||||
|
"""Create a minimal ZIP that passes extension_update ID validation."""
|
||||||
|
import zipfile
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
manifest = {
|
||||||
|
"schema_version": "1.0",
|
||||||
|
"extension": {
|
||||||
|
"id": "test-ext",
|
||||||
|
"name": "Test Extension",
|
||||||
|
"version": version,
|
||||||
|
"description": "A test extension",
|
||||||
|
},
|
||||||
|
"requires": {"speckit_version": ">=0.1.0"},
|
||||||
|
"provides": {"commands": [{"name": "speckit.test.hello", "file": "commands/hello.md"}]},
|
||||||
|
}
|
||||||
|
|
||||||
|
with zipfile.ZipFile(zip_path, "w") as zf:
|
||||||
|
zf.writestr("extension.yml", yaml.dump(manifest, sort_keys=False))
|
||||||
|
|
||||||
|
def test_update_success_preserves_installed_at(self, tmp_path):
|
||||||
|
"""Successful update should keep original installed_at and apply new version."""
|
||||||
|
from typer.testing import CliRunner
|
||||||
|
from unittest.mock import patch
|
||||||
|
from specify_cli import app
|
||||||
|
|
||||||
|
runner = CliRunner()
|
||||||
|
project_dir = tmp_path / "project"
|
||||||
|
project_dir.mkdir()
|
||||||
|
(project_dir / ".specify").mkdir()
|
||||||
|
(project_dir / ".claude" / "commands").mkdir(parents=True)
|
||||||
|
|
||||||
|
manager = ExtensionManager(project_dir)
|
||||||
|
v1_dir = self._create_extension_source(tmp_path, "1.0.0", include_config=True)
|
||||||
|
manager.install_from_directory(v1_dir, "0.1.0")
|
||||||
|
original_installed_at = manager.registry.get("test-ext")["installed_at"]
|
||||||
|
original_config_content = (
|
||||||
|
project_dir / ".specify" / "extensions" / "test-ext" / "linear-config.yml"
|
||||||
|
).read_text()
|
||||||
|
|
||||||
|
zip_path = tmp_path / "test-ext-update.zip"
|
||||||
|
self._create_catalog_zip(zip_path, "2.0.0")
|
||||||
|
v2_dir = self._create_extension_source(tmp_path, "2.0.0")
|
||||||
|
|
||||||
|
def fake_install_from_zip(self_obj, _zip_path, speckit_version):
|
||||||
|
return self_obj.install_from_directory(v2_dir, speckit_version)
|
||||||
|
|
||||||
|
with patch.object(Path, "cwd", return_value=project_dir), \
|
||||||
|
patch.object(ExtensionCatalog, "get_extension_info", return_value={
|
||||||
|
"id": "test-ext",
|
||||||
|
"name": "Test Extension",
|
||||||
|
"version": "2.0.0",
|
||||||
|
"_install_allowed": True,
|
||||||
|
}), \
|
||||||
|
patch.object(ExtensionCatalog, "download_extension", return_value=zip_path), \
|
||||||
|
patch.object(ExtensionManager, "install_from_zip", fake_install_from_zip):
|
||||||
|
result = runner.invoke(app, ["extension", "update", "test-ext"], input="y\n", catch_exceptions=True)
|
||||||
|
|
||||||
|
assert result.exit_code == 0, result.output
|
||||||
|
|
||||||
|
updated = ExtensionManager(project_dir).registry.get("test-ext")
|
||||||
|
assert updated["version"] == "2.0.0"
|
||||||
|
assert updated["installed_at"] == original_installed_at
|
||||||
|
restored_config_content = (
|
||||||
|
project_dir / ".specify" / "extensions" / "test-ext" / "linear-config.yml"
|
||||||
|
).read_text()
|
||||||
|
assert restored_config_content == original_config_content
|
||||||
|
|
||||||
|
def test_update_failure_rolls_back_registry_hooks_and_commands(self, tmp_path):
    """Failed update should restore original registry, hooks, and command files."""
    from typer.testing import CliRunner
    from unittest.mock import patch
    from specify_cli import app
    import yaml

    cli_runner = CliRunner()

    # Lay out a minimal project: .specify marker plus a Claude commands dir.
    project_dir = tmp_path / "project"
    project_dir.mkdir()
    (project_dir / ".specify").mkdir()
    (project_dir / ".claude" / "commands").mkdir(parents=True)

    # Install v1.0.0 so there is real state (registry entry, hooks, command
    # files) for the failed update to roll back to.
    mgr = ExtensionManager(project_dir)
    source_v1 = self._create_extension_source(tmp_path, "1.0.0")
    mgr.install_from_directory(source_v1, "0.1.0")

    # Snapshot the pre-update state we expect to survive the rollback.
    entry_before = mgr.registry.get("test-ext")
    hooks_path = project_dir / ".specify" / "extensions.yml"
    hooks_before = yaml.safe_load(hooks_path.read_text())

    # Resolve every registered command name to its on-disk file, skipping
    # agents the registrar does not know about.
    registrar = CommandRegistrar()
    command_files = [
        project_dir / registrar.AGENT_CONFIGS[agent_name]["dir"]
        / f"{cmd_name}{registrar.AGENT_CONFIGS[agent_name]['extension']}"
        for agent_name, cmd_names in entry_before.get("registered_commands", {}).items()
        if agent_name in registrar.AGENT_CONFIGS
        for cmd_name in cmd_names
    ]

    assert command_files, "Expected at least one registered command file"
    for cmd_file in command_files:
        assert cmd_file.exists(), f"Expected command file to exist before update: {cmd_file}"

    # A v2 catalog zip exists, but installation of it will be forced to fail.
    zip_path = tmp_path / "test-ext-update.zip"
    self._create_catalog_zip(zip_path, "2.0.0")

    fake_info = {
        "id": "test-ext",
        "name": "Test Extension",
        "version": "2.0.0",
        "_install_allowed": True,
    }

    # Run `extension update` from inside the project; the catalog lookups are
    # stubbed and install_from_zip blows up mid-update.
    with patch.object(Path, "cwd", return_value=project_dir), \
         patch.object(ExtensionCatalog, "get_extension_info", return_value=fake_info), \
         patch.object(ExtensionCatalog, "download_extension", return_value=zip_path), \
         patch.object(ExtensionManager, "install_from_zip", side_effect=RuntimeError("install failed")):
        result = cli_runner.invoke(app, ["extension", "update", "test-ext"], input="y\n", catch_exceptions=True)

    assert result.exit_code == 1, result.output

    # Registry entry must be byte-for-byte the pre-update one.
    restored_entry = ExtensionManager(project_dir).registry.get("test-ext")
    assert restored_entry == entry_before

    # Hooks file must be restored to its pre-update contents.
    hooks_after = yaml.safe_load(hooks_path.read_text())
    assert hooks_after == hooks_before

    # Every command file removed/overwritten by the failed update must be back.
    for cmd_file in command_files:
        assert cmd_file.exists(), f"Expected command file to be restored after rollback: {cmd_file}"
|
||||||
|
|||||||
1712
tests/test_presets.py
Normal file
1712
tests/test_presets.py
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user