mirror of
https://github.com/github/spec-kit.git
synced 2026-03-21 12:53:08 +00:00
Compare commits
10 Commits
v0.2.1
...
c883952b43
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c883952b43 | ||
|
|
b9c1a1c7bb | ||
|
|
46bc65b1ce | ||
|
|
017e1c4c2f | ||
|
|
7562664fd1 | ||
|
|
976c9981a4 | ||
|
|
d3fc056743 | ||
|
|
58ce653908 | ||
|
|
82f8a13f83 | ||
|
|
0f1cbd74fe |
@@ -382,7 +382,7 @@ function Build-Variant {
|
|||||||
}
|
}
|
||||||
'qwen' {
|
'qwen' {
|
||||||
$cmdDir = Join-Path $baseDir ".qwen/commands"
|
$cmdDir = Join-Path $baseDir ".qwen/commands"
|
||||||
Generate-Commands -Agent 'qwen' -Extension 'toml' -ArgFormat '{{args}}' -OutputDir $cmdDir -ScriptVariant $Script
|
Generate-Commands -Agent 'qwen' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
|
||||||
if (Test-Path "agent_templates/qwen/QWEN.md") {
|
if (Test-Path "agent_templates/qwen/QWEN.md") {
|
||||||
Copy-Item -Path "agent_templates/qwen/QWEN.md" -Destination (Join-Path $baseDir "QWEN.md")
|
Copy-Item -Path "agent_templates/qwen/QWEN.md" -Destination (Join-Path $baseDir "QWEN.md")
|
||||||
}
|
}
|
||||||
@@ -442,7 +442,7 @@ function Build-Variant {
|
|||||||
if (Test-Path $tabnineTemplate) { Copy-Item $tabnineTemplate (Join-Path $baseDir 'TABNINE.md') }
|
if (Test-Path $tabnineTemplate) { Copy-Item $tabnineTemplate (Join-Path $baseDir 'TABNINE.md') }
|
||||||
}
|
}
|
||||||
'agy' {
|
'agy' {
|
||||||
$cmdDir = Join-Path $baseDir ".agent/workflows"
|
$cmdDir = Join-Path $baseDir ".agent/commands"
|
||||||
Generate-Commands -Agent 'agy' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
|
Generate-Commands -Agent 'agy' -Extension 'md' -ArgFormat '$ARGUMENTS' -OutputDir $cmdDir -ScriptVariant $Script
|
||||||
}
|
}
|
||||||
'vibe' {
|
'vibe' {
|
||||||
|
|||||||
@@ -240,7 +240,7 @@ build_variant() {
|
|||||||
generate_commands cursor-agent md "\$ARGUMENTS" "$base_dir/.cursor/commands" "$script" ;;
|
generate_commands cursor-agent md "\$ARGUMENTS" "$base_dir/.cursor/commands" "$script" ;;
|
||||||
qwen)
|
qwen)
|
||||||
mkdir -p "$base_dir/.qwen/commands"
|
mkdir -p "$base_dir/.qwen/commands"
|
||||||
generate_commands qwen toml "{{args}}" "$base_dir/.qwen/commands" "$script"
|
generate_commands qwen md "\$ARGUMENTS" "$base_dir/.qwen/commands" "$script"
|
||||||
[[ -f agent_templates/qwen/QWEN.md ]] && cp agent_templates/qwen/QWEN.md "$base_dir/QWEN.md" ;;
|
[[ -f agent_templates/qwen/QWEN.md ]] && cp agent_templates/qwen/QWEN.md "$base_dir/QWEN.md" ;;
|
||||||
opencode)
|
opencode)
|
||||||
mkdir -p "$base_dir/.opencode/command"
|
mkdir -p "$base_dir/.opencode/command"
|
||||||
@@ -280,8 +280,8 @@ build_variant() {
|
|||||||
mkdir -p "$base_dir/.kiro/prompts"
|
mkdir -p "$base_dir/.kiro/prompts"
|
||||||
generate_commands kiro-cli md "\$ARGUMENTS" "$base_dir/.kiro/prompts" "$script" ;;
|
generate_commands kiro-cli md "\$ARGUMENTS" "$base_dir/.kiro/prompts" "$script" ;;
|
||||||
agy)
|
agy)
|
||||||
mkdir -p "$base_dir/.agent/workflows"
|
mkdir -p "$base_dir/.agent/commands"
|
||||||
generate_commands agy md "\$ARGUMENTS" "$base_dir/.agent/workflows" "$script" ;;
|
generate_commands agy md "\$ARGUMENTS" "$base_dir/.agent/commands" "$script" ;;
|
||||||
bob)
|
bob)
|
||||||
mkdir -p "$base_dir/.bob/commands"
|
mkdir -p "$base_dir/.bob/commands"
|
||||||
generate_commands bob md "\$ARGUMENTS" "$base_dir/.bob/commands" "$script" ;;
|
generate_commands bob md "\$ARGUMENTS" "$base_dir/.bob/commands" "$script" ;;
|
||||||
|
|||||||
@@ -35,7 +35,7 @@ Specify supports multiple AI agents by generating agent-specific command files a
|
|||||||
| **Gemini CLI** | `.gemini/commands/` | TOML | `gemini` | Google's Gemini CLI |
|
| **Gemini CLI** | `.gemini/commands/` | TOML | `gemini` | Google's Gemini CLI |
|
||||||
| **GitHub Copilot** | `.github/agents/` | Markdown | N/A (IDE-based) | GitHub Copilot in VS Code |
|
| **GitHub Copilot** | `.github/agents/` | Markdown | N/A (IDE-based) | GitHub Copilot in VS Code |
|
||||||
| **Cursor** | `.cursor/commands/` | Markdown | `cursor-agent` | Cursor CLI |
|
| **Cursor** | `.cursor/commands/` | Markdown | `cursor-agent` | Cursor CLI |
|
||||||
| **Qwen Code** | `.qwen/commands/` | TOML | `qwen` | Alibaba's Qwen Code CLI |
|
| **Qwen Code** | `.qwen/commands/` | Markdown | `qwen` | Alibaba's Qwen Code CLI |
|
||||||
| **opencode** | `.opencode/command/` | Markdown | `opencode` | opencode CLI |
|
| **opencode** | `.opencode/command/` | Markdown | `opencode` | opencode CLI |
|
||||||
| **Codex CLI** | `.codex/commands/` | Markdown | `codex` | Codex CLI |
|
| **Codex CLI** | `.codex/commands/` | Markdown | `codex` | Codex CLI |
|
||||||
| **Windsurf** | `.windsurf/workflows/` | Markdown | N/A (IDE-based) | Windsurf IDE workflows |
|
| **Windsurf** | `.windsurf/workflows/` | Markdown | N/A (IDE-based) | Windsurf IDE workflows |
|
||||||
@@ -88,7 +88,7 @@ This eliminates the need for special-case mappings throughout the codebase.
|
|||||||
- `folder`: Directory where agent-specific files are stored (relative to project root)
|
- `folder`: Directory where agent-specific files are stored (relative to project root)
|
||||||
- `commands_subdir`: Subdirectory name within the agent folder where command/prompt files are stored (default: `"commands"`)
|
- `commands_subdir`: Subdirectory name within the agent folder where command/prompt files are stored (default: `"commands"`)
|
||||||
- Most agents use `"commands"` (e.g., `.claude/commands/`)
|
- Most agents use `"commands"` (e.g., `.claude/commands/`)
|
||||||
- Some agents use alternative names: `"agents"` (copilot), `"workflows"` (windsurf, kilocode, agy), `"prompts"` (codex, kiro-cli), `"command"` (opencode - singular)
|
- Some agents use alternative names: `"agents"` (copilot), `"workflows"` (windsurf, kilocode), `"prompts"` (codex, kiro-cli), `"command"` (opencode - singular)
|
||||||
- This field enables `--ai-skills` to locate command templates correctly for skill generation
|
- This field enables `--ai-skills` to locate command templates correctly for skill generation
|
||||||
- `install_url`: Installation documentation URL (set to `None` for IDE-based agents)
|
- `install_url`: Installation documentation URL (set to `None` for IDE-based agents)
|
||||||
- `requires_cli`: Whether the agent requires a CLI tool check during initialization
|
- `requires_cli`: Whether the agent requires a CLI tool check during initialization
|
||||||
@@ -339,7 +339,7 @@ Work within integrated development environments:
|
|||||||
|
|
||||||
### Markdown Format
|
### Markdown Format
|
||||||
|
|
||||||
Used by: Claude, Cursor, opencode, Windsurf, Kiro CLI, Amp, SHAI, IBM Bob, Kimi Code
|
Used by: Claude, Cursor, opencode, Windsurf, Kiro CLI, Amp, SHAI, IBM Bob, Kimi Code, Qwen
|
||||||
|
|
||||||
**Standard format:**
|
**Standard format:**
|
||||||
|
|
||||||
@@ -364,7 +364,7 @@ Command content with {SCRIPT} and $ARGUMENTS placeholders.
|
|||||||
|
|
||||||
### TOML Format
|
### TOML Format
|
||||||
|
|
||||||
Used by: Gemini, Qwen, Tabnine
|
Used by: Gemini, Tabnine
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
description = "Command description"
|
description = "Command description"
|
||||||
|
|||||||
45
CHANGELOG.md
45
CHANGELOG.md
@@ -7,6 +7,51 @@ Recent changes to the Specify CLI and templates are documented here.
|
|||||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||||
|
|
||||||
|
## [0.2.1] - 2026-03-11
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- Added February 2026 newsletter (#1812)
|
||||||
|
- feat: add Kimi Code CLI agent support (#1790)
|
||||||
|
- docs: fix broken links in quickstart guide (#1759) (#1797)
|
||||||
|
- docs: add catalog cli help documentation (#1793) (#1794)
|
||||||
|
- fix: use quiet checkout to avoid exception on git checkout (#1792)
|
||||||
|
- feat(extensions): support .extensionignore to exclude files during install (#1781)
|
||||||
|
- feat: add Codex support for extension command registration (#1767)
|
||||||
|
- chore: bump version to 0.2.0 (#1786)
|
||||||
|
- fix: sync agent list comments with actual supported agents (#1785)
|
||||||
|
- feat(extensions): support multiple active catalogs simultaneously (#1720)
|
||||||
|
- Pavel/add tabnine cli support (#1503)
|
||||||
|
- Add Understanding extension to community catalog (#1778)
|
||||||
|
- Add ralph extension to community catalog (#1780)
|
||||||
|
- Update README with project initialization instructions (#1772)
|
||||||
|
- feat: add review extension to community catalog (#1775)
|
||||||
|
- Add fleet extension to community catalog (#1771)
|
||||||
|
- Integration of Mistral vibe support into speckit (#1725)
|
||||||
|
- fix: Remove duplicate options in specify.md (#1765)
|
||||||
|
- fix: use global branch numbering instead of per-short-name detection (#1757)
|
||||||
|
- Add Community Walkthroughs section to README (#1766)
|
||||||
|
- feat(extensions): add Jira Integration to community catalog (#1764)
|
||||||
|
- Add Azure DevOps Integration extension to community catalog (#1734)
|
||||||
|
- Fix docs: update Antigravity link and add initialization example (#1748)
|
||||||
|
- fix: wire after_tasks and after_implement hook events into command templates (#1702)
|
||||||
|
- make c ignores consistent with c++ (#1747)
|
||||||
|
- chore: bump version to 0.1.13 (#1746)
|
||||||
|
- feat: add kiro-cli and AGENT_CONFIG consistency coverage (#1690)
|
||||||
|
- feat: add verify extension to community catalog (#1726)
|
||||||
|
- Add Retrospective Extension to community catalog README table (#1741)
|
||||||
|
- fix(scripts): add empty description validation and branch checkout error handling (#1559)
|
||||||
|
- fix: correct Copilot extension command registration (#1724)
|
||||||
|
- fix(implement): remove Makefile from C ignore patterns (#1558)
|
||||||
|
- Add sync extension to community catalog (#1728)
|
||||||
|
- fix(checklist): clarify file handling behavior for append vs create (#1556)
|
||||||
|
- fix(clarify): correct conflicting question limit from 10 to 5 (#1557)
|
||||||
|
- chore: bump version to 0.1.12 (#1737)
|
||||||
|
- fix: use RELEASE_PAT so tag push triggers release workflow (#1736)
|
||||||
|
- fix: release-trigger uses release branch + PR instead of direct push to main (#1733)
|
||||||
|
- fix: Split release process to sync pyproject.toml version with git tags (#1732)
|
||||||
|
|
||||||
|
|
||||||
## [Unreleased]
|
## [Unreleased]
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
|
|||||||
@@ -154,7 +154,9 @@ See Spec-Driven Development in action across different scenarios with these comm
|
|||||||
|
|
||||||
- **[Greenfield Spring Boot + React platform](https://github.com/mnriem/spec-kit-spring-react-demo)** — Builds an LLM performance analytics platform (REST API, graphs, iteration tracking) from scratch using Spring Boot, embedded React, PostgreSQL, and Docker Compose, with a clarify step and a cross-artifact consistency analysis pass included.
|
- **[Greenfield Spring Boot + React platform](https://github.com/mnriem/spec-kit-spring-react-demo)** — Builds an LLM performance analytics platform (REST API, graphs, iteration tracking) from scratch using Spring Boot, embedded React, PostgreSQL, and Docker Compose, with a clarify step and a cross-artifact consistency analysis pass included.
|
||||||
|
|
||||||
- **[Brownfield ASP.NET CMS extension](https://github.com/mnriem/spec-kit-aspnet-brownfield-demo)** — Extends an existing open-source .NET CMS (CarrotCakeCMS-Core) with two new features — cross-platform Docker Compose infrastructure and a token-authenticated headless REST API — demonstrating how spec-kit fits into existing codebases without prior specs or a constitution.
|
- **[Brownfield ASP.NET CMS extension](https://github.com/mnriem/spec-kit-aspnet-brownfield-demo)** — Extends an existing open-source .NET CMS (CarrotCakeCMS-Core, ~307,000 lines of C#, Razor, SQL, JavaScript, and config files) with two new features — cross-platform Docker Compose infrastructure and a token-authenticated headless REST API — demonstrating how spec-kit fits into existing codebases without prior specs or a constitution.
|
||||||
|
|
||||||
|
- **[Brownfield Java runtime extension](https://github.com/mnriem/spec-kit-java-brownfield-demo)** — Extends an existing open-source Jakarta EE runtime (Piranha, ~420,000 lines of Java, XML, JSP, HTML, and config files across 180 Maven modules) with a password-protected Server Admin Console, demonstrating spec-kit on a large multi-module Java project with no prior specs or constitution.
|
||||||
|
|
||||||
## 🤖 Supported AI Agents
|
## 🤖 Supported AI Agents
|
||||||
|
|
||||||
@@ -181,7 +183,7 @@ See Spec-Driven Development in action across different scenarios with these comm
|
|||||||
| [Mistral Vibe](https://github.com/mistralai/mistral-vibe) | ✅ | |
|
| [Mistral Vibe](https://github.com/mistralai/mistral-vibe) | ✅ | |
|
||||||
| [Kimi Code](https://code.kimi.com/) | ✅ | |
|
| [Kimi Code](https://code.kimi.com/) | ✅ | |
|
||||||
| [Windsurf](https://windsurf.com/) | ✅ | |
|
| [Windsurf](https://windsurf.com/) | ✅ | |
|
||||||
| [Antigravity (agy)](https://antigravity.google/) | ✅ | |
|
| [Antigravity (agy)](https://antigravity.google/) | ✅ | Requires `--ai-skills` |
|
||||||
| Generic | ✅ | Bring your own agent — use `--ai generic --ai-commands-dir <path>` for unsupported agents |
|
| Generic | ✅ | Bring your own agent — use `--ai generic --ai-commands-dir <path>` for unsupported agents |
|
||||||
|
|
||||||
## 🔧 Specify CLI Reference
|
## 🔧 Specify CLI Reference
|
||||||
@@ -246,7 +248,7 @@ specify init my-project --ai vibe
|
|||||||
specify init my-project --ai bob
|
specify init my-project --ai bob
|
||||||
|
|
||||||
# Initialize with Antigravity support
|
# Initialize with Antigravity support
|
||||||
specify init my-project --ai agy
|
specify init my-project --ai agy --ai-skills
|
||||||
|
|
||||||
# Initialize with an unsupported agent (generic / bring your own agent)
|
# Initialize with an unsupported agent (generic / bring your own agent)
|
||||||
specify init my-project --ai generic --ai-commands-dir .myagent/commands/
|
specify init my-project --ai generic --ai-commands-dir .myagent/commands/
|
||||||
|
|||||||
@@ -76,6 +76,7 @@ The following community-contributed extensions are available in [`catalog.commun
|
|||||||
| Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) |
|
| Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) |
|
||||||
| Fleet Orchestrator | Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases | [spec-kit-fleet](https://github.com/sharathsatish/spec-kit-fleet) |
|
| Fleet Orchestrator | Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases | [spec-kit-fleet](https://github.com/sharathsatish/spec-kit-fleet) |
|
||||||
| Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
|
| Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
|
||||||
|
| Project Health Check | Diagnose a Spec Kit project and report health issues across structure, agents, features, scripts, extensions, and git | [spec-kit-doctor](https://github.com/KhawarHabibKhan/spec-kit-doctor) |
|
||||||
| Ralph Loop | Autonomous implementation loop using AI agent CLI | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |
|
| Ralph Loop | Autonomous implementation loop using AI agent CLI | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |
|
||||||
| Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
|
| Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
|
||||||
| Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
|
| Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
# RFC: Spec Kit Extension System
|
# RFC: Spec Kit Extension System
|
||||||
|
|
||||||
**Status**: Draft
|
**Status**: Implemented
|
||||||
**Author**: Stats Perform Engineering
|
**Author**: Stats Perform Engineering
|
||||||
**Created**: 2026-01-28
|
**Created**: 2026-01-28
|
||||||
**Updated**: 2026-01-28
|
**Updated**: 2026-03-11
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -24,8 +24,9 @@
|
|||||||
13. [Security Considerations](#security-considerations)
|
13. [Security Considerations](#security-considerations)
|
||||||
14. [Migration Strategy](#migration-strategy)
|
14. [Migration Strategy](#migration-strategy)
|
||||||
15. [Implementation Phases](#implementation-phases)
|
15. [Implementation Phases](#implementation-phases)
|
||||||
16. [Open Questions](#open-questions)
|
16. [Resolved Questions](#resolved-questions)
|
||||||
17. [Appendices](#appendices)
|
17. [Open Questions (Remaining)](#open-questions-remaining)
|
||||||
|
18. [Appendices](#appendices)
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -1504,203 +1505,225 @@ AI agent registers both names, so old scripts work.
|
|||||||
|
|
||||||
## Implementation Phases
|
## Implementation Phases
|
||||||
|
|
||||||
### Phase 1: Core Extension System (Week 1-2)
|
### Phase 1: Core Extension System ✅ COMPLETED
|
||||||
|
|
||||||
**Goal**: Basic extension infrastructure
|
**Goal**: Basic extension infrastructure
|
||||||
|
|
||||||
**Deliverables**:
|
**Deliverables**:
|
||||||
|
|
||||||
- [ ] Extension manifest schema (`extension.yml`)
|
- [x] Extension manifest schema (`extension.yml`)
|
||||||
- [ ] Extension directory structure
|
- [x] Extension directory structure
|
||||||
- [ ] CLI commands:
|
- [x] CLI commands:
|
||||||
- [ ] `specify extension list`
|
- [x] `specify extension list`
|
||||||
- [ ] `specify extension add` (from URL)
|
- [x] `specify extension add` (from URL and local `--dev`)
|
||||||
- [ ] `specify extension remove`
|
- [x] `specify extension remove`
|
||||||
- [ ] Extension registry (`.specify/extensions/.registry`)
|
- [x] Extension registry (`.specify/extensions/.registry`)
|
||||||
- [ ] Command registration (Claude only initially)
|
- [x] Command registration (Claude and 15+ other agents)
|
||||||
- [ ] Basic validation (manifest schema, compatibility)
|
- [x] Basic validation (manifest schema, compatibility)
|
||||||
- [ ] Documentation (extension development guide)
|
- [x] Documentation (extension development guide)
|
||||||
|
|
||||||
**Testing**:
|
**Testing**:
|
||||||
|
|
||||||
- [ ] Unit tests for manifest parsing
|
- [x] Unit tests for manifest parsing
|
||||||
- [ ] Integration test: Install dummy extension
|
- [x] Integration test: Install dummy extension
|
||||||
- [ ] Integration test: Register commands with Claude
|
- [x] Integration test: Register commands with Claude
|
||||||
|
|
||||||
### Phase 2: Jira Extension (Week 3)
|
### Phase 2: Jira Extension ✅ COMPLETED
|
||||||
|
|
||||||
**Goal**: First production extension
|
**Goal**: First production extension
|
||||||
|
|
||||||
**Deliverables**:
|
**Deliverables**:
|
||||||
|
|
||||||
- [ ] Create `spec-kit-jira` repository
|
- [x] Create `spec-kit-jira` repository
|
||||||
- [ ] Port Jira functionality to extension
|
- [x] Port Jira functionality to extension
|
||||||
- [ ] Create `jira-config.yml` template
|
- [x] Create `jira-config.yml` template
|
||||||
- [ ] Commands:
|
- [x] Commands:
|
||||||
- [ ] `specstoissues.md`
|
- [x] `specstoissues.md`
|
||||||
- [ ] `discover-fields.md`
|
- [x] `discover-fields.md`
|
||||||
- [ ] `sync-status.md`
|
- [x] `sync-status.md`
|
||||||
- [ ] Helper scripts
|
- [x] Helper scripts
|
||||||
- [ ] Documentation (README, configuration guide, examples)
|
- [x] Documentation (README, configuration guide, examples)
|
||||||
- [ ] Release v1.0.0
|
- [x] Release v3.0.0
|
||||||
|
|
||||||
**Testing**:
|
**Testing**:
|
||||||
|
|
||||||
- [ ] Test on `eng-msa-ts` project
|
- [x] Test on `eng-msa-ts` project
|
||||||
- [ ] Verify spec→Epic, phase→Story, task→Issue mapping
|
- [x] Verify spec→Epic, phase→Story, task→Issue mapping
|
||||||
- [ ] Test configuration loading and validation
|
- [x] Test configuration loading and validation
|
||||||
- [ ] Test custom field application
|
- [x] Test custom field application
|
||||||
|
|
||||||
### Phase 3: Extension Catalog (Week 4)
|
### Phase 3: Extension Catalog ✅ COMPLETED
|
||||||
|
|
||||||
**Goal**: Discovery and distribution
|
**Goal**: Discovery and distribution
|
||||||
|
|
||||||
**Deliverables**:
|
**Deliverables**:
|
||||||
|
|
||||||
- [ ] Central catalog (`extensions/catalog.json` in spec-kit repo)
|
- [x] Central catalog (`extensions/catalog.json` in spec-kit repo)
|
||||||
- [ ] Catalog fetch and parsing
|
- [x] Community catalog (`extensions/catalog.community.json`)
|
||||||
- [ ] CLI commands:
|
- [x] Catalog fetch and parsing with multi-catalog support
|
||||||
- [ ] `specify extension search`
|
- [x] CLI commands:
|
||||||
- [ ] `specify extension info`
|
- [x] `specify extension search`
|
||||||
- [ ] Catalog publishing process (GitHub Action)
|
- [x] `specify extension info`
|
||||||
- [ ] Documentation (how to publish extensions)
|
- [x] `specify extension catalog list`
|
||||||
|
- [x] `specify extension catalog add`
|
||||||
|
- [x] `specify extension catalog remove`
|
||||||
|
- [x] Documentation (how to publish extensions)
|
||||||
|
|
||||||
**Testing**:
|
**Testing**:
|
||||||
|
|
||||||
- [ ] Test catalog fetch
|
- [x] Test catalog fetch
|
||||||
- [ ] Test extension search/filtering
|
- [x] Test extension search/filtering
|
||||||
- [ ] Test catalog caching
|
- [x] Test catalog caching
|
||||||
|
- [x] Test multi-catalog merge with priority
|
||||||
|
|
||||||
### Phase 4: Advanced Features (Week 5-6)
|
### Phase 4: Advanced Features ✅ COMPLETED
|
||||||
|
|
||||||
**Goal**: Hooks, updates, multi-agent support
|
**Goal**: Hooks, updates, multi-agent support
|
||||||
|
|
||||||
**Deliverables**:
|
**Deliverables**:
|
||||||
|
|
||||||
- [ ] Hook system (`hooks` in extension.yml)
|
- [x] Hook system (`hooks` in extension.yml)
|
||||||
- [ ] Hook registration and execution
|
- [x] Hook registration and execution
|
||||||
- [ ] Project extensions config (`.specify/extensions.yml`)
|
- [x] Project extensions config (`.specify/extensions.yml`)
|
||||||
- [ ] CLI commands:
|
- [x] CLI commands:
|
||||||
- [ ] `specify extension update`
|
- [x] `specify extension update` (with atomic backup/restore)
|
||||||
- [ ] `specify extension enable/disable`
|
- [x] `specify extension enable/disable`
|
||||||
- [ ] Command registration for multiple agents (Gemini, Copilot)
|
- [x] Command registration for multiple agents (15+ agents including Claude, Copilot, Gemini, Cursor, etc.)
|
||||||
- [ ] Extension update notifications
|
- [x] Extension update notifications (version comparison)
|
||||||
- [ ] Configuration layer resolution (project, local, env)
|
- [x] Configuration layer resolution (project, local, env)
|
||||||
|
|
||||||
|
**Additional features implemented beyond original RFC**:
|
||||||
|
|
||||||
|
- [x] **Display name resolution**: All commands accept extension display names in addition to IDs
|
||||||
|
- [x] **Ambiguous name handling**: User-friendly tables when multiple extensions match a name
|
||||||
|
- [x] **Atomic update with rollback**: Full backup of extension dir, commands, hooks, and registry with automatic rollback on failure
|
||||||
|
- [x] **Pre-install ID validation**: Validates extension ID from ZIP before installing (security)
|
||||||
|
- [x] **Enabled state preservation**: Disabled extensions stay disabled after update
|
||||||
|
- [x] **Registry update/restore methods**: Clean API for enable/disable and rollback operations
|
||||||
|
- [x] **Catalog error fallback**: `extension info` falls back to local info when catalog unavailable
|
||||||
|
- [x] **`_install_allowed` flag**: Discovery-only catalogs can't be used for installation
|
||||||
|
- [x] **Cache invalidation**: Cache invalidated when `SPECKIT_CATALOG_URL` changes
|
||||||
|
|
||||||
**Testing**:
|
**Testing**:
|
||||||
|
|
||||||
- [ ] Test hooks in core commands
|
- [x] Test hooks in core commands
|
||||||
- [ ] Test extension updates (preserve config)
|
- [x] Test extension updates (preserve config)
|
||||||
- [ ] Test multi-agent registration
|
- [x] Test multi-agent registration
|
||||||
|
- [x] Test atomic rollback on update failure
|
||||||
|
- [x] Test enabled state preservation
|
||||||
|
- [x] Test display name resolution
|
||||||
|
|
||||||
### Phase 5: Polish & Documentation (Week 7)
|
### Phase 5: Polish & Documentation ✅ COMPLETED
|
||||||
|
|
||||||
**Goal**: Production ready
|
**Goal**: Production ready
|
||||||
|
|
||||||
**Deliverables**:
|
**Deliverables**:
|
||||||
|
|
||||||
- [ ] Comprehensive documentation:
|
- [x] Comprehensive documentation:
|
||||||
- [ ] User guide (installing/using extensions)
|
- [x] User guide (EXTENSION-USER-GUIDE.md)
|
||||||
- [ ] Extension development guide
|
- [x] Extension development guide (EXTENSION-DEV-GUIDE.md)
|
||||||
- [ ] Extension API reference
|
- [x] Extension API reference (EXTENSION-API-REFERENCE.md)
|
||||||
- [ ] Migration guide (core → extension)
|
- [x] Error messages and validation improvements
|
||||||
- [ ] Error messages and validation improvements
|
- [x] CLI help text updates
|
||||||
- [ ] CLI help text updates
|
|
||||||
- [ ] Example extension template (cookiecutter)
|
|
||||||
- [ ] Blog post / announcement
|
|
||||||
- [ ] Video tutorial
|
|
||||||
|
|
||||||
**Testing**:
|
**Testing**:
|
||||||
|
|
||||||
- [ ] End-to-end testing on multiple projects
|
- [x] End-to-end testing on multiple projects
|
||||||
- [ ] Community beta testing
|
- [x] 163 unit tests passing
|
||||||
- [ ] Performance testing (large projects)
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Open Questions
|
## Resolved Questions
|
||||||
|
|
||||||
### 1. Extension Namespace
|
The following questions from the original RFC have been resolved during implementation:
|
||||||
|
|
||||||
|
### 1. Extension Namespace ✅ RESOLVED
|
||||||
|
|
||||||
**Question**: Should extension commands use namespace prefix?
|
**Question**: Should extension commands use namespace prefix?
|
||||||
|
|
||||||
**Options**:
|
**Decision**: **Option C** - Both prefixed and aliases are supported. Commands use `speckit.{extension}.{command}` as canonical name, with optional aliases defined in manifest.
|
||||||
|
|
||||||
- A) Prefixed: `/speckit.jira.specstoissues` (explicit, avoids conflicts)
|
**Implementation**: The `aliases` field in `extension.yml` allows extensions to register additional command names.
|
||||||
- B) Short alias: `/jira.specstoissues` (shorter, less verbose)
|
|
||||||
- C) Both: Register both names, prefer prefixed in docs
|
|
||||||
|
|
||||||
**Recommendation**: C (both), prefixed is canonical
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### 2. Config File Location
|
### 2. Config File Location ✅ RESOLVED
|
||||||
|
|
||||||
**Question**: Where should extension configs live?
|
**Question**: Where should extension configs live?
|
||||||
|
|
||||||
**Options**:
|
**Decision**: **Option A** - Extension directory (`.specify/extensions/{ext-id}/{ext-id}-config.yml`). This keeps extensions self-contained and easier to manage.
|
||||||
|
|
||||||
- A) Extension directory: `.specify/extensions/jira/jira-config.yml` (encapsulated)
|
**Implementation**: Each extension has its own config file within its directory, with layered resolution (defaults → project → local → env vars).
|
||||||
- B) Root level: `.specify/jira-config.yml` (more visible)
|
|
||||||
- C) Unified: `.specify/extensions.yml` (all extension configs in one file)
|
|
||||||
|
|
||||||
**Recommendation**: A (extension directory), cleaner separation
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### 3. Command File Format
|
### 3. Command File Format ✅ RESOLVED
|
||||||
|
|
||||||
**Question**: Should extensions use universal format or agent-specific?
|
**Question**: Should extensions use universal format or agent-specific?
|
||||||
|
|
||||||
**Options**:
|
**Decision**: **Option A** - Universal Markdown format. Extensions write commands once, CLI converts to agent-specific format during registration.
|
||||||
|
|
||||||
- A) Universal Markdown: Extensions write once, CLI converts per-agent
|
**Implementation**: `CommandRegistrar` class handles conversion to 15+ agent formats (Claude, Copilot, Gemini, Cursor, etc.).
|
||||||
- B) Agent-specific: Extensions provide separate files for each agent
|
|
||||||
- C) Hybrid: Universal default, agent-specific overrides
|
|
||||||
|
|
||||||
**Recommendation**: A (universal), reduces duplication
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### 4. Hook Execution Model
|
### 4. Hook Execution Model ✅ RESOLVED
|
||||||
|
|
||||||
**Question**: How should hooks execute?
|
**Question**: How should hooks execute?
|
||||||
|
|
||||||
**Options**:
|
**Decision**: **Option A** - Hooks are registered in `.specify/extensions.yml` and executed by the AI agent when it sees the hook trigger. Hook state (enabled/disabled) is managed per-extension.
|
||||||
|
|
||||||
- A) AI agent interprets: Core commands output `EXECUTE_COMMAND: name`
|
**Implementation**: `HookExecutor` class manages hook registration and state in `extensions.yml`.
|
||||||
- B) CLI executes: Core commands call `specify extension hook after_tasks`
|
|
||||||
- C) Agent built-in: Extension system built into AI agent (Claude SDK)
|
|
||||||
|
|
||||||
**Recommendation**: A initially (simpler), move to C long-term
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### 5. Extension Distribution
|
### 5. Extension Distribution ✅ RESOLVED
|
||||||
|
|
||||||
**Question**: How should extensions be packaged?
|
**Question**: How should extensions be packaged?
|
||||||
|
|
||||||
**Options**:
|
**Decision**: **Option A** - ZIP archives downloaded from GitHub releases (via catalog `download_url`). Local development uses `--dev` flag with directory path.
|
||||||
|
|
||||||
- A) ZIP archives: Downloaded from GitHub releases
|
**Implementation**: `ExtensionManager.install_from_zip()` handles ZIP extraction and validation.
|
||||||
- B) Git repos: Cloned directly (`git clone`)
|
|
||||||
- C) Python packages: Installable via `uv tool install`
|
|
||||||
|
|
||||||
**Recommendation**: A (ZIP), simpler for non-Python extensions in future
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### 6. Multi-Version Support
|
### 6. Multi-Version Support ✅ RESOLVED
|
||||||
|
|
||||||
**Question**: Can multiple versions of same extension coexist?
|
**Question**: Can multiple versions of same extension coexist?
|
||||||
|
|
||||||
|
**Decision**: **Option A** - Single version only. Updates replace the existing version with atomic rollback on failure.
|
||||||
|
|
||||||
|
**Implementation**: `extension update` performs atomic backup/restore to ensure safe updates.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Open Questions (Remaining)
|
||||||
|
|
||||||
|
### 1. Sandboxing / Permissions (Future)
|
||||||
|
|
||||||
|
**Question**: Should extensions declare required permissions?
|
||||||
|
|
||||||
**Options**:
|
**Options**:
|
||||||
|
|
||||||
- A) Single version: Only one version installed at a time
|
- A) No sandboxing (current): Extensions run with same privileges as AI agent
|
||||||
- B) Multi-version: Side-by-side versions (`.specify/extensions/jira@1.0/`, `.specify/extensions/jira@2.0/`)
|
- B) Permission declarations: Extensions declare `filesystem:read`, `network:external`, etc.
|
||||||
- C) Per-branch: Different branches use different versions
|
- C) Opt-in sandboxing: Organizations can enable permission enforcement
|
||||||
|
|
||||||
**Recommendation**: A initially (simpler), consider B in future if needed
|
**Status**: Deferred to future version. Currently using trust-based model where users trust extension authors.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 2. Package Signatures (Future)
|
||||||
|
|
||||||
|
**Question**: Should extensions be cryptographically signed?
|
||||||
|
|
||||||
|
**Options**:
|
||||||
|
|
||||||
|
- A) No signatures (current): Trust based on catalog source
|
||||||
|
- B) GPG/Sigstore signatures: Verify package integrity
|
||||||
|
- C) Catalog-level verification: Catalog maintainers verify packages
|
||||||
|
|
||||||
|
**Status**: Deferred to future version. `checksum` field is available in catalog schema but not enforced.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"schema_version": "1.0",
|
"schema_version": "1.0",
|
||||||
"updated_at": "2026-03-09T00:00:00Z",
|
"updated_at": "2026-03-13T12:00:00Z",
|
||||||
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json",
|
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json",
|
||||||
"extensions": {
|
"extensions": {
|
||||||
"azure-devops": {
|
"azure-devops": {
|
||||||
@@ -74,6 +74,37 @@
|
|||||||
"created_at": "2026-02-22T00:00:00Z",
|
"created_at": "2026-02-22T00:00:00Z",
|
||||||
"updated_at": "2026-02-22T00:00:00Z"
|
"updated_at": "2026-02-22T00:00:00Z"
|
||||||
},
|
},
|
||||||
|
"doctor": {
|
||||||
|
"name": "Project Health Check",
|
||||||
|
"id": "doctor",
|
||||||
|
"description": "Diagnose a Spec Kit project and report health issues across structure, agents, features, scripts, extensions, and git.",
|
||||||
|
"author": "KhawarHabibKhan",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"download_url": "https://github.com/KhawarHabibKhan/spec-kit-doctor/archive/refs/tags/v1.0.0.zip",
|
||||||
|
"repository": "https://github.com/KhawarHabibKhan/spec-kit-doctor",
|
||||||
|
"homepage": "https://github.com/KhawarHabibKhan/spec-kit-doctor",
|
||||||
|
"documentation": "https://github.com/KhawarHabibKhan/spec-kit-doctor/blob/main/README.md",
|
||||||
|
"changelog": "https://github.com/KhawarHabibKhan/spec-kit-doctor/blob/main/CHANGELOG.md",
|
||||||
|
"license": "MIT",
|
||||||
|
"requires": {
|
||||||
|
"speckit_version": ">=0.1.0"
|
||||||
|
},
|
||||||
|
"provides": {
|
||||||
|
"commands": 1,
|
||||||
|
"hooks": 0
|
||||||
|
},
|
||||||
|
"tags": [
|
||||||
|
"diagnostics",
|
||||||
|
"health-check",
|
||||||
|
"validation",
|
||||||
|
"project-structure"
|
||||||
|
],
|
||||||
|
"verified": false,
|
||||||
|
"downloads": 0,
|
||||||
|
"stars": 0,
|
||||||
|
"created_at": "2026-03-13T00:00:00Z",
|
||||||
|
"updated_at": "2026-03-13T00:00:00Z"
|
||||||
|
},
|
||||||
"fleet": {
|
"fleet": {
|
||||||
"name": "Fleet Orchestrator",
|
"name": "Fleet Orchestrator",
|
||||||
"id": "fleet",
|
"id": "fleet",
|
||||||
|
|||||||
@@ -1,6 +1,21 @@
|
|||||||
{
|
{
|
||||||
"schema_version": "1.0",
|
"schema_version": "1.0",
|
||||||
"updated_at": "2026-02-03T00:00:00Z",
|
"updated_at": "2026-03-10T00:00:00Z",
|
||||||
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.json",
|
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.json",
|
||||||
"extensions": {}
|
"extensions": {
|
||||||
}
|
"selftest": {
|
||||||
|
"name": "Spec Kit Self-Test Utility",
|
||||||
|
"id": "selftest",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"description": "Verifies catalog extensions by programmatically walking through the discovery, installation, and registration lifecycle.",
|
||||||
|
"author": "spec-kit-core",
|
||||||
|
"repository": "https://github.com/github/spec-kit",
|
||||||
|
"download_url": "https://github.com/github/spec-kit/releases/download/selftest-v1.0.0/selftest.zip",
|
||||||
|
"tags": [
|
||||||
|
"testing",
|
||||||
|
"core",
|
||||||
|
"utility"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
69
extensions/selftest/commands/selftest.md
Normal file
69
extensions/selftest/commands/selftest.md
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
---
|
||||||
|
description: "Validate the lifecycle of an extension from the catalog."
|
||||||
|
---
|
||||||
|
|
||||||
|
# Extension Self-Test: `$ARGUMENTS`
|
||||||
|
|
||||||
|
This command drives a self-test simulating the developer experience with the `$ARGUMENTS` extension.
|
||||||
|
|
||||||
|
## Goal
|
||||||
|
|
||||||
|
Validate the end-to-end lifecycle (discovery, installation, registration) for the extension: `$ARGUMENTS`.
|
||||||
|
If `$ARGUMENTS` is empty, you must tell the user to provide an extension name, for example: `/speckit.selftest.extension linear`.
|
||||||
|
|
||||||
|
## Steps
|
||||||
|
|
||||||
|
### Step 1: Catalog Discovery Validation
|
||||||
|
|
||||||
|
Check if the extension exists in the Spec Kit catalog.
|
||||||
|
Execute this command and verify that it completes successfully and that the returned extension ID exactly matches `$ARGUMENTS`. If the command fails or the ID does not match `$ARGUMENTS`, fail the test.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
specify extension info "$ARGUMENTS"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Simulate Installation
|
||||||
|
|
||||||
|
First, try to add the extension to the current workspace configuration directly. If the catalog provides the extension as `install_allowed: false` (discovery-only), this step is *expected* to fail.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
specify extension add "$ARGUMENTS"
|
||||||
|
```
|
||||||
|
|
||||||
|
Then, simulate adding the extension by installing it from its catalog download URL, which should bypass the restriction.
|
||||||
|
Obtain the extension's `download_url` from the catalog metadata (for example, via a catalog info command or UI), then run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
specify extension add "$ARGUMENTS" --from "<download_url>"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Registration Verification
|
||||||
|
|
||||||
|
Once the `add` command completes, verify the installation by checking the project configuration.
|
||||||
|
Use terminal tools (like `cat`) to verify that the following file contains a record for `$ARGUMENTS`.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cat .specify/extensions/.registry/$ARGUMENTS.json
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Verification Report
|
||||||
|
|
||||||
|
Analyze the standard output of the three steps.
|
||||||
|
Generate a terminal-style test output format detailing the results of discovery, installation, and registration. Return this directly to the user.
|
||||||
|
|
||||||
|
Example output format:
|
||||||
|
```text
|
||||||
|
============================= test session starts ==============================
|
||||||
|
collected 3 items
|
||||||
|
|
||||||
|
test_selftest_discovery.py::test_catalog_search [PASS/FAIL]
|
||||||
|
Details: [Provide execution result of specify extension search]
|
||||||
|
|
||||||
|
test_selftest_installation.py::test_extension_add [PASS/FAIL]
|
||||||
|
Details: [Provide execution result of specify extension add]
|
||||||
|
|
||||||
|
test_selftest_registration.py::test_config_verification [PASS/FAIL]
|
||||||
|
Details: [Provide execution result of registry record verification]
|
||||||
|
|
||||||
|
============================== [X] passed in ... ==============================
|
||||||
|
```
|
||||||
16
extensions/selftest/extension.yml
Normal file
16
extensions/selftest/extension.yml
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
schema_version: "1.0"
|
||||||
|
extension:
|
||||||
|
id: selftest
|
||||||
|
name: Spec Kit Self-Test Utility
|
||||||
|
version: 1.0.0
|
||||||
|
description: Verifies catalog extensions by programmatically walking through the discovery, installation, and registration lifecycle.
|
||||||
|
author: spec-kit-core
|
||||||
|
repository: https://github.com/github/spec-kit
|
||||||
|
license: MIT
|
||||||
|
requires:
|
||||||
|
speckit_version: ">=0.2.0"
|
||||||
|
provides:
|
||||||
|
commands:
|
||||||
|
- name: speckit.selftest.extension
|
||||||
|
file: commands/selftest.md
|
||||||
|
description: Validate the lifecycle of an extension from the catalog.
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
[project]
|
[project]
|
||||||
name = "specify-cli"
|
name = "specify-cli"
|
||||||
version = "0.2.0"
|
version = "0.2.1"
|
||||||
description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)."
|
description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)."
|
||||||
requires-python = ">=3.11"
|
requires-python = ">=3.11"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
|||||||
@@ -79,15 +79,28 @@ SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|||||||
source "$SCRIPT_DIR/common.sh"
|
source "$SCRIPT_DIR/common.sh"
|
||||||
|
|
||||||
# Get feature paths and validate branch
|
# Get feature paths and validate branch
|
||||||
eval $(get_feature_paths)
|
_paths_output=$(get_feature_paths) || { echo "ERROR: Failed to resolve feature paths" >&2; exit 1; }
|
||||||
|
eval "$_paths_output"
|
||||||
|
unset _paths_output
|
||||||
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
|
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
|
||||||
|
|
||||||
# If paths-only mode, output paths and exit (support JSON + paths-only combined)
|
# If paths-only mode, output paths and exit (support JSON + paths-only combined)
|
||||||
if $PATHS_ONLY; then
|
if $PATHS_ONLY; then
|
||||||
if $JSON_MODE; then
|
if $JSON_MODE; then
|
||||||
# Minimal JSON paths payload (no validation performed)
|
# Minimal JSON paths payload (no validation performed)
|
||||||
printf '{"REPO_ROOT":"%s","BRANCH":"%s","FEATURE_DIR":"%s","FEATURE_SPEC":"%s","IMPL_PLAN":"%s","TASKS":"%s"}\n' \
|
if has_jq; then
|
||||||
"$REPO_ROOT" "$CURRENT_BRANCH" "$FEATURE_DIR" "$FEATURE_SPEC" "$IMPL_PLAN" "$TASKS"
|
jq -cn \
|
||||||
|
--arg repo_root "$REPO_ROOT" \
|
||||||
|
--arg branch "$CURRENT_BRANCH" \
|
||||||
|
--arg feature_dir "$FEATURE_DIR" \
|
||||||
|
--arg feature_spec "$FEATURE_SPEC" \
|
||||||
|
--arg impl_plan "$IMPL_PLAN" \
|
||||||
|
--arg tasks "$TASKS" \
|
||||||
|
'{REPO_ROOT:$repo_root,BRANCH:$branch,FEATURE_DIR:$feature_dir,FEATURE_SPEC:$feature_spec,IMPL_PLAN:$impl_plan,TASKS:$tasks}'
|
||||||
|
else
|
||||||
|
printf '{"REPO_ROOT":"%s","BRANCH":"%s","FEATURE_DIR":"%s","FEATURE_SPEC":"%s","IMPL_PLAN":"%s","TASKS":"%s"}\n' \
|
||||||
|
"$(json_escape "$REPO_ROOT")" "$(json_escape "$CURRENT_BRANCH")" "$(json_escape "$FEATURE_DIR")" "$(json_escape "$FEATURE_SPEC")" "$(json_escape "$IMPL_PLAN")" "$(json_escape "$TASKS")"
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
echo "REPO_ROOT: $REPO_ROOT"
|
echo "REPO_ROOT: $REPO_ROOT"
|
||||||
echo "BRANCH: $CURRENT_BRANCH"
|
echo "BRANCH: $CURRENT_BRANCH"
|
||||||
@@ -141,14 +154,25 @@ fi
|
|||||||
# Output results
|
# Output results
|
||||||
if $JSON_MODE; then
|
if $JSON_MODE; then
|
||||||
# Build JSON array of documents
|
# Build JSON array of documents
|
||||||
if [[ ${#docs[@]} -eq 0 ]]; then
|
if has_jq; then
|
||||||
json_docs="[]"
|
if [[ ${#docs[@]} -eq 0 ]]; then
|
||||||
|
json_docs="[]"
|
||||||
|
else
|
||||||
|
json_docs=$(printf '%s\n' "${docs[@]}" | jq -R . | jq -s .)
|
||||||
|
fi
|
||||||
|
jq -cn \
|
||||||
|
--arg feature_dir "$FEATURE_DIR" \
|
||||||
|
--argjson docs "$json_docs" \
|
||||||
|
'{FEATURE_DIR:$feature_dir,AVAILABLE_DOCS:$docs}'
|
||||||
else
|
else
|
||||||
json_docs=$(printf '"%s",' "${docs[@]}")
|
if [[ ${#docs[@]} -eq 0 ]]; then
|
||||||
json_docs="[${json_docs%,}]"
|
json_docs="[]"
|
||||||
|
else
|
||||||
|
json_docs=$(printf '"%s",' "${docs[@]}")
|
||||||
|
json_docs="[${json_docs%,}]"
|
||||||
|
fi
|
||||||
|
printf '{"FEATURE_DIR":"%s","AVAILABLE_DOCS":%s}\n' "$(json_escape "$FEATURE_DIR")" "$json_docs"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
printf '{"FEATURE_DIR":"%s","AVAILABLE_DOCS":%s}\n' "$FEATURE_DIR" "$json_docs"
|
|
||||||
else
|
else
|
||||||
# Text output
|
# Text output
|
||||||
echo "FEATURE_DIR:$FEATURE_DIR"
|
echo "FEATURE_DIR:$FEATURE_DIR"
|
||||||
|
|||||||
@@ -120,7 +120,7 @@ find_feature_dir_by_prefix() {
|
|||||||
# Multiple matches - this shouldn't happen with proper naming convention
|
# Multiple matches - this shouldn't happen with proper naming convention
|
||||||
echo "ERROR: Multiple spec directories found with prefix '$prefix': ${matches[*]}" >&2
|
echo "ERROR: Multiple spec directories found with prefix '$prefix': ${matches[*]}" >&2
|
||||||
echo "Please ensure only one spec directory exists per numeric prefix." >&2
|
echo "Please ensure only one spec directory exists per numeric prefix." >&2
|
||||||
echo "$specs_dir/$branch_name" # Return something to avoid breaking the script
|
return 1
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -134,21 +134,42 @@ get_feature_paths() {
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# Use prefix-based lookup to support multiple branches per spec
|
# Use prefix-based lookup to support multiple branches per spec
|
||||||
local feature_dir=$(find_feature_dir_by_prefix "$repo_root" "$current_branch")
|
local feature_dir
|
||||||
|
if ! feature_dir=$(find_feature_dir_by_prefix "$repo_root" "$current_branch"); then
|
||||||
|
echo "ERROR: Failed to resolve feature directory" >&2
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
cat <<EOF
|
# Use printf '%q' to safely quote values, preventing shell injection
|
||||||
REPO_ROOT='$repo_root'
|
# via crafted branch names or paths containing special characters
|
||||||
CURRENT_BRANCH='$current_branch'
|
printf 'REPO_ROOT=%q\n' "$repo_root"
|
||||||
HAS_GIT='$has_git_repo'
|
printf 'CURRENT_BRANCH=%q\n' "$current_branch"
|
||||||
FEATURE_DIR='$feature_dir'
|
printf 'HAS_GIT=%q\n' "$has_git_repo"
|
||||||
FEATURE_SPEC='$feature_dir/spec.md'
|
printf 'FEATURE_DIR=%q\n' "$feature_dir"
|
||||||
IMPL_PLAN='$feature_dir/plan.md'
|
printf 'FEATURE_SPEC=%q\n' "$feature_dir/spec.md"
|
||||||
TASKS='$feature_dir/tasks.md'
|
printf 'IMPL_PLAN=%q\n' "$feature_dir/plan.md"
|
||||||
RESEARCH='$feature_dir/research.md'
|
printf 'TASKS=%q\n' "$feature_dir/tasks.md"
|
||||||
DATA_MODEL='$feature_dir/data-model.md'
|
printf 'RESEARCH=%q\n' "$feature_dir/research.md"
|
||||||
QUICKSTART='$feature_dir/quickstart.md'
|
printf 'DATA_MODEL=%q\n' "$feature_dir/data-model.md"
|
||||||
CONTRACTS_DIR='$feature_dir/contracts'
|
printf 'QUICKSTART=%q\n' "$feature_dir/quickstart.md"
|
||||||
EOF
|
printf 'CONTRACTS_DIR=%q\n' "$feature_dir/contracts"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check if jq is available for safe JSON construction
|
||||||
|
has_jq() {
|
||||||
|
command -v jq >/dev/null 2>&1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Escape a string for safe embedding in a JSON value (fallback when jq is unavailable).
|
||||||
|
# Handles backslash, double-quote, and control characters (newline, tab, carriage return).
|
||||||
|
json_escape() {
|
||||||
|
local s="$1"
|
||||||
|
s="${s//\\/\\\\}"
|
||||||
|
s="${s//\"/\\\"}"
|
||||||
|
s="${s//$'\n'/\\n}"
|
||||||
|
s="${s//$'\t'/\\t}"
|
||||||
|
s="${s//$'\r'/\\r}"
|
||||||
|
printf '%s' "$s"
|
||||||
}
|
}
|
||||||
|
|
||||||
check_file() { [[ -f "$1" ]] && echo " ✓ $2" || echo " ✗ $2"; }
|
check_file() { [[ -f "$1" ]] && echo " ✓ $2" || echo " ✗ $2"; }
|
||||||
|
|||||||
@@ -162,6 +162,17 @@ clean_branch_name() {
|
|||||||
echo "$name" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//'
|
echo "$name" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//'
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Escape a string for safe embedding in a JSON value (fallback when jq is unavailable).
|
||||||
|
json_escape() {
|
||||||
|
local s="$1"
|
||||||
|
s="${s//\\/\\\\}"
|
||||||
|
s="${s//\"/\\\"}"
|
||||||
|
s="${s//$'\n'/\\n}"
|
||||||
|
s="${s//$'\t'/\\t}"
|
||||||
|
s="${s//$'\r'/\\r}"
|
||||||
|
printf '%s' "$s"
|
||||||
|
}
|
||||||
|
|
||||||
# Resolve repository root. Prefer git information when available, but fall back
|
# Resolve repository root. Prefer git information when available, but fall back
|
||||||
# to searching for repository markers so the workflow still functions in repositories that
|
# to searching for repository markers so the workflow still functions in repositories that
|
||||||
# were initialised with --no-git.
|
# were initialised with --no-git.
|
||||||
@@ -300,14 +311,22 @@ TEMPLATE="$REPO_ROOT/.specify/templates/spec-template.md"
|
|||||||
SPEC_FILE="$FEATURE_DIR/spec.md"
|
SPEC_FILE="$FEATURE_DIR/spec.md"
|
||||||
if [ -f "$TEMPLATE" ]; then cp "$TEMPLATE" "$SPEC_FILE"; else touch "$SPEC_FILE"; fi
|
if [ -f "$TEMPLATE" ]; then cp "$TEMPLATE" "$SPEC_FILE"; else touch "$SPEC_FILE"; fi
|
||||||
|
|
||||||
# Set the SPECIFY_FEATURE environment variable for the current session
|
# Inform the user how to persist the feature variable in their own shell
|
||||||
export SPECIFY_FEATURE="$BRANCH_NAME"
|
printf '# To persist: export SPECIFY_FEATURE=%q\n' "$BRANCH_NAME" >&2
|
||||||
|
|
||||||
if $JSON_MODE; then
|
if $JSON_MODE; then
|
||||||
printf '{"BRANCH_NAME":"%s","SPEC_FILE":"%s","FEATURE_NUM":"%s"}\n' "$BRANCH_NAME" "$SPEC_FILE" "$FEATURE_NUM"
|
if command -v jq >/dev/null 2>&1; then
|
||||||
|
jq -cn \
|
||||||
|
--arg branch_name "$BRANCH_NAME" \
|
||||||
|
--arg spec_file "$SPEC_FILE" \
|
||||||
|
--arg feature_num "$FEATURE_NUM" \
|
||||||
|
'{BRANCH_NAME:$branch_name,SPEC_FILE:$spec_file,FEATURE_NUM:$feature_num}'
|
||||||
|
else
|
||||||
|
printf '{"BRANCH_NAME":"%s","SPEC_FILE":"%s","FEATURE_NUM":"%s"}\n' "$(json_escape "$BRANCH_NAME")" "$(json_escape "$SPEC_FILE")" "$(json_escape "$FEATURE_NUM")"
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
echo "BRANCH_NAME: $BRANCH_NAME"
|
echo "BRANCH_NAME: $BRANCH_NAME"
|
||||||
echo "SPEC_FILE: $SPEC_FILE"
|
echo "SPEC_FILE: $SPEC_FILE"
|
||||||
echo "FEATURE_NUM: $FEATURE_NUM"
|
echo "FEATURE_NUM: $FEATURE_NUM"
|
||||||
echo "SPECIFY_FEATURE environment variable set to: $BRANCH_NAME"
|
printf '# To persist in your shell: export SPECIFY_FEATURE=%q\n' "$BRANCH_NAME"
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -28,7 +28,9 @@ SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|||||||
source "$SCRIPT_DIR/common.sh"
|
source "$SCRIPT_DIR/common.sh"
|
||||||
|
|
||||||
# Get all paths and variables from common functions
|
# Get all paths and variables from common functions
|
||||||
eval $(get_feature_paths)
|
_paths_output=$(get_feature_paths) || { echo "ERROR: Failed to resolve feature paths" >&2; exit 1; }
|
||||||
|
eval "$_paths_output"
|
||||||
|
unset _paths_output
|
||||||
|
|
||||||
# Check if we're on a proper feature branch (only for git repos)
|
# Check if we're on a proper feature branch (only for git repos)
|
||||||
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
|
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
|
||||||
@@ -49,8 +51,18 @@ fi
|
|||||||
|
|
||||||
# Output results
|
# Output results
|
||||||
if $JSON_MODE; then
|
if $JSON_MODE; then
|
||||||
printf '{"FEATURE_SPEC":"%s","IMPL_PLAN":"%s","SPECS_DIR":"%s","BRANCH":"%s","HAS_GIT":"%s"}\n' \
|
if has_jq; then
|
||||||
"$FEATURE_SPEC" "$IMPL_PLAN" "$FEATURE_DIR" "$CURRENT_BRANCH" "$HAS_GIT"
|
jq -cn \
|
||||||
|
--arg feature_spec "$FEATURE_SPEC" \
|
||||||
|
--arg impl_plan "$IMPL_PLAN" \
|
||||||
|
--arg specs_dir "$FEATURE_DIR" \
|
||||||
|
--arg branch "$CURRENT_BRANCH" \
|
||||||
|
--arg has_git "$HAS_GIT" \
|
||||||
|
'{FEATURE_SPEC:$feature_spec,IMPL_PLAN:$impl_plan,SPECS_DIR:$specs_dir,BRANCH:$branch,HAS_GIT:$has_git}'
|
||||||
|
else
|
||||||
|
printf '{"FEATURE_SPEC":"%s","IMPL_PLAN":"%s","SPECS_DIR":"%s","BRANCH":"%s","HAS_GIT":"%s"}\n' \
|
||||||
|
"$(json_escape "$FEATURE_SPEC")" "$(json_escape "$IMPL_PLAN")" "$(json_escape "$FEATURE_DIR")" "$(json_escape "$CURRENT_BRANCH")" "$(json_escape "$HAS_GIT")"
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
echo "FEATURE_SPEC: $FEATURE_SPEC"
|
echo "FEATURE_SPEC: $FEATURE_SPEC"
|
||||||
echo "IMPL_PLAN: $IMPL_PLAN"
|
echo "IMPL_PLAN: $IMPL_PLAN"
|
||||||
|
|||||||
@@ -53,7 +53,9 @@ SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|||||||
source "$SCRIPT_DIR/common.sh"
|
source "$SCRIPT_DIR/common.sh"
|
||||||
|
|
||||||
# Get all paths and variables from common functions
|
# Get all paths and variables from common functions
|
||||||
eval $(get_feature_paths)
|
_paths_output=$(get_feature_paths) || { echo "ERROR: Failed to resolve feature paths" >&2; exit 1; }
|
||||||
|
eval "$_paths_output"
|
||||||
|
unset _paths_output
|
||||||
|
|
||||||
NEW_PLAN="$IMPL_PLAN" # Alias for compatibility with existing code
|
NEW_PLAN="$IMPL_PLAN" # Alias for compatibility with existing code
|
||||||
AGENT_TYPE="${1:-}"
|
AGENT_TYPE="${1:-}"
|
||||||
@@ -71,12 +73,14 @@ AUGGIE_FILE="$REPO_ROOT/.augment/rules/specify-rules.md"
|
|||||||
ROO_FILE="$REPO_ROOT/.roo/rules/specify-rules.md"
|
ROO_FILE="$REPO_ROOT/.roo/rules/specify-rules.md"
|
||||||
CODEBUDDY_FILE="$REPO_ROOT/CODEBUDDY.md"
|
CODEBUDDY_FILE="$REPO_ROOT/CODEBUDDY.md"
|
||||||
QODER_FILE="$REPO_ROOT/QODER.md"
|
QODER_FILE="$REPO_ROOT/QODER.md"
|
||||||
AMP_FILE="$REPO_ROOT/AGENTS.md"
|
# AMP, Kiro CLI, and IBM Bob all share AGENTS.md — use AGENTS_FILE to avoid
|
||||||
|
# updating the same file multiple times.
|
||||||
|
AMP_FILE="$AGENTS_FILE"
|
||||||
SHAI_FILE="$REPO_ROOT/SHAI.md"
|
SHAI_FILE="$REPO_ROOT/SHAI.md"
|
||||||
TABNINE_FILE="$REPO_ROOT/TABNINE.md"
|
TABNINE_FILE="$REPO_ROOT/TABNINE.md"
|
||||||
KIRO_FILE="$REPO_ROOT/AGENTS.md"
|
KIRO_FILE="$AGENTS_FILE"
|
||||||
AGY_FILE="$REPO_ROOT/.agent/rules/specify-rules.md"
|
AGY_FILE="$REPO_ROOT/.agent/rules/specify-rules.md"
|
||||||
BOB_FILE="$REPO_ROOT/AGENTS.md"
|
BOB_FILE="$AGENTS_FILE"
|
||||||
VIBE_FILE="$REPO_ROOT/.vibe/agents/specify-agents.md"
|
VIBE_FILE="$REPO_ROOT/.vibe/agents/specify-agents.md"
|
||||||
KIMI_FILE="$REPO_ROOT/KIMI.md"
|
KIMI_FILE="$REPO_ROOT/KIMI.md"
|
||||||
|
|
||||||
@@ -112,6 +116,8 @@ log_warning() {
|
|||||||
# Cleanup function for temporary files
|
# Cleanup function for temporary files
|
||||||
cleanup() {
|
cleanup() {
|
||||||
local exit_code=$?
|
local exit_code=$?
|
||||||
|
# Disarm traps to prevent re-entrant loop
|
||||||
|
trap - EXIT INT TERM
|
||||||
rm -f /tmp/agent_update_*_$$
|
rm -f /tmp/agent_update_*_$$
|
||||||
rm -f /tmp/manual_additions_$$
|
rm -f /tmp/manual_additions_$$
|
||||||
exit $exit_code
|
exit $exit_code
|
||||||
@@ -476,7 +482,7 @@ update_existing_agent_file() {
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# Update timestamp
|
# Update timestamp
|
||||||
if [[ "$line" =~ \*\*Last\ updated\*\*:.*[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] ]]; then
|
if [[ "$line" =~ (\*\*)?Last\ updated(\*\*)?:.*[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] ]]; then
|
||||||
echo "$line" | sed "s/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]/$current_date/" >> "$temp_file"
|
echo "$line" | sed "s/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]/$current_date/" >> "$temp_file"
|
||||||
else
|
else
|
||||||
echo "$line" >> "$temp_file"
|
echo "$line" >> "$temp_file"
|
||||||
@@ -607,67 +613,67 @@ update_specific_agent() {
|
|||||||
|
|
||||||
case "$agent_type" in
|
case "$agent_type" in
|
||||||
claude)
|
claude)
|
||||||
update_agent_file "$CLAUDE_FILE" "Claude Code"
|
update_agent_file "$CLAUDE_FILE" "Claude Code" || return 1
|
||||||
;;
|
;;
|
||||||
gemini)
|
gemini)
|
||||||
update_agent_file "$GEMINI_FILE" "Gemini CLI"
|
update_agent_file "$GEMINI_FILE" "Gemini CLI" || return 1
|
||||||
;;
|
;;
|
||||||
copilot)
|
copilot)
|
||||||
update_agent_file "$COPILOT_FILE" "GitHub Copilot"
|
update_agent_file "$COPILOT_FILE" "GitHub Copilot" || return 1
|
||||||
;;
|
;;
|
||||||
cursor-agent)
|
cursor-agent)
|
||||||
update_agent_file "$CURSOR_FILE" "Cursor IDE"
|
update_agent_file "$CURSOR_FILE" "Cursor IDE" || return 1
|
||||||
;;
|
;;
|
||||||
qwen)
|
qwen)
|
||||||
update_agent_file "$QWEN_FILE" "Qwen Code"
|
update_agent_file "$QWEN_FILE" "Qwen Code" || return 1
|
||||||
;;
|
;;
|
||||||
opencode)
|
opencode)
|
||||||
update_agent_file "$AGENTS_FILE" "opencode"
|
update_agent_file "$AGENTS_FILE" "opencode" || return 1
|
||||||
;;
|
;;
|
||||||
codex)
|
codex)
|
||||||
update_agent_file "$AGENTS_FILE" "Codex CLI"
|
update_agent_file "$AGENTS_FILE" "Codex CLI" || return 1
|
||||||
;;
|
;;
|
||||||
windsurf)
|
windsurf)
|
||||||
update_agent_file "$WINDSURF_FILE" "Windsurf"
|
update_agent_file "$WINDSURF_FILE" "Windsurf" || return 1
|
||||||
;;
|
;;
|
||||||
kilocode)
|
kilocode)
|
||||||
update_agent_file "$KILOCODE_FILE" "Kilo Code"
|
update_agent_file "$KILOCODE_FILE" "Kilo Code" || return 1
|
||||||
;;
|
;;
|
||||||
auggie)
|
auggie)
|
||||||
update_agent_file "$AUGGIE_FILE" "Auggie CLI"
|
update_agent_file "$AUGGIE_FILE" "Auggie CLI" || return 1
|
||||||
;;
|
;;
|
||||||
roo)
|
roo)
|
||||||
update_agent_file "$ROO_FILE" "Roo Code"
|
update_agent_file "$ROO_FILE" "Roo Code" || return 1
|
||||||
;;
|
;;
|
||||||
codebuddy)
|
codebuddy)
|
||||||
update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI"
|
update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI" || return 1
|
||||||
;;
|
;;
|
||||||
qodercli)
|
qodercli)
|
||||||
update_agent_file "$QODER_FILE" "Qoder CLI"
|
update_agent_file "$QODER_FILE" "Qoder CLI" || return 1
|
||||||
;;
|
;;
|
||||||
amp)
|
amp)
|
||||||
update_agent_file "$AMP_FILE" "Amp"
|
update_agent_file "$AMP_FILE" "Amp" || return 1
|
||||||
;;
|
;;
|
||||||
shai)
|
shai)
|
||||||
update_agent_file "$SHAI_FILE" "SHAI"
|
update_agent_file "$SHAI_FILE" "SHAI" || return 1
|
||||||
;;
|
;;
|
||||||
tabnine)
|
tabnine)
|
||||||
update_agent_file "$TABNINE_FILE" "Tabnine CLI"
|
update_agent_file "$TABNINE_FILE" "Tabnine CLI" || return 1
|
||||||
;;
|
;;
|
||||||
kiro-cli)
|
kiro-cli)
|
||||||
update_agent_file "$KIRO_FILE" "Kiro CLI"
|
update_agent_file "$KIRO_FILE" "Kiro CLI" || return 1
|
||||||
;;
|
;;
|
||||||
agy)
|
agy)
|
||||||
update_agent_file "$AGY_FILE" "Antigravity"
|
update_agent_file "$AGY_FILE" "Antigravity" || return 1
|
||||||
;;
|
;;
|
||||||
bob)
|
bob)
|
||||||
update_agent_file "$BOB_FILE" "IBM Bob"
|
update_agent_file "$BOB_FILE" "IBM Bob" || return 1
|
||||||
;;
|
;;
|
||||||
vibe)
|
vibe)
|
||||||
update_agent_file "$VIBE_FILE" "Mistral Vibe"
|
update_agent_file "$VIBE_FILE" "Mistral Vibe" || return 1
|
||||||
;;
|
;;
|
||||||
kimi)
|
kimi)
|
||||||
update_agent_file "$KIMI_FILE" "Kimi Code"
|
update_agent_file "$KIMI_FILE" "Kimi Code" || return 1
|
||||||
;;
|
;;
|
||||||
generic)
|
generic)
|
||||||
log_info "Generic agent: no predefined context file. Use the agent-specific update script for your agent."
|
log_info "Generic agent: no predefined context file. Use the agent-specific update script for your agent."
|
||||||
@@ -682,106 +688,53 @@ update_specific_agent() {
|
|||||||
|
|
||||||
update_all_existing_agents() {
|
update_all_existing_agents() {
|
||||||
local found_agent=false
|
local found_agent=false
|
||||||
|
local _updated_paths=()
|
||||||
# Check each possible agent file and update if it exists
|
|
||||||
if [[ -f "$CLAUDE_FILE" ]]; then
|
|
||||||
update_agent_file "$CLAUDE_FILE" "Claude Code"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$GEMINI_FILE" ]]; then
|
|
||||||
update_agent_file "$GEMINI_FILE" "Gemini CLI"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$COPILOT_FILE" ]]; then
|
|
||||||
update_agent_file "$COPILOT_FILE" "GitHub Copilot"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$CURSOR_FILE" ]]; then
|
|
||||||
update_agent_file "$CURSOR_FILE" "Cursor IDE"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$QWEN_FILE" ]]; then
|
|
||||||
update_agent_file "$QWEN_FILE" "Qwen Code"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$AGENTS_FILE" ]]; then
|
|
||||||
update_agent_file "$AGENTS_FILE" "Codex/opencode"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$WINDSURF_FILE" ]]; then
|
|
||||||
update_agent_file "$WINDSURF_FILE" "Windsurf"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$KILOCODE_FILE" ]]; then
|
|
||||||
update_agent_file "$KILOCODE_FILE" "Kilo Code"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$AUGGIE_FILE" ]]; then
|
# Helper: skip non-existent files and files already updated (dedup by
|
||||||
update_agent_file "$AUGGIE_FILE" "Auggie CLI"
|
# realpath so that variables pointing to the same file — e.g. AMP_FILE,
|
||||||
|
# KIRO_FILE, BOB_FILE all resolving to AGENTS_FILE — are only written once).
|
||||||
|
# Uses a linear array instead of associative array for bash 3.2 compatibility.
|
||||||
|
update_if_new() {
|
||||||
|
local file="$1" name="$2"
|
||||||
|
[[ -f "$file" ]] || return 0
|
||||||
|
local real_path
|
||||||
|
real_path=$(realpath "$file" 2>/dev/null || echo "$file")
|
||||||
|
local p
|
||||||
|
if [[ ${#_updated_paths[@]} -gt 0 ]]; then
|
||||||
|
for p in "${_updated_paths[@]}"; do
|
||||||
|
[[ "$p" == "$real_path" ]] && return 0
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
update_agent_file "$file" "$name" || return 1
|
||||||
|
_updated_paths+=("$real_path")
|
||||||
found_agent=true
|
found_agent=true
|
||||||
fi
|
}
|
||||||
|
|
||||||
if [[ -f "$ROO_FILE" ]]; then
|
|
||||||
update_agent_file "$ROO_FILE" "Roo Code"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$CODEBUDDY_FILE" ]]; then
|
update_if_new "$CLAUDE_FILE" "Claude Code"
|
||||||
update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI"
|
update_if_new "$GEMINI_FILE" "Gemini CLI"
|
||||||
found_agent=true
|
update_if_new "$COPILOT_FILE" "GitHub Copilot"
|
||||||
fi
|
update_if_new "$CURSOR_FILE" "Cursor IDE"
|
||||||
|
update_if_new "$QWEN_FILE" "Qwen Code"
|
||||||
|
update_if_new "$AGENTS_FILE" "Codex/opencode"
|
||||||
|
update_if_new "$AMP_FILE" "Amp"
|
||||||
|
update_if_new "$KIRO_FILE" "Kiro CLI"
|
||||||
|
update_if_new "$BOB_FILE" "IBM Bob"
|
||||||
|
update_if_new "$WINDSURF_FILE" "Windsurf"
|
||||||
|
update_if_new "$KILOCODE_FILE" "Kilo Code"
|
||||||
|
update_if_new "$AUGGIE_FILE" "Auggie CLI"
|
||||||
|
update_if_new "$ROO_FILE" "Roo Code"
|
||||||
|
update_if_new "$CODEBUDDY_FILE" "CodeBuddy CLI"
|
||||||
|
update_if_new "$SHAI_FILE" "SHAI"
|
||||||
|
update_if_new "$TABNINE_FILE" "Tabnine CLI"
|
||||||
|
update_if_new "$QODER_FILE" "Qoder CLI"
|
||||||
|
update_if_new "$AGY_FILE" "Antigravity"
|
||||||
|
update_if_new "$VIBE_FILE" "Mistral Vibe"
|
||||||
|
update_if_new "$KIMI_FILE" "Kimi Code"
|
||||||
|
|
||||||
if [[ -f "$SHAI_FILE" ]]; then
|
|
||||||
update_agent_file "$SHAI_FILE" "SHAI"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$TABNINE_FILE" ]]; then
|
|
||||||
update_agent_file "$TABNINE_FILE" "Tabnine CLI"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$QODER_FILE" ]]; then
|
|
||||||
update_agent_file "$QODER_FILE" "Qoder CLI"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$KIRO_FILE" ]]; then
|
|
||||||
update_agent_file "$KIRO_FILE" "Kiro CLI"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$AGY_FILE" ]]; then
|
|
||||||
update_agent_file "$AGY_FILE" "Antigravity"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
if [[ -f "$BOB_FILE" ]]; then
|
|
||||||
update_agent_file "$BOB_FILE" "IBM Bob"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$VIBE_FILE" ]]; then
|
|
||||||
update_agent_file "$VIBE_FILE" "Mistral Vibe"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f "$KIMI_FILE" ]]; then
|
|
||||||
update_agent_file "$KIMI_FILE" "Kimi Code"
|
|
||||||
found_agent=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
# If no agent files exist, create a default Claude file
|
# If no agent files exist, create a default Claude file
|
||||||
if [[ "$found_agent" == false ]]; then
|
if [[ "$found_agent" == false ]]; then
|
||||||
log_info "No existing agent files found, creating default Claude file..."
|
log_info "No existing agent files found, creating default Claude file..."
|
||||||
update_agent_file "$CLAUDE_FILE" "Claude Code"
|
update_agent_file "$CLAUDE_FILE" "Claude Code" || return 1
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
print_summary() {
|
print_summary() {
|
||||||
|
|||||||
@@ -331,7 +331,7 @@ function Update-ExistingAgentFile {
|
|||||||
if ($existingChanges -lt 2) { $output.Add($line); $existingChanges++ }
|
if ($existingChanges -lt 2) { $output.Add($line); $existingChanges++ }
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if ($line -match '\*\*Last updated\*\*: .*\d{4}-\d{2}-\d{2}') {
|
if ($line -match '(\*\*)?Last updated(\*\*)?: .*\d{4}-\d{2}-\d{2}') {
|
||||||
$output.Add(($line -replace '\d{4}-\d{2}-\d{2}',$Date.ToString('yyyy-MM-dd')))
|
$output.Add(($line -replace '\d{4}-\d{2}-\d{2}',$Date.ToString('yyyy-MM-dd')))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -12,6 +12,7 @@ import os
|
|||||||
import tempfile
|
import tempfile
|
||||||
import zipfile
|
import zipfile
|
||||||
import shutil
|
import shutil
|
||||||
|
import copy
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Optional, Dict, List, Any, Callable, Set
|
from typing import Optional, Dict, List, Any, Callable, Set
|
||||||
@@ -228,6 +229,54 @@ class ExtensionRegistry:
|
|||||||
}
|
}
|
||||||
self._save()
|
self._save()
|
||||||
|
|
||||||
|
def update(self, extension_id: str, metadata: dict):
|
||||||
|
"""Update extension metadata in registry, merging with existing entry.
|
||||||
|
|
||||||
|
Merges the provided metadata with the existing entry, preserving any
|
||||||
|
fields not specified in the new metadata. The installed_at timestamp
|
||||||
|
is always preserved from the original entry.
|
||||||
|
|
||||||
|
Use this method instead of add() when updating existing extension
|
||||||
|
metadata (e.g., enabling/disabling) to preserve the original
|
||||||
|
installation timestamp and other existing fields.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
extension_id: Extension ID
|
||||||
|
metadata: Extension metadata fields to update (merged with existing)
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
KeyError: If extension is not installed
|
||||||
|
"""
|
||||||
|
if extension_id not in self.data["extensions"]:
|
||||||
|
raise KeyError(f"Extension '{extension_id}' is not installed")
|
||||||
|
# Merge new metadata with existing, preserving original installed_at
|
||||||
|
existing = self.data["extensions"][extension_id]
|
||||||
|
# Merge: existing fields preserved, new fields override
|
||||||
|
merged = {**existing, **metadata}
|
||||||
|
# Always preserve original installed_at based on key existence, not truthiness,
|
||||||
|
# to handle cases where the field exists but may be falsy (legacy/corruption)
|
||||||
|
if "installed_at" in existing:
|
||||||
|
merged["installed_at"] = existing["installed_at"]
|
||||||
|
else:
|
||||||
|
# If not present in existing, explicitly remove from merged if caller provided it
|
||||||
|
merged.pop("installed_at", None)
|
||||||
|
self.data["extensions"][extension_id] = merged
|
||||||
|
self._save()
|
||||||
|
|
||||||
|
def restore(self, extension_id: str, metadata: dict):
|
||||||
|
"""Restore extension metadata to registry without modifying timestamps.
|
||||||
|
|
||||||
|
Use this method for rollback scenarios where you have a complete backup
|
||||||
|
of the registry entry (including installed_at) and want to restore it
|
||||||
|
exactly as it was.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
extension_id: Extension ID
|
||||||
|
metadata: Complete extension metadata including installed_at
|
||||||
|
"""
|
||||||
|
self.data["extensions"][extension_id] = dict(metadata)
|
||||||
|
self._save()
|
||||||
|
|
||||||
def remove(self, extension_id: str):
|
def remove(self, extension_id: str):
|
||||||
"""Remove extension from registry.
|
"""Remove extension from registry.
|
||||||
|
|
||||||
@@ -241,21 +290,28 @@ class ExtensionRegistry:
|
|||||||
def get(self, extension_id: str) -> Optional[dict]:
|
def get(self, extension_id: str) -> Optional[dict]:
|
||||||
"""Get extension metadata from registry.
|
"""Get extension metadata from registry.
|
||||||
|
|
||||||
|
Returns a deep copy to prevent callers from accidentally mutating
|
||||||
|
nested internal registry state without going through the write path.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
extension_id: Extension ID
|
extension_id: Extension ID
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Extension metadata or None if not found
|
Deep copy of extension metadata, or None if not found
|
||||||
"""
|
"""
|
||||||
return self.data["extensions"].get(extension_id)
|
entry = self.data["extensions"].get(extension_id)
|
||||||
|
return copy.deepcopy(entry) if entry is not None else None
|
||||||
|
|
||||||
def list(self) -> Dict[str, dict]:
|
def list(self) -> Dict[str, dict]:
|
||||||
"""Get all installed extensions.
|
"""Get all installed extensions.
|
||||||
|
|
||||||
|
Returns a deep copy of the extensions mapping to prevent callers
|
||||||
|
from accidentally mutating nested internal registry state.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Dictionary of extension_id -> metadata
|
Dictionary of extension_id -> metadata (deep copies)
|
||||||
"""
|
"""
|
||||||
return self.data["extensions"]
|
return copy.deepcopy(self.data["extensions"])
|
||||||
|
|
||||||
def is_installed(self, extension_id: str) -> bool:
|
def is_installed(self, extension_id: str) -> bool:
|
||||||
"""Check if extension is installed.
|
"""Check if extension is installed.
|
||||||
@@ -600,7 +656,7 @@ class ExtensionManager:
|
|||||||
result.append({
|
result.append({
|
||||||
"id": ext_id,
|
"id": ext_id,
|
||||||
"name": manifest.name,
|
"name": manifest.name,
|
||||||
"version": metadata["version"],
|
"version": metadata.get("version", "unknown"),
|
||||||
"description": manifest.description,
|
"description": manifest.description,
|
||||||
"enabled": metadata.get("enabled", True),
|
"enabled": metadata.get("enabled", True),
|
||||||
"installed_at": metadata.get("installed_at"),
|
"installed_at": metadata.get("installed_at"),
|
||||||
@@ -692,9 +748,9 @@ class CommandRegistrar:
|
|||||||
},
|
},
|
||||||
"qwen": {
|
"qwen": {
|
||||||
"dir": ".qwen/commands",
|
"dir": ".qwen/commands",
|
||||||
"format": "toml",
|
"format": "markdown",
|
||||||
"args": "{{args}}",
|
"args": "$ARGUMENTS",
|
||||||
"extension": ".toml"
|
"extension": ".md"
|
||||||
},
|
},
|
||||||
"opencode": {
|
"opencode": {
|
||||||
"dir": ".opencode/command",
|
"dir": ".opencode/command",
|
||||||
@@ -1112,12 +1168,13 @@ class ExtensionCatalog:
|
|||||||
config_path: Path to extension-catalogs.yml
|
config_path: Path to extension-catalogs.yml
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Ordered list of CatalogEntry objects, or None if file doesn't exist
|
Ordered list of CatalogEntry objects, or None if file doesn't exist.
|
||||||
or contains no valid catalog entries.
|
|
||||||
|
|
||||||
Raises:
|
Raises:
|
||||||
ValidationError: If any catalog entry has an invalid URL,
|
ValidationError: If any catalog entry has an invalid URL,
|
||||||
the file cannot be parsed, or a priority value is invalid.
|
the file cannot be parsed, a priority value is invalid,
|
||||||
|
or the file exists but contains no valid catalog entries
|
||||||
|
(fail-closed for security).
|
||||||
"""
|
"""
|
||||||
if not config_path.exists():
|
if not config_path.exists():
|
||||||
return None
|
return None
|
||||||
@@ -1129,12 +1186,17 @@ class ExtensionCatalog:
|
|||||||
)
|
)
|
||||||
catalogs_data = data.get("catalogs", [])
|
catalogs_data = data.get("catalogs", [])
|
||||||
if not catalogs_data:
|
if not catalogs_data:
|
||||||
return None
|
# File exists but has no catalogs key or empty list - fail closed
|
||||||
|
raise ValidationError(
|
||||||
|
f"Catalog config {config_path} exists but contains no 'catalogs' entries. "
|
||||||
|
f"Remove the file to use built-in defaults, or add valid catalog entries."
|
||||||
|
)
|
||||||
if not isinstance(catalogs_data, list):
|
if not isinstance(catalogs_data, list):
|
||||||
raise ValidationError(
|
raise ValidationError(
|
||||||
f"Invalid catalog config: 'catalogs' must be a list, got {type(catalogs_data).__name__}"
|
f"Invalid catalog config: 'catalogs' must be a list, got {type(catalogs_data).__name__}"
|
||||||
)
|
)
|
||||||
entries: List[CatalogEntry] = []
|
entries: List[CatalogEntry] = []
|
||||||
|
skipped_entries: List[int] = []
|
||||||
for idx, item in enumerate(catalogs_data):
|
for idx, item in enumerate(catalogs_data):
|
||||||
if not isinstance(item, dict):
|
if not isinstance(item, dict):
|
||||||
raise ValidationError(
|
raise ValidationError(
|
||||||
@@ -1142,6 +1204,7 @@ class ExtensionCatalog:
|
|||||||
)
|
)
|
||||||
url = str(item.get("url", "")).strip()
|
url = str(item.get("url", "")).strip()
|
||||||
if not url:
|
if not url:
|
||||||
|
skipped_entries.append(idx)
|
||||||
continue
|
continue
|
||||||
self._validate_catalog_url(url)
|
self._validate_catalog_url(url)
|
||||||
try:
|
try:
|
||||||
@@ -1164,7 +1227,14 @@ class ExtensionCatalog:
|
|||||||
description=str(item.get("description", "")),
|
description=str(item.get("description", "")),
|
||||||
))
|
))
|
||||||
entries.sort(key=lambda e: e.priority)
|
entries.sort(key=lambda e: e.priority)
|
||||||
return entries if entries else None
|
if not entries:
|
||||||
|
# All entries were invalid (missing URLs) - fail closed for security
|
||||||
|
raise ValidationError(
|
||||||
|
f"Catalog config {config_path} contains {len(catalogs_data)} entries but none have valid URLs "
|
||||||
|
f"(entries at indices {skipped_entries} were skipped). "
|
||||||
|
f"Each catalog entry must have a 'url' field."
|
||||||
|
)
|
||||||
|
return entries
|
||||||
|
|
||||||
def get_active_catalogs(self) -> List[CatalogEntry]:
|
def get_active_catalogs(self) -> List[CatalogEntry]:
|
||||||
"""Get the ordered list of active catalogs.
|
"""Get the ordered list of active catalogs.
|
||||||
|
|||||||
@@ -129,7 +129,7 @@ Given that feature description, do this:
|
|||||||
|
|
||||||
c. **Handle Validation Results**:
|
c. **Handle Validation Results**:
|
||||||
|
|
||||||
- **If all items pass**: Mark checklist complete and proceed to step 6
|
- **If all items pass**: Mark checklist complete and proceed to step 7
|
||||||
|
|
||||||
- **If items fail (excluding [NEEDS CLARIFICATION])**:
|
- **If items fail (excluding [NEEDS CLARIFICATION])**:
|
||||||
1. List the failing items and specific issues
|
1. List the failing items and specific issues
|
||||||
@@ -178,8 +178,6 @@ Given that feature description, do this:
|
|||||||
|
|
||||||
**NOTE:** The script creates and checks out the new branch and initializes the spec file before writing.
|
**NOTE:** The script creates and checks out the new branch and initializes the spec file before writing.
|
||||||
|
|
||||||
## General Guidelines
|
|
||||||
|
|
||||||
## Quick Guidelines
|
## Quick Guidelines
|
||||||
|
|
||||||
- Focus on **WHAT** users need and **WHY**.
|
- Focus on **WHAT** users need and **WHY**.
|
||||||
|
|||||||
@@ -62,7 +62,14 @@ class TestAgentConfigConsistency:
|
|||||||
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
|
ps_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.ps1").read_text(encoding="utf-8")
|
||||||
|
|
||||||
assert re.search(r"'shai'\s*\{.*?\.shai/commands", ps_text, re.S) is not None
|
assert re.search(r"'shai'\s*\{.*?\.shai/commands", ps_text, re.S) is not None
|
||||||
assert re.search(r"'agy'\s*\{.*?\.agent/workflows", ps_text, re.S) is not None
|
assert re.search(r"'agy'\s*\{.*?\.agent/commands", ps_text, re.S) is not None
|
||||||
|
|
||||||
|
def test_release_sh_switch_has_shai_and_agy_generation(self):
|
||||||
|
"""Bash release builder must generate files for shai and agy agents."""
|
||||||
|
sh_text = (REPO_ROOT / ".github" / "workflows" / "scripts" / "create-release-packages.sh").read_text(encoding="utf-8")
|
||||||
|
|
||||||
|
assert re.search(r"shai\)\s*\n.*?\.shai/commands", sh_text, re.S) is not None
|
||||||
|
assert re.search(r"agy\)\s*\n.*?\.agent/commands", sh_text, re.S) is not None
|
||||||
|
|
||||||
def test_init_ai_help_includes_roo_and_kiro_alias(self):
|
def test_init_ai_help_includes_roo_and_kiro_alias(self):
|
||||||
"""CLI help text for --ai should stay in sync with agent config and alias guidance."""
|
"""CLI help text for --ai should stay in sync with agent config and alias guidance."""
|
||||||
|
|||||||
@@ -132,6 +132,16 @@ def commands_dir_gemini(project_dir):
|
|||||||
return cmd_dir
|
return cmd_dir
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def commands_dir_qwen(project_dir):
|
||||||
|
"""Create a populated .qwen/commands directory (Markdown format)."""
|
||||||
|
cmd_dir = project_dir / ".qwen" / "commands"
|
||||||
|
cmd_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
for name in ["speckit.specify.md", "speckit.plan.md", "speckit.tasks.md"]:
|
||||||
|
(cmd_dir / name).write_text(f"# {name}\nContent here\n")
|
||||||
|
return cmd_dir
|
||||||
|
|
||||||
|
|
||||||
# ===== _get_skills_dir Tests =====
|
# ===== _get_skills_dir Tests =====
|
||||||
|
|
||||||
class TestGetSkillsDir:
|
class TestGetSkillsDir:
|
||||||
@@ -390,6 +400,28 @@ class TestInstallAiSkills:
|
|||||||
# .toml commands should be untouched
|
# .toml commands should be untouched
|
||||||
assert (cmds_dir / "speckit.specify.toml").exists()
|
assert (cmds_dir / "speckit.specify.toml").exists()
|
||||||
|
|
||||||
|
def test_qwen_md_commands_dir_installs_skills(self, project_dir):
|
||||||
|
"""Qwen now uses Markdown format; skills should install directly from .qwen/commands/."""
|
||||||
|
cmds_dir = project_dir / ".qwen" / "commands"
|
||||||
|
cmds_dir.mkdir(parents=True)
|
||||||
|
(cmds_dir / "speckit.specify.md").write_text(
|
||||||
|
"---\ndescription: Create or update the feature specification.\n---\n\n# Specify\n\nBody.\n"
|
||||||
|
)
|
||||||
|
(cmds_dir / "speckit.plan.md").write_text(
|
||||||
|
"---\ndescription: Generate implementation plan.\n---\n\n# Plan\n\nBody.\n"
|
||||||
|
)
|
||||||
|
|
||||||
|
result = install_ai_skills(project_dir, "qwen")
|
||||||
|
|
||||||
|
assert result is True
|
||||||
|
skills_dir = project_dir / ".qwen" / "skills"
|
||||||
|
assert skills_dir.exists()
|
||||||
|
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
|
||||||
|
assert len(skill_dirs) >= 1
|
||||||
|
# .md commands should be untouched
|
||||||
|
assert (cmds_dir / "speckit.specify.md").exists()
|
||||||
|
assert (cmds_dir / "speckit.plan.md").exists()
|
||||||
|
|
||||||
@pytest.mark.parametrize("agent_key", [k for k in AGENT_CONFIG.keys() if k != "generic"])
|
@pytest.mark.parametrize("agent_key", [k for k in AGENT_CONFIG.keys() if k != "generic"])
|
||||||
def test_skills_install_for_all_agents(self, temp_dir, agent_key):
|
def test_skills_install_for_all_agents(self, temp_dir, agent_key):
|
||||||
"""install_ai_skills should produce skills for every configured agent."""
|
"""install_ai_skills should produce skills for every configured agent."""
|
||||||
@@ -446,6 +478,15 @@ class TestCommandCoexistence:
|
|||||||
remaining = list(commands_dir_gemini.glob("speckit.*"))
|
remaining = list(commands_dir_gemini.glob("speckit.*"))
|
||||||
assert len(remaining) == 3
|
assert len(remaining) == 3
|
||||||
|
|
||||||
|
def test_existing_commands_preserved_qwen(self, project_dir, templates_dir, commands_dir_qwen):
|
||||||
|
"""install_ai_skills must NOT remove pre-existing .qwen/commands files."""
|
||||||
|
assert len(list(commands_dir_qwen.glob("speckit.*"))) == 3
|
||||||
|
|
||||||
|
install_ai_skills(project_dir, "qwen")
|
||||||
|
|
||||||
|
remaining = list(commands_dir_qwen.glob("speckit.*"))
|
||||||
|
assert len(remaining) == 3
|
||||||
|
|
||||||
def test_commands_dir_not_removed(self, project_dir, templates_dir, commands_dir_claude):
|
def test_commands_dir_not_removed(self, project_dir, templates_dir, commands_dir_claude):
|
||||||
"""install_ai_skills must not remove the commands directory."""
|
"""install_ai_skills must not remove the commands directory."""
|
||||||
install_ai_skills(project_dir, "claude")
|
install_ai_skills(project_dir, "claude")
|
||||||
@@ -661,6 +702,59 @@ class TestCliValidation:
|
|||||||
assert "Usage:" in result.output
|
assert "Usage:" in result.output
|
||||||
assert "--ai" in result.output
|
assert "--ai" in result.output
|
||||||
|
|
||||||
|
def test_agy_without_ai_skills_fails(self):
|
||||||
|
"""--ai agy without --ai-skills should fail with exit code 1."""
|
||||||
|
from typer.testing import CliRunner
|
||||||
|
|
||||||
|
runner = CliRunner()
|
||||||
|
result = runner.invoke(app, ["init", "test-proj", "--ai", "agy"])
|
||||||
|
|
||||||
|
assert result.exit_code == 1
|
||||||
|
assert "Explicit command support was deprecated in Antigravity version 1.20.5." in result.output
|
||||||
|
assert "--ai-skills" in result.output
|
||||||
|
|
||||||
|
def test_interactive_agy_without_ai_skills_prompts_skills(self, monkeypatch):
|
||||||
|
"""Interactive selector returning agy without --ai-skills should automatically enable --ai-skills."""
|
||||||
|
from typer.testing import CliRunner
|
||||||
|
|
||||||
|
# Mock select_with_arrows to simulate the user picking 'agy' for AI,
|
||||||
|
# and return a deterministic default for any other prompts to avoid
|
||||||
|
# calling the real interactive implementation.
|
||||||
|
def _fake_select_with_arrows(*args, **kwargs):
|
||||||
|
options = kwargs.get("options")
|
||||||
|
if options is None and len(args) >= 1:
|
||||||
|
options = args[0]
|
||||||
|
|
||||||
|
# If the options include 'agy', simulate selecting it.
|
||||||
|
if isinstance(options, dict) and "agy" in options:
|
||||||
|
return "agy"
|
||||||
|
if isinstance(options, (list, tuple)) and "agy" in options:
|
||||||
|
return "agy"
|
||||||
|
|
||||||
|
# For any other prompt, return a deterministic, non-interactive default:
|
||||||
|
# pick the first option if available.
|
||||||
|
if isinstance(options, dict) and options:
|
||||||
|
return next(iter(options.keys()))
|
||||||
|
if isinstance(options, (list, tuple)) and options:
|
||||||
|
return options[0]
|
||||||
|
|
||||||
|
# If no options are provided, fall back to None (should not occur in normal use).
|
||||||
|
return None
|
||||||
|
|
||||||
|
monkeypatch.setattr("specify_cli.select_with_arrows", _fake_select_with_arrows)
|
||||||
|
|
||||||
|
# Mock download_and_extract_template to prevent real HTTP downloads during testing
|
||||||
|
monkeypatch.setattr("specify_cli.download_and_extract_template", lambda *args, **kwargs: None)
|
||||||
|
# We need to bypass the `git init` step, wait, it has `--no-git` by default in tests maybe?
|
||||||
|
runner = CliRunner()
|
||||||
|
# Create temp dir to avoid directory already exists errors or whatever
|
||||||
|
with runner.isolated_filesystem():
|
||||||
|
result = runner.invoke(app, ["init", "test-proj", "--no-git"])
|
||||||
|
|
||||||
|
# Interactive selection should NOT raise the deprecation error!
|
||||||
|
assert result.exit_code == 0
|
||||||
|
assert "Explicit command support was deprecated" not in result.output
|
||||||
|
|
||||||
def test_ai_skills_flag_appears_in_help(self):
|
def test_ai_skills_flag_appears_in_help(self):
|
||||||
"""--ai-skills should appear in init --help output."""
|
"""--ai-skills should appear in init --help output."""
|
||||||
from typer.testing import CliRunner
|
from typer.testing import CliRunner
|
||||||
|
|||||||
@@ -277,6 +277,135 @@ class TestExtensionRegistry:
|
|||||||
assert registry2.is_installed("test-ext")
|
assert registry2.is_installed("test-ext")
|
||||||
assert registry2.get("test-ext")["version"] == "1.0.0"
|
assert registry2.get("test-ext")["version"] == "1.0.0"
|
||||||
|
|
||||||
|
def test_update_preserves_installed_at(self, temp_dir):
|
||||||
|
"""Test that update() preserves the original installed_at timestamp."""
|
||||||
|
extensions_dir = temp_dir / "extensions"
|
||||||
|
extensions_dir.mkdir()
|
||||||
|
|
||||||
|
registry = ExtensionRegistry(extensions_dir)
|
||||||
|
registry.add("test-ext", {"version": "1.0.0", "enabled": True})
|
||||||
|
|
||||||
|
# Get original installed_at
|
||||||
|
original_data = registry.get("test-ext")
|
||||||
|
original_installed_at = original_data["installed_at"]
|
||||||
|
|
||||||
|
# Update with new metadata
|
||||||
|
registry.update("test-ext", {"version": "2.0.0", "enabled": False})
|
||||||
|
|
||||||
|
# Verify installed_at is preserved
|
||||||
|
updated_data = registry.get("test-ext")
|
||||||
|
assert updated_data["installed_at"] == original_installed_at
|
||||||
|
assert updated_data["version"] == "2.0.0"
|
||||||
|
assert updated_data["enabled"] is False
|
||||||
|
|
||||||
|
def test_update_merges_with_existing(self, temp_dir):
|
||||||
|
"""Test that update() merges new metadata with existing fields."""
|
||||||
|
extensions_dir = temp_dir / "extensions"
|
||||||
|
extensions_dir.mkdir()
|
||||||
|
|
||||||
|
registry = ExtensionRegistry(extensions_dir)
|
||||||
|
registry.add("test-ext", {
|
||||||
|
"version": "1.0.0",
|
||||||
|
"enabled": True,
|
||||||
|
"registered_commands": {"claude": ["cmd1", "cmd2"]},
|
||||||
|
})
|
||||||
|
|
||||||
|
# Update with partial metadata (only enabled field)
|
||||||
|
registry.update("test-ext", {"enabled": False})
|
||||||
|
|
||||||
|
# Verify existing fields are preserved
|
||||||
|
updated_data = registry.get("test-ext")
|
||||||
|
assert updated_data["enabled"] is False
|
||||||
|
assert updated_data["version"] == "1.0.0" # Preserved
|
||||||
|
assert updated_data["registered_commands"] == {"claude": ["cmd1", "cmd2"]} # Preserved
|
||||||
|
|
||||||
|
def test_update_raises_for_missing_extension(self, temp_dir):
|
||||||
|
"""Test that update() raises KeyError for non-installed extension."""
|
||||||
|
extensions_dir = temp_dir / "extensions"
|
||||||
|
extensions_dir.mkdir()
|
||||||
|
|
||||||
|
registry = ExtensionRegistry(extensions_dir)
|
||||||
|
|
||||||
|
with pytest.raises(KeyError, match="not installed"):
|
||||||
|
registry.update("nonexistent-ext", {"enabled": False})
|
||||||
|
|
||||||
|
def test_restore_overwrites_completely(self, temp_dir):
|
||||||
|
"""Test that restore() overwrites the registry entry completely."""
|
||||||
|
extensions_dir = temp_dir / "extensions"
|
||||||
|
extensions_dir.mkdir()
|
||||||
|
|
||||||
|
registry = ExtensionRegistry(extensions_dir)
|
||||||
|
registry.add("test-ext", {"version": "2.0.0", "enabled": True})
|
||||||
|
|
||||||
|
# Restore with complete backup data
|
||||||
|
backup_data = {
|
||||||
|
"version": "1.0.0",
|
||||||
|
"enabled": False,
|
||||||
|
"installed_at": "2024-01-01T00:00:00+00:00",
|
||||||
|
"registered_commands": {"claude": ["old-cmd"]},
|
||||||
|
}
|
||||||
|
registry.restore("test-ext", backup_data)
|
||||||
|
|
||||||
|
# Verify entry is exactly as restored
|
||||||
|
restored_data = registry.get("test-ext")
|
||||||
|
assert restored_data == backup_data
|
||||||
|
|
||||||
|
def test_restore_can_recreate_removed_entry(self, temp_dir):
|
||||||
|
"""Test that restore() can recreate an entry after remove()."""
|
||||||
|
extensions_dir = temp_dir / "extensions"
|
||||||
|
extensions_dir.mkdir()
|
||||||
|
|
||||||
|
registry = ExtensionRegistry(extensions_dir)
|
||||||
|
registry.add("test-ext", {"version": "1.0.0"})
|
||||||
|
|
||||||
|
# Save backup and remove
|
||||||
|
backup = registry.get("test-ext").copy()
|
||||||
|
registry.remove("test-ext")
|
||||||
|
assert not registry.is_installed("test-ext")
|
||||||
|
|
||||||
|
# Restore should recreate the entry
|
||||||
|
registry.restore("test-ext", backup)
|
||||||
|
assert registry.is_installed("test-ext")
|
||||||
|
assert registry.get("test-ext")["version"] == "1.0.0"
|
||||||
|
|
||||||
|
def test_get_returns_deep_copy(self, temp_dir):
|
||||||
|
"""Test that get() returns deep copies for nested structures."""
|
||||||
|
extensions_dir = temp_dir / "extensions"
|
||||||
|
extensions_dir.mkdir()
|
||||||
|
|
||||||
|
registry = ExtensionRegistry(extensions_dir)
|
||||||
|
metadata = {
|
||||||
|
"version": "1.0.0",
|
||||||
|
"registered_commands": {"claude": ["cmd1"]},
|
||||||
|
}
|
||||||
|
registry.add("test-ext", metadata)
|
||||||
|
|
||||||
|
fetched = registry.get("test-ext")
|
||||||
|
fetched["registered_commands"]["claude"].append("cmd2")
|
||||||
|
|
||||||
|
# Internal registry must remain unchanged.
|
||||||
|
internal = registry.data["extensions"]["test-ext"]
|
||||||
|
assert internal["registered_commands"] == {"claude": ["cmd1"]}
|
||||||
|
|
||||||
|
def test_list_returns_deep_copy(self, temp_dir):
|
||||||
|
"""Test that list() returns deep copies for nested structures."""
|
||||||
|
extensions_dir = temp_dir / "extensions"
|
||||||
|
extensions_dir.mkdir()
|
||||||
|
|
||||||
|
registry = ExtensionRegistry(extensions_dir)
|
||||||
|
metadata = {
|
||||||
|
"version": "1.0.0",
|
||||||
|
"registered_commands": {"claude": ["cmd1"]},
|
||||||
|
}
|
||||||
|
registry.add("test-ext", metadata)
|
||||||
|
|
||||||
|
listed = registry.list()
|
||||||
|
listed["test-ext"]["registered_commands"]["claude"].append("cmd2")
|
||||||
|
|
||||||
|
# Internal registry must remain unchanged.
|
||||||
|
internal = registry.data["extensions"]["test-ext"]
|
||||||
|
assert internal["registered_commands"] == {"claude": ["cmd1"]}
|
||||||
|
|
||||||
|
|
||||||
# ===== ExtensionManager Tests =====
|
# ===== ExtensionManager Tests =====
|
||||||
|
|
||||||
@@ -412,6 +541,15 @@ class TestCommandRegistrar:
|
|||||||
assert "codex" in CommandRegistrar.AGENT_CONFIGS
|
assert "codex" in CommandRegistrar.AGENT_CONFIGS
|
||||||
assert CommandRegistrar.AGENT_CONFIGS["codex"]["dir"] == ".codex/prompts"
|
assert CommandRegistrar.AGENT_CONFIGS["codex"]["dir"] == ".codex/prompts"
|
||||||
|
|
||||||
|
def test_qwen_agent_config_is_markdown(self):
|
||||||
|
"""Qwen should use Markdown format with $ARGUMENTS (not TOML)."""
|
||||||
|
assert "qwen" in CommandRegistrar.AGENT_CONFIGS
|
||||||
|
cfg = CommandRegistrar.AGENT_CONFIGS["qwen"]
|
||||||
|
assert cfg["dir"] == ".qwen/commands"
|
||||||
|
assert cfg["format"] == "markdown"
|
||||||
|
assert cfg["args"] == "$ARGUMENTS"
|
||||||
|
assert cfg["extension"] == ".md"
|
||||||
|
|
||||||
def test_parse_frontmatter_valid(self):
|
def test_parse_frontmatter_valid(self):
|
||||||
"""Test parsing valid YAML frontmatter."""
|
"""Test parsing valid YAML frontmatter."""
|
||||||
content = """---
|
content = """---
|
||||||
@@ -1402,8 +1540,8 @@ class TestCatalogStack:
|
|||||||
with pytest.raises(ValidationError, match="HTTPS"):
|
with pytest.raises(ValidationError, match="HTTPS"):
|
||||||
catalog.get_active_catalogs()
|
catalog.get_active_catalogs()
|
||||||
|
|
||||||
def test_empty_project_config_falls_back_to_defaults(self, temp_dir):
|
def test_empty_project_config_raises_error(self, temp_dir):
|
||||||
"""Empty catalogs list in config falls back to default stack."""
|
"""Empty catalogs list in config raises ValidationError (fail-closed for security)."""
|
||||||
import yaml as yaml_module
|
import yaml as yaml_module
|
||||||
|
|
||||||
project_dir = self._make_project(temp_dir)
|
project_dir = self._make_project(temp_dir)
|
||||||
@@ -1412,11 +1550,32 @@ class TestCatalogStack:
|
|||||||
yaml_module.dump({"catalogs": []}, f)
|
yaml_module.dump({"catalogs": []}, f)
|
||||||
|
|
||||||
catalog = ExtensionCatalog(project_dir)
|
catalog = ExtensionCatalog(project_dir)
|
||||||
entries = catalog.get_active_catalogs()
|
|
||||||
|
|
||||||
# Falls back to default stack
|
# Fail-closed: empty config should raise, not fall back to defaults
|
||||||
assert len(entries) == 2
|
with pytest.raises(ValidationError) as exc_info:
|
||||||
assert entries[0].url == ExtensionCatalog.DEFAULT_CATALOG_URL
|
catalog.get_active_catalogs()
|
||||||
|
assert "contains no 'catalogs' entries" in str(exc_info.value)
|
||||||
|
|
||||||
|
def test_catalog_entries_without_urls_raises_error(self, temp_dir):
|
||||||
|
"""Catalog entries without URLs raise ValidationError (fail-closed for security)."""
|
||||||
|
import yaml as yaml_module
|
||||||
|
|
||||||
|
project_dir = self._make_project(temp_dir)
|
||||||
|
config_path = project_dir / ".specify" / "extension-catalogs.yml"
|
||||||
|
with open(config_path, "w") as f:
|
||||||
|
yaml_module.dump({
|
||||||
|
"catalogs": [
|
||||||
|
{"name": "no-url-catalog", "priority": 1},
|
||||||
|
{"name": "another-no-url", "description": "Also missing URL"},
|
||||||
|
]
|
||||||
|
}, f)
|
||||||
|
|
||||||
|
catalog = ExtensionCatalog(project_dir)
|
||||||
|
|
||||||
|
# Fail-closed: entries without URLs should raise, not fall back to defaults
|
||||||
|
with pytest.raises(ValidationError) as exc_info:
|
||||||
|
catalog.get_active_catalogs()
|
||||||
|
assert "none have valid URLs" in str(exc_info.value)
|
||||||
|
|
||||||
# --- _load_catalog_config ---
|
# --- _load_catalog_config ---
|
||||||
|
|
||||||
@@ -1943,3 +2102,238 @@ class TestExtensionIgnore:
|
|||||||
assert not (dest / "docs" / "guide.md").exists()
|
assert not (dest / "docs" / "guide.md").exists()
|
||||||
assert not (dest / "docs" / "internal.md").exists()
|
assert not (dest / "docs" / "internal.md").exists()
|
||||||
assert (dest / "docs" / "api.md").exists()
|
assert (dest / "docs" / "api.md").exists()
|
||||||
|
|
||||||
|
|
||||||
|
class TestExtensionAddCLI:
|
||||||
|
"""CLI integration tests for extension add command."""
|
||||||
|
|
||||||
|
def test_add_by_display_name_uses_resolved_id_for_download(self, tmp_path):
|
||||||
|
"""extension add by display name should use resolved ID for download_extension()."""
|
||||||
|
from typer.testing import CliRunner
|
||||||
|
from unittest.mock import patch, MagicMock
|
||||||
|
from specify_cli import app
|
||||||
|
|
||||||
|
runner = CliRunner()
|
||||||
|
|
||||||
|
# Create project structure
|
||||||
|
project_dir = tmp_path / "test-project"
|
||||||
|
project_dir.mkdir()
|
||||||
|
(project_dir / ".specify").mkdir()
|
||||||
|
(project_dir / ".specify" / "extensions").mkdir(parents=True)
|
||||||
|
|
||||||
|
# Mock catalog that returns extension by display name
|
||||||
|
mock_catalog = MagicMock()
|
||||||
|
mock_catalog.get_extension_info.return_value = None # ID lookup fails
|
||||||
|
mock_catalog.search.return_value = [
|
||||||
|
{
|
||||||
|
"id": "acme-jira-integration",
|
||||||
|
"name": "Jira Integration",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"description": "Jira integration extension",
|
||||||
|
"_install_allowed": True,
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
# Track what ID was passed to download_extension
|
||||||
|
download_called_with = []
|
||||||
|
def mock_download(extension_id):
|
||||||
|
download_called_with.append(extension_id)
|
||||||
|
# Return a path that will fail install (we just want to verify the ID)
|
||||||
|
raise ExtensionError("Mock download - checking ID was resolved")
|
||||||
|
|
||||||
|
mock_catalog.download_extension.side_effect = mock_download
|
||||||
|
|
||||||
|
with patch("specify_cli.extensions.ExtensionCatalog", return_value=mock_catalog), \
|
||||||
|
patch.object(Path, "cwd", return_value=project_dir):
|
||||||
|
result = runner.invoke(
|
||||||
|
app,
|
||||||
|
["extension", "add", "Jira Integration"],
|
||||||
|
catch_exceptions=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
assert result.exit_code != 0, (
|
||||||
|
f"Expected non-zero exit code since mock download raises, got {result.exit_code}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Verify download_extension was called with the resolved ID, not the display name
|
||||||
|
assert len(download_called_with) == 1
|
||||||
|
assert download_called_with[0] == "acme-jira-integration", (
|
||||||
|
f"Expected download_extension to be called with resolved ID 'acme-jira-integration', "
|
||||||
|
f"but was called with '{download_called_with[0]}'"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class TestExtensionUpdateCLI:
    """CLI integration tests for the ``extension update`` command."""

    @staticmethod
    def _create_extension_source(base_dir: Path, version: str, include_config: bool = False) -> Path:
        """Create a minimal extension source directory for install tests.

        Writes an ``extension.yml`` manifest plus one command file; optionally
        adds a ``linear-config.yml`` so tests can verify user config survives
        an update.
        """
        import yaml

        source_dir = base_dir / f"test-ext-{version}"
        source_dir.mkdir(parents=True, exist_ok=True)

        manifest = {
            "schema_version": "1.0",
            "extension": {
                "id": "test-ext",
                "name": "Test Extension",
                "version": version,
                "description": "A test extension",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.test.hello",
                        "file": "commands/hello.md",
                        "description": "Test command",
                    }
                ]
            },
            "hooks": {
                "after_tasks": {
                    "command": "speckit.test.hello",
                    "optional": True,
                }
            },
        }

        (source_dir / "extension.yml").write_text(yaml.dump(manifest, sort_keys=False))

        cmd_dir = source_dir / "commands"
        cmd_dir.mkdir(exist_ok=True)
        (cmd_dir / "hello.md").write_text("---\ndescription: Test\n---\n\n$ARGUMENTS\n")

        if include_config:
            (source_dir / "linear-config.yml").write_text("custom: true\nvalue: original\n")
        return source_dir

    @staticmethod
    def _create_catalog_zip(zip_path: Path, version: str):
        """Create a minimal ZIP that passes extension_update ID validation."""
        import zipfile
        import yaml

        manifest = {
            "schema_version": "1.0",
            "extension": {
                "id": "test-ext",
                "name": "Test Extension",
                "version": version,
                "description": "A test extension",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {"commands": [{"name": "speckit.test.hello", "file": "commands/hello.md"}]},
        }

        with zipfile.ZipFile(zip_path, "w") as archive:
            archive.writestr("extension.yml", yaml.dump(manifest, sort_keys=False))

    def test_update_success_preserves_installed_at(self, tmp_path):
        """Successful update should keep original installed_at and apply new version."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app

        runner = CliRunner()
        project_dir = tmp_path / "project"
        project_dir.mkdir()
        (project_dir / ".specify").mkdir()
        (project_dir / ".claude" / "commands").mkdir(parents=True)

        # Install v1.0.0 (with a user config file) so there is something to update.
        manager = ExtensionManager(project_dir)
        v1_source = self._create_extension_source(tmp_path, "1.0.0", include_config=True)
        manager.install_from_directory(v1_source, "0.1.0")
        original_installed_at = manager.registry.get("test-ext")["installed_at"]
        config_path = project_dir / ".specify" / "extensions" / "test-ext" / "linear-config.yml"
        original_config_content = config_path.read_text()

        # Prepare the v2.0.0 "catalog" artifacts the update will consume.
        zip_path = tmp_path / "test-ext-update.zip"
        self._create_catalog_zip(zip_path, "2.0.0")
        v2_source = self._create_extension_source(tmp_path, "2.0.0")

        def fake_install_from_zip(self_obj, _zip_path, speckit_version):
            # Redirect the ZIP install to the prepared v2 source directory.
            return self_obj.install_from_directory(v2_source, speckit_version)

        catalog_info = {
            "id": "test-ext",
            "name": "Test Extension",
            "version": "2.0.0",
            "_install_allowed": True,
        }
        with patch.object(Path, "cwd", return_value=project_dir), \
                patch.object(ExtensionCatalog, "get_extension_info", return_value=catalog_info), \
                patch.object(ExtensionCatalog, "download_extension", return_value=zip_path), \
                patch.object(ExtensionManager, "install_from_zip", fake_install_from_zip):
            result = runner.invoke(app, ["extension", "update", "test-ext"], input="y\n", catch_exceptions=True)

        assert result.exit_code == 0, result.output

        updated = ExtensionManager(project_dir).registry.get("test-ext")
        assert updated["version"] == "2.0.0"
        assert updated["installed_at"] == original_installed_at
        # The user's config file must be restored unchanged after the update.
        restored_config_content = (
            project_dir / ".specify" / "extensions" / "test-ext" / "linear-config.yml"
        ).read_text()
        assert restored_config_content == original_config_content

    def test_update_failure_rolls_back_registry_hooks_and_commands(self, tmp_path):
        """Failed update should restore original registry, hooks, and command files."""
        from typer.testing import CliRunner
        from unittest.mock import patch
        from specify_cli import app
        import yaml

        runner = CliRunner()
        project_dir = tmp_path / "project"
        project_dir.mkdir()
        (project_dir / ".specify").mkdir()
        (project_dir / ".claude" / "commands").mkdir(parents=True)

        # Install v1.0.0 and snapshot the state the rollback must restore.
        manager = ExtensionManager(project_dir)
        v1_source = self._create_extension_source(tmp_path, "1.0.0")
        manager.install_from_directory(v1_source, "0.1.0")

        backup_registry_entry = manager.registry.get("test-ext")
        hooks_before = yaml.safe_load((project_dir / ".specify" / "extensions.yml").read_text())

        # Resolve every command file the install registered, per agent.
        registered_commands = backup_registry_entry.get("registered_commands", {})
        registrar = CommandRegistrar()
        command_files = []
        for agent_name, cmd_names in registered_commands.items():
            agent_cfg = registrar.AGENT_CONFIGS.get(agent_name)
            if agent_cfg is None:
                # Skip agents this registrar does not know about.
                continue
            agent_dir = project_dir / agent_cfg["dir"]
            command_files.extend(
                agent_dir / f"{cmd_name}{agent_cfg['extension']}" for cmd_name in cmd_names
            )

        assert command_files, "Expected at least one registered command file"
        for cmd_file in command_files:
            assert cmd_file.exists(), f"Expected command file to exist before update: {cmd_file}"

        zip_path = tmp_path / "test-ext-update.zip"
        self._create_catalog_zip(zip_path, "2.0.0")

        catalog_info = {
            "id": "test-ext",
            "name": "Test Extension",
            "version": "2.0.0",
            "_install_allowed": True,
        }
        with patch.object(Path, "cwd", return_value=project_dir), \
                patch.object(ExtensionCatalog, "get_extension_info", return_value=catalog_info), \
                patch.object(ExtensionCatalog, "download_extension", return_value=zip_path), \
                patch.object(ExtensionManager, "install_from_zip", side_effect=RuntimeError("install failed")):
            result = runner.invoke(app, ["extension", "update", "test-ext"], input="y\n", catch_exceptions=True)

        assert result.exit_code == 1, result.output

        # Registry entry, hook config, and command files must all be rolled back.
        restored_entry = ExtensionManager(project_dir).registry.get("test-ext")
        assert restored_entry == backup_registry_entry

        hooks_after = yaml.safe_load((project_dir / ".specify" / "extensions.yml").read_text())
        assert hooks_after == hooks_before

        for cmd_file in command_files:
            assert cmd_file.exists(), f"Expected command file to be restored after rollback: {cmd_file}"
|
||||||
|
|||||||
Reference in New Issue
Block a user