mirror of
https://github.com/github/spec-kit.git
synced 2026-04-01 02:03:09 +00:00
Compare commits
40 Commits
v0.4.0
...
copilot/ag
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8168306467 | ||
|
|
07a7ad8757 | ||
|
|
dcd93e699a | ||
|
|
a84598617f | ||
|
|
a2f03ceacf | ||
|
|
7ccbf6913a | ||
|
|
868bfd06c4 | ||
|
|
069554271b | ||
|
|
b19a7eedfa | ||
|
|
9cb3f3d1ad | ||
|
|
f8da535d71 | ||
|
|
edaa5a7ff1 | ||
|
|
5be705e414 | ||
|
|
796b4f47c4 | ||
|
|
6b1f45c50c | ||
|
|
8778c26dcf | ||
|
|
41d1f4b0ac | ||
|
|
9c2481fd67 | ||
|
|
8520241dfe | ||
|
|
362868a342 | ||
|
|
d7206126e0 | ||
|
|
b22f381c0d | ||
|
|
ccc44dd00a | ||
|
|
2c2fea8783 | ||
|
|
4b4bd735a3 | ||
|
|
36019ebf1b | ||
|
|
fb152eb824 | ||
|
|
00e5dc1f91 | ||
|
|
eeda669c19 | ||
|
|
ebc61067e8 | ||
|
|
2c2936022c | ||
|
|
816c1160e9 | ||
|
|
bc766c3101 | ||
|
|
f132f748e3 | ||
|
|
ee65758e2b | ||
|
|
a01180955d | ||
|
|
b1ba972978 | ||
|
|
24247c24c9 | ||
|
|
dc7f09a711 | ||
|
|
b72a5850fe |
@@ -12,7 +12,7 @@ body:
|
||||
- Review the [Extension Publishing Guide](https://github.com/github/spec-kit/blob/main/extensions/EXTENSION-PUBLISHING-GUIDE.md)
|
||||
- Ensure your extension has a valid `extension.yml` manifest
|
||||
- Create a GitHub release with a version tag (e.g., v1.0.0)
|
||||
- Test installation: `specify extension add --from <your-release-url>`
|
||||
- Test installation: `specify extension add <extension-name> --from <your-release-url>`
|
||||
|
||||
- type: input
|
||||
id: extension-id
|
||||
@@ -229,7 +229,7 @@ body:
|
||||
placeholder: |
|
||||
```bash
|
||||
# Install extension
|
||||
specify extension add --from https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip
|
||||
specify extension add <extension-name> --from https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip
|
||||
|
||||
# Use a command
|
||||
/speckit.your-extension.command-name arg1 arg2
|
||||
|
||||
2
.github/workflows/docs.yml
vendored
2
.github/workflows/docs.yml
vendored
@@ -64,5 +64,5 @@ jobs:
|
||||
steps:
|
||||
- name: Deploy to GitHub Pages
|
||||
id: deployment
|
||||
uses: actions/deploy-pages@v4
|
||||
uses: actions/deploy-pages@v5
|
||||
|
||||
|
||||
2
.github/workflows/lint.yml
vendored
2
.github/workflows/lint.yml
vendored
@@ -15,7 +15,7 @@ jobs:
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Run markdownlint-cli2
|
||||
uses: DavidAnson/markdownlint-cli2-action@v19
|
||||
uses: DavidAnson/markdownlint-cli2-action@v23
|
||||
with:
|
||||
globs: |
|
||||
'**/*.md'
|
||||
|
||||
22
.github/workflows/release-trigger.yml
vendored
22
.github/workflows/release-trigger.yml
vendored
@@ -100,18 +100,16 @@ jobs:
|
||||
COMMITS="- Initial release"
|
||||
fi
|
||||
|
||||
# Create new changelog entry
|
||||
{
|
||||
head -n 8 CHANGELOG.md
|
||||
echo ""
|
||||
echo "## [${{ steps.version.outputs.version }}] - $DATE"
|
||||
echo ""
|
||||
echo "### Changes"
|
||||
echo ""
|
||||
echo "$COMMITS"
|
||||
echo ""
|
||||
tail -n +9 CHANGELOG.md
|
||||
} > CHANGELOG.md.tmp
|
||||
# Create new changelog entry — insert after the marker comment
|
||||
NEW_ENTRY=$(printf '%s\n' \
|
||||
"" \
|
||||
"## [${{ steps.version.outputs.version }}] - $DATE" \
|
||||
"" \
|
||||
"### Changed" \
|
||||
"" \
|
||||
"$COMMITS")
|
||||
|
||||
awk -v entry="$NEW_ENTRY" '/<!-- insert new changelog below this comment -->/ { print; print entry; next } {print}' CHANGELOG.md > CHANGELOG.md.tmp
|
||||
mv CHANGELOG.md.tmp CHANGELOG.md
|
||||
|
||||
echo "✅ Updated CHANGELOG.md with commits since $PREVIOUS_TAG"
|
||||
|
||||
@@ -202,8 +202,7 @@ agent: $basename
|
||||
}
|
||||
|
||||
# Create skills in <skills_dir>\<name>\SKILL.md format.
|
||||
# Most agents use hyphenated names (e.g. speckit-plan); Kimi is the
|
||||
# current dotted-name exception (e.g. speckit.plan).
|
||||
# Skills use hyphenated names (e.g. speckit-plan).
|
||||
#
|
||||
# Technical debt note:
|
||||
# Keep SKILL.md frontmatter aligned with `install_ai_skills()` and extension
|
||||
@@ -463,7 +462,7 @@ function Build-Variant {
|
||||
'kimi' {
|
||||
$skillsDir = Join-Path $baseDir ".kimi/skills"
|
||||
New-Item -ItemType Directory -Force -Path $skillsDir | Out-Null
|
||||
New-Skills -SkillsDir $skillsDir -ScriptVariant $Script -AgentName 'kimi' -Separator '.'
|
||||
New-Skills -SkillsDir $skillsDir -ScriptVariant $Script -AgentName 'kimi'
|
||||
}
|
||||
'trae' {
|
||||
$rulesDir = Join-Path $baseDir ".trae/rules"
|
||||
@@ -498,13 +497,13 @@ $AllAgents = @('claude', 'gemini', 'copilot', 'cursor-agent', 'qwen', 'opencode'
|
||||
$AllScripts = @('sh', 'ps')
|
||||
|
||||
function Normalize-List {
|
||||
param([string]$Input)
|
||||
param([string]$Value)
|
||||
|
||||
if ([string]::IsNullOrEmpty($Input)) {
|
||||
if ([string]::IsNullOrEmpty($Value)) {
|
||||
return @()
|
||||
}
|
||||
|
||||
$items = $Input -split '[,\s]+' | Where-Object { $_ } | Select-Object -Unique
|
||||
$items = $Value -split '[,\s]+' | Where-Object { $_ } | Select-Object -Unique
|
||||
return $items
|
||||
}
|
||||
|
||||
@@ -527,7 +526,7 @@ function Validate-Subset {
|
||||
|
||||
# Determine agent list
|
||||
if (-not [string]::IsNullOrEmpty($Agents)) {
|
||||
$AgentList = Normalize-List -Input $Agents
|
||||
$AgentList = Normalize-List -Value $Agents
|
||||
if (-not (Validate-Subset -Type 'agent' -Allowed $AllAgents -Items $AgentList)) {
|
||||
exit 1
|
||||
}
|
||||
@@ -537,7 +536,7 @@ if (-not [string]::IsNullOrEmpty($Agents)) {
|
||||
|
||||
# Determine script list
|
||||
if (-not [string]::IsNullOrEmpty($Scripts)) {
|
||||
$ScriptList = Normalize-List -Input $Scripts
|
||||
$ScriptList = Normalize-List -Value $Scripts
|
||||
if (-not (Validate-Subset -Type 'script' -Allowed $AllScripts -Items $ScriptList)) {
|
||||
exit 1
|
||||
}
|
||||
|
||||
@@ -140,8 +140,7 @@ EOF
|
||||
}
|
||||
|
||||
# Create skills in <skills_dir>/<name>/SKILL.md format.
|
||||
# Most agents use hyphenated names (e.g. speckit-plan); Kimi is the
|
||||
# current dotted-name exception (e.g. speckit.plan).
|
||||
# Skills use hyphenated names (e.g. speckit-plan).
|
||||
#
|
||||
# Technical debt note:
|
||||
# Keep SKILL.md frontmatter aligned with `install_ai_skills()` and extension
|
||||
@@ -321,7 +320,7 @@ build_variant() {
|
||||
generate_commands vibe md "\$ARGUMENTS" "$base_dir/.vibe/prompts" "$script" ;;
|
||||
kimi)
|
||||
mkdir -p "$base_dir/.kimi/skills"
|
||||
create_skills "$base_dir/.kimi/skills" "$script" "kimi" "." ;;
|
||||
create_skills "$base_dir/.kimi/skills" "$script" "kimi" ;;
|
||||
trae)
|
||||
mkdir -p "$base_dir/.trae/rules"
|
||||
generate_commands trae md "\$ARGUMENTS" "$base_dir/.trae/rules" "$script" ;;
|
||||
|
||||
1075
CHANGELOG.md
1075
CHANGELOG.md
File diff suppressed because it is too large
Load Diff
87
README.md
87
README.md
@@ -22,7 +22,10 @@
|
||||
- [🤔 What is Spec-Driven Development?](#-what-is-spec-driven-development)
|
||||
- [⚡ Get Started](#-get-started)
|
||||
- [📽️ Video Overview](#️-video-overview)
|
||||
- [🧩 Community Extensions](#-community-extensions)
|
||||
- [🎨 Community Presets](#-community-presets)
|
||||
- [🚶 Community Walkthroughs](#-community-walkthroughs)
|
||||
- [🛠️ Community Friends](#️-community-friends)
|
||||
- [🤖 Supported AI Agents](#-supported-ai-agents)
|
||||
- [🔧 Specify CLI Reference](#-specify-cli-reference)
|
||||
- [🧩 Making Spec Kit Your Own: Extensions & Presets](#-making-spec-kit-your-own-extensions--presets)
|
||||
@@ -155,6 +158,76 @@ Want to see Spec Kit in action? Watch our [video overview](https://www.youtube.c
|
||||
|
||||
[](https://www.youtube.com/watch?v=a9eR1xsfvHg&pp=0gcJCckJAYcqIYzv)
|
||||
|
||||
## 🧩 Community Extensions
|
||||
|
||||
The following community-contributed extensions are available in [`catalog.community.json`](extensions/catalog.community.json):
|
||||
|
||||
**Categories:**
|
||||
|
||||
- `docs` — reads, validates, or generates spec artifacts
|
||||
- `code` — reviews, validates, or modifies source code
|
||||
- `process` — orchestrates workflow across phases
|
||||
- `integration` — syncs with external platforms
|
||||
- `visibility` — reports on project health or progress
|
||||
|
||||
**Effect:**
|
||||
|
||||
- `Read-only` — produces reports without modifying files
|
||||
- `Read+Write` — modifies files, creates artifacts, or updates specs
|
||||
|
||||
| Extension | Purpose | Category | Effect | URL |
|
||||
|-----------|---------|----------|--------|-----|
|
||||
| AI-Driven Engineering (AIDE) | A structured 7-step workflow for building new projects from scratch with AI assistants — from vision through implementation | `process` | Read+Write | [aide](https://github.com/mnriem/spec-kit-extensions/tree/main/aide) |
|
||||
| Archive Extension | Archive merged features into main project memory. | `docs` | Read+Write | [spec-kit-archive](https://github.com/stn1slv/spec-kit-archive) |
|
||||
| Azure DevOps Integration | Sync user stories and tasks to Azure DevOps work items using OAuth authentication | `integration` | Read+Write | [spec-kit-azure-devops](https://github.com/pragya247/spec-kit-azure-devops) |
|
||||
| Checkpoint Extension | Commit the changes made during the middle of the implementation, so you don't end up with just one very large commit at the end | `code` | Read+Write | [spec-kit-checkpoint](https://github.com/aaronrsun/spec-kit-checkpoint) |
|
||||
| Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | `code` | Read+Write | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) |
|
||||
| Cognitive Squad | Multi-agent cognitive system with Triadic Model: understanding, internalization, application — with quality gates, backpropagation verification, and self-healing | `docs` | Read+Write | [cognitive-squad](https://github.com/Testimonial/cognitive-squad) |
|
||||
| Conduct Extension | Orchestrates spec-kit phases via sub-agent delegation to reduce context pollution. | `process` | Read+Write | [spec-kit-conduct-ext](https://github.com/twbrandon7/spec-kit-conduct-ext) |
|
||||
| DocGuard — CDD Enforcement | Canonical-Driven Development enforcement. Validates, scores, and traces project documentation with automated checks, AI-driven workflows, and spec-kit hooks. Zero NPM runtime dependencies. | `docs` | Read+Write | [spec-kit-docguard](https://github.com/raccioly/docguard) |
|
||||
| Extensify | Create and validate extensions and extension catalogs | `process` | Read+Write | [extensify](https://github.com/mnriem/spec-kit-extensions/tree/main/extensify) |
|
||||
| Fleet Orchestrator | Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases | `process` | Read+Write | [spec-kit-fleet](https://github.com/sharathsatish/spec-kit-fleet) |
|
||||
| Iterate | Iterate on spec documents with a two-phase define-and-apply workflow — refine specs mid-implementation and go straight back to building | `docs` | Read+Write | [spec-kit-iterate](https://github.com/imviancagrace/spec-kit-iterate) |
|
||||
| Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | `integration` | Read+Write | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
|
||||
| Learning Extension | Generate educational guides from implementations and enhance clarifications with mentoring context | `docs` | Read+Write | [spec-kit-learn](https://github.com/imviancagrace/spec-kit-learn) |
|
||||
| MAQA — Multi-Agent & Quality Assurance | Coordinator → feature → QA agent workflow with parallel worktree-based implementation. Language-agnostic. Auto-detects installed board plugins. Optional CI gate. | `process` | Read+Write | [spec-kit-maqa-ext](https://github.com/GenieRobot/spec-kit-maqa-ext) |
|
||||
| MAQA Azure DevOps Integration | Azure DevOps Boards integration for MAQA — syncs User Stories and Task children as features progress | `integration` | Read+Write | [spec-kit-maqa-azure-devops](https://github.com/GenieRobot/spec-kit-maqa-azure-devops) |
|
||||
| MAQA CI/CD Gate | Auto-detects GitHub Actions, CircleCI, GitLab CI, and Bitbucket Pipelines. Blocks QA handoff until pipeline is green. | `process` | Read+Write | [spec-kit-maqa-ci](https://github.com/GenieRobot/spec-kit-maqa-ci) |
|
||||
| MAQA GitHub Projects Integration | GitHub Projects v2 integration for MAQA — syncs draft issues and Status columns as features progress | `integration` | Read+Write | [spec-kit-maqa-github-projects](https://github.com/GenieRobot/spec-kit-maqa-github-projects) |
|
||||
| MAQA Jira Integration | Jira integration for MAQA — syncs Stories and Subtasks as features progress through the board | `integration` | Read+Write | [spec-kit-maqa-jira](https://github.com/GenieRobot/spec-kit-maqa-jira) |
|
||||
| MAQA Linear Integration | Linear integration for MAQA — syncs issues and sub-issues across workflow states as features progress | `integration` | Read+Write | [spec-kit-maqa-linear](https://github.com/GenieRobot/spec-kit-maqa-linear) |
|
||||
| MAQA Trello Integration | Trello board integration for MAQA — populates board from specs, moves cards, real-time checklist ticking | `integration` | Read+Write | [spec-kit-maqa-trello](https://github.com/GenieRobot/spec-kit-maqa-trello) |
|
||||
| Onboard | Contextual onboarding and progressive growth for developers new to spec-kit projects. Explains specs, maps dependencies, validates understanding, and guides the next step | `process` | Read+Write | [spec-kit-onboard](https://github.com/dmux/spec-kit-onboard) |
|
||||
| Plan Review Gate | Require spec.md and plan.md to be merged via MR/PR before allowing task generation | `process` | Read-only | [spec-kit-plan-review-gate](https://github.com/luno/spec-kit-plan-review-gate) |
|
||||
| Presetify | Create and validate presets and preset catalogs | `process` | Read+Write | [presetify](https://github.com/mnriem/spec-kit-extensions/tree/main/presetify) |
|
||||
| Product Forge | Full product lifecycle: research → product spec → SpecKit → implement → verify → test | `process` | Read+Write | [speckit-product-forge](https://github.com/VaiYav/speckit-product-forge) |
|
||||
| Project Health Check | Diagnose a Spec Kit project and report health issues across structure, agents, features, scripts, extensions, and git | `visibility` | Read-only | [spec-kit-doctor](https://github.com/KhawarHabibKhan/spec-kit-doctor) |
|
||||
| Project Status | Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary | `visibility` | Read-only | [spec-kit-status](https://github.com/KhawarHabibKhan/spec-kit-status) |
|
||||
| Ralph Loop | Autonomous implementation loop using AI agent CLI | `code` | Read+Write | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |
|
||||
| Reconcile Extension | Reconcile implementation drift by surgically updating feature artifacts. | `docs` | Read+Write | [spec-kit-reconcile](https://github.com/stn1slv/spec-kit-reconcile) |
|
||||
| Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | `docs` | Read+Write | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
|
||||
| Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | `code` | Read-only | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
|
||||
| SDD Utilities | Resume interrupted workflows, validate project health, and verify spec-to-task traceability | `process` | Read+Write | [speckit-utils](https://github.com/mvanhorn/speckit-utils) |
|
||||
| Superpowers Bridge | Orchestrates obra/superpowers skills within the spec-kit SDD workflow across the full lifecycle (clarification, TDD, review, verification, critique, debugging, branch completion) | `process` | Read+Write | [superpowers-bridge](https://github.com/RbBtSn0w/spec-kit-extensions/tree/main/superpowers-bridge) |
|
||||
| Spec Sync | Detect and resolve drift between specs and implementation. AI-assisted resolution with human approval | `docs` | Read+Write | [spec-kit-sync](https://github.com/bgervin/spec-kit-sync) |
|
||||
| Understanding | Automated requirements quality analysis — 31 deterministic metrics against IEEE/ISO standards with experimental energy-based ambiguity detection | `docs` | Read-only | [understanding](https://github.com/Testimonial/understanding) |
|
||||
| V-Model Extension Pack | Enforces V-Model paired generation of development specs and test specs with full traceability | `docs` | Read+Write | [spec-kit-v-model](https://github.com/leocamello/spec-kit-v-model) |
|
||||
| Verify Extension | Post-implementation quality gate that validates implemented code against specification artifacts | `code` | Read-only | [spec-kit-verify](https://github.com/ismaelJimenez/spec-kit-verify) |
|
||||
| Verify Tasks Extension | Detect phantom completions: tasks marked [X] in tasks.md with no real implementation | `code` | Read-only | [spec-kit-verify-tasks](https://github.com/datastone-inc/spec-kit-verify-tasks) |
|
||||
|
||||
To submit your own extension, see the [Extension Publishing Guide](extensions/EXTENSION-PUBLISHING-GUIDE.md).
|
||||
|
||||
## 🎨 Community Presets
|
||||
|
||||
The following community-contributed presets customize how Spec Kit behaves — overriding templates, commands, and terminology without changing any tooling. Presets are available in [`catalog.community.json`](presets/catalog.community.json):
|
||||
|
||||
| Preset | Purpose | Provides | Requires | URL |
|
||||
|--------|---------|----------|----------|-----|
|
||||
| AIDE In-Place Migration | Adapts the AIDE extension workflow for in-place technology migrations (X → Y pattern) — adds migration objectives, verification gates, knowledge documents, and behavioral equivalence criteria | 2 templates, 8 commands | AIDE extension | [spec-kit-presets](https://github.com/mnriem/spec-kit-presets) |
|
||||
| Pirate Speak (Full) | Transforms all Spec Kit output into pirate speak — specs become "Voyage Manifests", plans become "Battle Plans", tasks become "Crew Assignments" | 6 templates, 9 commands | — | [spec-kit-presets](https://github.com/mnriem/spec-kit-presets) |
|
||||
|
||||
To build and publish your own preset, see the [Presets Publishing Guide](presets/PUBLISHING.md).
|
||||
|
||||
## 🚶 Community Walkthroughs
|
||||
|
||||
See Spec-Driven Development in action across different scenarios with these community-contributed walkthroughs:
|
||||
@@ -171,6 +244,16 @@ See Spec-Driven Development in action across different scenarios with these comm
|
||||
|
||||
- **[Greenfield Spring Boot MVC with a custom preset](https://github.com/mnriem/spec-kit-pirate-speak-preset-demo)** — Builds a Spring Boot MVC application from scratch using a custom pirate-speak preset, demonstrating how presets can reshape the entire spec-kit experience: specifications become "Voyage Manifests," plans become "Battle Plans," and tasks become "Crew Assignments" — all generated in full pirate vernacular without changing any tooling.
|
||||
|
||||
- **[Greenfield Spring Boot + React with a custom extension](https://github.com/mnriem/spec-kit-aide-extension-demo)** — Walks through the **AIDE extension**, a community extension that adds an alternative spec-driven workflow to spec-kit with high-level specs (vision) and low-level specs (work items) organized in a 7-step iterative lifecycle: vision → roadmap → progress tracking → work queue → work items → execution → feedback loops. Uses a family trading platform (Spring Boot 4, React 19, PostgreSQL, Docker Compose) as the scenario to illustrate how the extension mechanism lets you plug in a different style of spec-driven development without changing any core tooling — truly utilizing the "Kit" in Spec Kit.
|
||||
|
||||
## 🛠️ Community Friends
|
||||
|
||||
Community projects that extend, visualize, or build on Spec Kit:
|
||||
|
||||
- **[cc-sdd](https://github.com/rhuss/cc-sdd)** - A Claude Code plugin that adds composable traits on top of Spec Kit with [Superpowers](https://github.com/obra/superpowers)-based quality gates, spec/code review, git worktree isolation, and parallel implementation via agent teams.
|
||||
|
||||
- **[Spec Kit Assistant](https://marketplace.visualstudio.com/items?itemName=rfsales.speckit-assistant)** — A VS Code extension that provides a visual orchestrator for the full SDD workflow (constitution → specification → planning → tasks → implementation) with phase status visualization, an interactive task checklist, DAG visualization, and support for Claude, Gemini, GitHub Copilot, and OpenAI backends. Requires the `specify` CLI in your PATH.
|
||||
|
||||
## 🤖 Supported AI Agents
|
||||
|
||||
| Agent | Support | Notes |
|
||||
@@ -229,7 +312,7 @@ The `specify` command supports the following options:
|
||||
| `--skip-tls` | Flag | Skip SSL/TLS verification (not recommended) |
|
||||
| `--debug` | Flag | Enable detailed debug output for troubleshooting |
|
||||
| `--github-token` | Option | GitHub token for API requests (or set GH_TOKEN/GITHUB_TOKEN env variable) |
|
||||
| `--ai-skills` | Flag | Install Prompt.MD templates as agent skills in agent-specific `skills/` directory (requires `--ai`) |
|
||||
| `--ai-skills` | Flag | Install Prompt.MD templates as agent skills in agent-specific `skills/` directory (requires `--ai`). Extension commands are also auto-registered as skills when extensions are added later. |
|
||||
| `--branch-numbering` | Option | Branch numbering strategy: `sequential` (default — `001`, `002`, `003`) or `timestamp` (`YYYYMMDD-HHMMSS`). Timestamp mode is useful for distributed teams to avoid numbering conflicts |
|
||||
|
||||
### Examples
|
||||
@@ -380,7 +463,7 @@ specify extension add <extension-name>
|
||||
|
||||
For example, extensions could add Jira integration, post-implementation code review, V-Model test traceability, or project health diagnostics.
|
||||
|
||||
See the [Extensions README](./extensions/README.md) for the full guide, the complete community catalog, and how to build and publish your own.
|
||||
See the [Extensions README](./extensions/README.md) for the full guide and how to build and publish your own. Browse the [community extensions](#-community-extensions) above for what's available.
|
||||
|
||||
### Presets — Customize Existing Workflows
|
||||
|
||||
|
||||
79
TESTING.md
Normal file
79
TESTING.md
Normal file
@@ -0,0 +1,79 @@
|
||||
# Manual Testing Guide
|
||||
|
||||
Any change that affects a slash command's behavior requires manually testing that command through an AI agent and submitting results with the PR.
|
||||
|
||||
## Process
|
||||
|
||||
1. **Identify affected commands** — use the [prompt below](#determining-which-tests-to-run) to have your agent analyze your changed files and determine which commands need testing.
|
||||
2. **Set up a test project** — scaffold from your local branch (see [Setup](#setup)).
|
||||
3. **Run each affected command** — invoke it in your agent, verify it completes successfully, and confirm it produces the expected output (files created, scripts executed, artifacts populated).
|
||||
4. **Run prerequisites first** — commands that depend on earlier commands (e.g., `/speckit.tasks` requires `/speckit.plan` which requires `/speckit.specify`) must be run in order.
|
||||
5. **Report results** — paste the [reporting template](#reporting-results) into your PR with pass/fail for each command tested.
|
||||
|
||||
## Setup
|
||||
|
||||
```bash
|
||||
# Install the CLI from your local branch
|
||||
cd <spec-kit-repo>
|
||||
uv venv .venv
|
||||
source .venv/bin/activate # On Windows: .venv\Scripts\activate
|
||||
uv pip install -e .
|
||||
|
||||
# Initialize a test project using your local changes
|
||||
specify init /tmp/speckit-test --ai <agent> --offline
|
||||
cd /tmp/speckit-test
|
||||
|
||||
# Open in your agent
|
||||
```
|
||||
|
||||
## Reporting results
|
||||
|
||||
Paste this into your PR:
|
||||
|
||||
~~~markdown
|
||||
## Manual test results
|
||||
|
||||
**Agent**: [e.g., GitHub Copilot in VS Code] | **OS/Shell**: [e.g., macOS/zsh]
|
||||
|
||||
| Command tested | Notes |
|
||||
|----------------|-------|
|
||||
| `/speckit.command` | |
|
||||
~~~
|
||||
|
||||
## Determining which tests to run
|
||||
|
||||
Copy this prompt into your agent. Include the agent's response (selected tests plus a brief explanation of the mapping) in your PR.
|
||||
|
||||
~~~text
|
||||
Read TESTING.md, then run `git diff --name-only main` to get my changed files.
|
||||
For each changed file, determine which slash commands it affects by reading
|
||||
the command templates in templates/commands/ to understand what each command
|
||||
invokes. Use these mapping rules:
|
||||
|
||||
- templates/commands/X.md → the command it defines
|
||||
- scripts/bash/Y.sh or scripts/powershell/Y.ps1 → every command that invokes that script (grep templates/commands/ for the script name). Also check transitive dependencies: if the changed script is sourced by other scripts (e.g., common.sh is sourced by create-new-feature.sh, check-prerequisites.sh, setup-plan.sh, update-agent-context.sh), then every command invoking those downstream scripts is also affected
|
||||
- templates/Z-template.md → every command that consumes that template during execution
|
||||
- src/specify_cli/*.py → CLI commands (`specify init`, `specify check`, `specify extension *`, `specify preset *`); test the affected CLI command and, for init/scaffolding changes, at minimum test /speckit.specify
|
||||
- extensions/X/commands/* → the extension command it defines
|
||||
- extensions/X/scripts/* → every extension command that invokes that script
|
||||
- extensions/X/extension.yml or config-template.yml → every command in that extension. Also check if the manifest defines hooks (look for `hooks:` entries like `before_specify`, `after_implement`, etc.) — if so, the core commands those hooks attach to are also affected
|
||||
- presets/*/* → test preset scaffolding via `specify init` with the preset
|
||||
- pyproject.toml → packaging/bundling; test `specify init` and verify bundled assets
|
||||
|
||||
Include prerequisite tests (e.g., T5 requires T3 requires T1).
|
||||
|
||||
Output in this format:
|
||||
|
||||
### Test selection reasoning
|
||||
|
||||
| Changed file | Affects | Test | Why |
|
||||
|---|---|---|---|
|
||||
| (path) | (command) | T# | (reason) |
|
||||
|
||||
### Required tests
|
||||
|
||||
Number each test sequentially (T1, T2, ...). List prerequisite tests first.
|
||||
|
||||
- T1: /speckit.command — (reason)
|
||||
- T2: /speckit.command — (reason)
|
||||
~~~
|
||||
@@ -44,7 +44,7 @@ provides:
|
||||
- name: string # Required, pattern: ^speckit\.[a-z0-9-]+\.[a-z0-9-]+$
|
||||
file: string # Required, relative path to command file
|
||||
description: string # Required
|
||||
aliases: [string] # Optional, array of alternate names
|
||||
aliases: [string] # Optional, same pattern as name; namespace must match extension.id and must not shadow core or installed extension commands
|
||||
|
||||
config: # Optional, array of config files
|
||||
- name: string # Config file name
|
||||
|
||||
@@ -41,7 +41,7 @@ provides:
|
||||
- name: "speckit.my-ext.hello" # Must follow pattern: speckit.{ext-id}.{cmd}
|
||||
file: "commands/hello.md"
|
||||
description: "Say hello"
|
||||
aliases: ["speckit.hello"] # Optional aliases
|
||||
aliases: ["speckit.my-ext.hi"] # Optional aliases, same pattern
|
||||
|
||||
config: # Optional: Config files
|
||||
- name: "my-ext-config.yml"
|
||||
@@ -186,7 +186,7 @@ What the extension provides.
|
||||
- `name`: Command name (must match `speckit.{ext-id}.{command}`)
|
||||
- `file`: Path to command file (relative to extension root)
|
||||
- `description`: Command description (optional)
|
||||
- `aliases`: Alternative command names (optional, array)
|
||||
- `aliases`: Alternative command names (optional, array; each must match `speckit.{ext-id}.{command}`)
|
||||
|
||||
### Optional Fields
|
||||
|
||||
@@ -514,7 +514,7 @@ zip -r spec-kit-my-ext-1.0.0.zip extension.yml commands/ scripts/ docs/
|
||||
Users install with:
|
||||
|
||||
```bash
|
||||
specify extension add --from https://github.com/.../spec-kit-my-ext-1.0.0.zip
|
||||
specify extension add <extension-name> --from https://github.com/.../spec-kit-my-ext-1.0.0.zip
|
||||
```
|
||||
|
||||
### Option 3: Community Reference Catalog
|
||||
@@ -523,7 +523,7 @@ Submit to the community catalog for public discovery:
|
||||
|
||||
1. **Fork** spec-kit repository
|
||||
2. **Add entry** to `extensions/catalog.community.json`
|
||||
3. **Update** `extensions/README.md` with your extension
|
||||
3. **Update** the Community Extensions table in `README.md` with your extension
|
||||
4. **Create PR** following the [Extension Publishing Guide](EXTENSION-PUBLISHING-GUIDE.md)
|
||||
5. **After merge**, your extension becomes available:
|
||||
- Users can browse `catalog.community.json` to discover your extension
|
||||
|
||||
@@ -122,7 +122,7 @@ Test that users can install from your release:
|
||||
specify extension add --dev /path/to/your-extension
|
||||
|
||||
# Test from GitHub archive
|
||||
specify extension add --from https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip
|
||||
specify extension add <extension-name> --from https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip
|
||||
```
|
||||
|
||||
---
|
||||
@@ -204,9 +204,9 @@ Edit `extensions/catalog.community.json` and add your extension:
|
||||
- Use current timestamp for `created_at` and `updated_at`
|
||||
- Update the top-level `updated_at` to current time
|
||||
|
||||
### 3. Update Extensions README
|
||||
### 3. Update Community Extensions Table
|
||||
|
||||
Add your extension to the Available Extensions table in `extensions/README.md`:
|
||||
Add your extension to the Community Extensions table in the project root `README.md`:
|
||||
|
||||
```markdown
|
||||
| Your Extension Name | Brief description of what it does | `<category>` | <effect> | [repo-name](https://github.com/your-org/spec-kit-your-extension) |
|
||||
@@ -234,7 +234,7 @@ Insert your extension in alphabetical order in the table.
|
||||
git checkout -b add-your-extension
|
||||
|
||||
# Commit your changes
|
||||
git add extensions/catalog.community.json extensions/README.md
|
||||
git add extensions/catalog.community.json README.md
|
||||
git commit -m "Add your-extension to community catalog
|
||||
|
||||
- Extension ID: your-extension
|
||||
@@ -273,7 +273,7 @@ Brief description of what your extension does.
|
||||
- [x] All commands working
|
||||
- [x] No security vulnerabilities
|
||||
- [x] Added to extensions/catalog.community.json
|
||||
- [x] Added to extensions/README.md Available Extensions table
|
||||
- [x] Added to Community Extensions table in README.md
|
||||
|
||||
### Testing
|
||||
Tested on:
|
||||
|
||||
@@ -160,7 +160,7 @@ This will:
|
||||
|
||||
```bash
|
||||
# From GitHub release
|
||||
specify extension add --from https://github.com/org/spec-kit-ext/archive/refs/tags/v1.0.0.zip
|
||||
specify extension add <extension-name> --from https://github.com/org/spec-kit-ext/archive/refs/tags/v1.0.0.zip
|
||||
```
|
||||
|
||||
### Install from Local Directory (Development)
|
||||
@@ -187,6 +187,21 @@ Provided commands:
|
||||
Check: .specify/extensions/jira/
|
||||
```
|
||||
|
||||
### Automatic Agent Skill Registration
|
||||
|
||||
If your project was initialized with `--ai-skills`, extension commands are **automatically registered as agent skills** during installation. This ensures that extensions are discoverable by agents that use the [agentskills.io](https://agentskills.io) skill specification.
|
||||
|
||||
```text
|
||||
✓ Extension installed successfully!
|
||||
|
||||
Jira Integration (v1.0.0)
|
||||
...
|
||||
|
||||
✓ 3 agent skill(s) auto-registered
|
||||
```
|
||||
|
||||
When an extension is removed, its corresponding skills are also cleaned up automatically. Pre-existing skills that were manually customized are never overwritten.
|
||||
|
||||
---
|
||||
|
||||
## Using Extensions
|
||||
@@ -199,8 +214,8 @@ Extensions add commands that appear in your AI agent (Claude Code):
|
||||
# In Claude Code
|
||||
> /speckit.jira.specstoissues
|
||||
|
||||
# Or use short alias (if provided)
|
||||
> /speckit.specstoissues
|
||||
# Or use a namespaced alias (if provided)
|
||||
> /speckit.jira.sync
|
||||
```
|
||||
|
||||
### Extension Configuration
|
||||
@@ -722,7 +737,7 @@ You can still install extensions not in your catalog using `--from`:
|
||||
specify extension add jira
|
||||
|
||||
# Direct URL (bypasses catalog)
|
||||
specify extension add --from https://github.com/someone/spec-kit-ext/archive/v1.0.0.zip
|
||||
specify extension add <extension-name> --from https://github.com/someone/spec-kit-ext/archive/v1.0.0.zip
|
||||
|
||||
# Local development
|
||||
specify extension add --dev /path/to/extension
|
||||
@@ -792,7 +807,7 @@ specify extension add --dev /path/to/extension
|
||||
2. Install older version of extension:
|
||||
|
||||
```bash
|
||||
specify extension add --from https://github.com/org/ext/archive/v1.0.0.zip
|
||||
specify extension add <extension-name> --from https://github.com/org/ext/archive/v1.0.0.zip
|
||||
```
|
||||
|
||||
### MCP Tool Not Available
|
||||
|
||||
@@ -59,7 +59,7 @@ Populate your `catalog.json` with approved extensions:
|
||||
Skip catalog curation - team members install directly using URLs:
|
||||
|
||||
```bash
|
||||
specify extension add --from https://github.com/org/spec-kit-ext/archive/refs/tags/v1.0.0.zip
|
||||
specify extension add <extension-name> --from https://github.com/org/spec-kit-ext/archive/refs/tags/v1.0.0.zip
|
||||
```
|
||||
|
||||
**Benefits**: Quick for one-off testing or private extensions
|
||||
@@ -68,36 +68,9 @@ specify extension add --from https://github.com/org/spec-kit-ext/archive/refs/ta
|
||||
|
||||
## Available Community Extensions
|
||||
|
||||
The following community-contributed extensions are available in [`catalog.community.json`](catalog.community.json):
|
||||
See the [Community Extensions](../README.md#-community-extensions) section in the main README for the full list of available community-contributed extensions.
|
||||
|
||||
**Categories:** `docs` — reads, validates, or generates spec artifacts · `code` — reviews, validates, or modifies source code · `process` — orchestrates workflow across phases · `integration` — syncs with external platforms · `visibility` — reports on project health or progress
|
||||
|
||||
**Effect:** `Read-only` — produces reports without modifying files · `Read+Write` — modifies files, creates artifacts, or updates specs
|
||||
|
||||
| Extension | Purpose | Category | Effect | URL |
|
||||
|-----------|---------|----------|--------|-----|
|
||||
| Archive Extension | Archive merged features into main project memory. | `docs` | Read+Write | [spec-kit-archive](https://github.com/stn1slv/spec-kit-archive) |
|
||||
| Azure DevOps Integration | Sync user stories and tasks to Azure DevOps work items using OAuth authentication | `integration` | Read+Write | [spec-kit-azure-devops](https://github.com/pragya247/spec-kit-azure-devops) |
|
||||
| Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | `code` | Read+Write | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) |
|
||||
| Cognitive Squad | Multi-agent cognitive system with Triadic Model: understanding, internalization, application — with quality gates, backpropagation verification, and self-healing | `docs` | Read+Write | [cognitive-squad](https://github.com/Testimonial/cognitive-squad) |
|
||||
| Conduct Extension | Orchestrates spec-kit phases via sub-agent delegation to reduce context pollution. | `process` | Read+Write | [spec-kit-conduct-ext](https://github.com/twbrandon7/spec-kit-conduct-ext) |
|
||||
| DocGuard — CDD Enforcement | Canonical-Driven Development enforcement. Validates, scores, and traces project documentation with automated checks, AI-driven workflows, and spec-kit hooks. Zero NPM runtime dependencies. | `docs` | Read+Write | [spec-kit-docguard](https://github.com/raccioly/docguard) |
|
||||
| Fleet Orchestrator | Orchestrate a full feature lifecycle with human-in-the-loop gates across all SpecKit phases | `process` | Read+Write | [spec-kit-fleet](https://github.com/sharathsatish/spec-kit-fleet) |
|
||||
| Iterate | Iterate on spec documents with a two-phase define-and-apply workflow — refine specs mid-implementation and go straight back to building | `docs` | Read+Write | [spec-kit-iterate](https://github.com/imviancagrace/spec-kit-iterate) |
|
||||
| Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | `integration` | Read+Write | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
|
||||
| Learning Extension | Generate educational guides from implementations and enhance clarifications with mentoring context | `docs` | Read+Write | [spec-kit-learn](https://github.com/imviancagrace/spec-kit-learn) |
|
||||
| Project Health Check | Diagnose a Spec Kit project and report health issues across structure, agents, features, scripts, extensions, and git | `visibility` | Read-only | [spec-kit-doctor](https://github.com/KhawarHabibKhan/spec-kit-doctor) |
|
||||
| Project Status | Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary | `visibility` | Read-only | [spec-kit-status](https://github.com/KhawarHabibKhan/spec-kit-status) |
|
||||
| Ralph Loop | Autonomous implementation loop using AI agent CLI | `code` | Read+Write | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |
|
||||
| Reconcile Extension | Reconcile implementation drift by surgically updating feature artifacts. | `docs` | Read+Write | [spec-kit-reconcile](https://github.com/stn1slv/spec-kit-reconcile) |
|
||||
| Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | `docs` | Read+Write | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
|
||||
| Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | `code` | Read-only | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
|
||||
| SDD Utilities | Resume interrupted workflows, validate project health, and verify spec-to-task traceability | `process` | Read+Write | [speckit-utils](https://github.com/mvanhorn/speckit-utils) |
|
||||
| Spec Sync | Detect and resolve drift between specs and implementation. AI-assisted resolution with human approval | `docs` | Read+Write | [spec-kit-sync](https://github.com/bgervin/spec-kit-sync) |
|
||||
| Understanding | Automated requirements quality analysis — 31 deterministic metrics against IEEE/ISO standards with experimental energy-based ambiguity detection | `docs` | Read-only | [understanding](https://github.com/Testimonial/understanding) |
|
||||
| V-Model Extension Pack | Enforces V-Model paired generation of development specs and test specs with full traceability | `docs` | Read+Write | [spec-kit-v-model](https://github.com/leocamello/spec-kit-v-model) |
|
||||
| Verify Extension | Post-implementation quality gate that validates implemented code against specification artifacts | `code` | Read-only | [spec-kit-verify](https://github.com/ismaelJimenez/spec-kit-verify) |
|
||||
| Verify Tasks Extension | Detect phantom completions: tasks marked [X] in tasks.md with no real implementation | `code` | Read-only | [spec-kit-verify-tasks](https://github.com/datastone-inc/spec-kit-verify-tasks) |
|
||||
For the raw catalog data, see [`catalog.community.json`](catalog.community.json).
|
||||
|
||||
|
||||
## Adding Your Extension
|
||||
@@ -135,7 +108,7 @@ specify extension search # See what's in your catalog
|
||||
specify extension add <extension-name> # Install by name
|
||||
|
||||
# Direct from URL (bypasses catalog)
|
||||
specify extension add --from https://github.com/<org>/<repo>/archive/refs/tags/<version>.zip
|
||||
specify extension add <extension-name> --from https://github.com/<org>/<repo>/archive/refs/tags/<version>.zip
|
||||
|
||||
# List installed extensions
|
||||
specify extension list
|
||||
|
||||
@@ -223,7 +223,7 @@ provides:
|
||||
- name: "speckit.jira.specstoissues"
|
||||
file: "commands/specstoissues.md"
|
||||
description: "Create Jira hierarchy from spec and tasks"
|
||||
aliases: ["speckit.specstoissues"] # Alternate names
|
||||
aliases: ["speckit.jira.sync"] # Alternate names
|
||||
|
||||
- name: "speckit.jira.discover-fields"
|
||||
file: "commands/discover-fields.md"
|
||||
@@ -1517,7 +1517,7 @@ specify extension add github-projects
|
||||
/speckit.github.taskstoissues
|
||||
```
|
||||
|
||||
**Compatibility shim** (if needed):
|
||||
**Migration alias** (if needed):
|
||||
|
||||
```yaml
|
||||
# extension.yml
|
||||
@@ -1525,10 +1525,10 @@ provides:
|
||||
commands:
|
||||
- name: "speckit.github.taskstoissues"
|
||||
file: "commands/taskstoissues.md"
|
||||
aliases: ["speckit.taskstoissues"] # Backward compatibility
|
||||
aliases: ["speckit.github.sync-taskstoissues"] # Alternate namespaced entry point
|
||||
```
|
||||
|
||||
AI agent registers both names, so old scripts work.
|
||||
AI agents register both names, so callers can migrate to the alternate alias without relying on deprecated global shortcuts like `/speckit.taskstoissues`.
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,8 +1,41 @@
|
||||
{
|
||||
"schema_version": "1.0",
|
||||
"updated_at": "2026-03-19T12:08:20Z",
|
||||
"updated_at": "2026-03-30T00:00:00Z",
|
||||
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json",
|
||||
"extensions": {
|
||||
"aide": {
|
||||
"name": "AI-Driven Engineering (AIDE)",
|
||||
"id": "aide",
|
||||
"description": "A structured 7-step workflow for building new projects from scratch with AI assistants — from vision through implementation.",
|
||||
"author": "mnriem",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/mnriem/spec-kit-extensions/releases/download/aide-v1.0.0/aide.zip",
|
||||
"repository": "https://github.com/mnriem/spec-kit-extensions",
|
||||
"homepage": "https://github.com/mnriem/spec-kit-extensions",
|
||||
"documentation": "https://github.com/mnriem/spec-kit-extensions/blob/main/aide/README.md",
|
||||
"changelog": "https://github.com/mnriem/spec-kit-extensions/blob/main/aide/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.2.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 7,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"workflow",
|
||||
"project-management",
|
||||
"ai-driven",
|
||||
"new-project",
|
||||
"planning",
|
||||
"experimental"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-18T00:00:00Z",
|
||||
"updated_at": "2026-03-18T00:00:00Z"
|
||||
},
|
||||
"archive": {
|
||||
"name": "Archive Extension",
|
||||
"id": "archive",
|
||||
@@ -73,6 +106,35 @@
|
||||
"created_at": "2026-03-03T00:00:00Z",
|
||||
"updated_at": "2026-03-03T00:00:00Z"
|
||||
},
|
||||
"checkpoint": {
|
||||
"name": "Checkpoint Extension",
|
||||
"id": "checkpoint",
|
||||
"description": "An extension to commit the changes made during the middle of the implementation, so you don't end up with just one very large commit at the end.",
|
||||
"author": "aaronrsun",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/aaronrsun/spec-kit-checkpoint/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/aaronrsun/spec-kit-checkpoint",
|
||||
"homepage": "https://github.com/aaronrsun/spec-kit-checkpoint",
|
||||
"documentation": "https://github.com/aaronrsun/spec-kit-checkpoint/blob/main/README.md",
|
||||
"changelog": "https://github.com/aaronrsun/spec-kit-checkpoint/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 1,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"checkpoint",
|
||||
"commit"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-22T00:00:00Z",
|
||||
"updated_at": "2026-03-22T00:00:00Z"
|
||||
},
|
||||
"cleanup": {
|
||||
"name": "Cleanup Extension",
|
||||
"id": "cleanup",
|
||||
@@ -180,7 +242,7 @@
|
||||
"updated_at": "2026-03-19T12:08:20Z"
|
||||
},
|
||||
"docguard": {
|
||||
"name": "DocGuard \u2014 CDD Enforcement",
|
||||
"name": "DocGuard — CDD Enforcement",
|
||||
"id": "docguard",
|
||||
"description": "Canonical-Driven Development enforcement. Validates, scores, and traces project documentation with automated checks, AI-driven workflows, and spec-kit hooks. Zero NPM runtime dependencies.",
|
||||
"author": "raccioly",
|
||||
@@ -252,6 +314,37 @@
|
||||
"created_at": "2026-03-13T00:00:00Z",
|
||||
"updated_at": "2026-03-13T00:00:00Z"
|
||||
},
|
||||
"extensify": {
|
||||
"name": "Extensify",
|
||||
"id": "extensify",
|
||||
"description": "Create and validate extensions and extension catalogs.",
|
||||
"author": "mnriem",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/mnriem/spec-kit-extensions/releases/download/extensify-v1.0.0/extensify.zip",
|
||||
"repository": "https://github.com/mnriem/spec-kit-extensions",
|
||||
"homepage": "https://github.com/mnriem/spec-kit-extensions",
|
||||
"documentation": "https://github.com/mnriem/spec-kit-extensions/blob/main/extensify/README.md",
|
||||
"changelog": "https://github.com/mnriem/spec-kit-extensions/blob/main/extensify/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.2.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 4,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"extensions",
|
||||
"workflow",
|
||||
"validation",
|
||||
"experimental"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-18T00:00:00Z",
|
||||
"updated_at": "2026-03-18T00:00:00Z"
|
||||
},
|
||||
"fleet": {
|
||||
"name": "Fleet Orchestrator",
|
||||
"id": "fleet",
|
||||
@@ -344,6 +437,390 @@
|
||||
"created_at": "2026-03-05T00:00:00Z",
|
||||
"updated_at": "2026-03-05T00:00:00Z"
|
||||
},
|
||||
"learn": {
|
||||
"name": "Learning Extension",
|
||||
"id": "learn",
|
||||
"description": "Generate educational guides from implementations and enhance clarifications with mentoring context.",
|
||||
"author": "Vianca Martinez",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/imviancagrace/spec-kit-learn/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/imviancagrace/spec-kit-learn",
|
||||
"homepage": "https://github.com/imviancagrace/spec-kit-learn",
|
||||
"documentation": "https://github.com/imviancagrace/spec-kit-learn/blob/main/README.md",
|
||||
"changelog": "https://github.com/imviancagrace/spec-kit-learn/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": [
|
||||
"learning",
|
||||
"education",
|
||||
"mentoring",
|
||||
"knowledge-transfer"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-17T00:00:00Z",
|
||||
"updated_at": "2026-03-17T00:00:00Z"
|
||||
},
|
||||
"maqa": {
|
||||
"name": "MAQA — Multi-Agent & Quality Assurance",
|
||||
"id": "maqa",
|
||||
"description": "Coordinator → feature → QA agent workflow with parallel worktree-based implementation. Language-agnostic. Auto-detects installed board plugins (Trello, Linear, GitHub Projects, Jira, Azure DevOps). Optional CI gate.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.3",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-ext/releases/download/maqa-v0.1.3/maqa.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-ext",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-ext",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-ext/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-ext/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 4,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": [
|
||||
"multi-agent",
|
||||
"orchestration",
|
||||
"quality-assurance",
|
||||
"workflow",
|
||||
"parallel",
|
||||
"tdd"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-26T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-azure-devops": {
|
||||
"name": "MAQA Azure DevOps Integration",
|
||||
"id": "maqa-azure-devops",
|
||||
"description": "Azure DevOps Boards integration for the MAQA extension. Populates work items from specs, moves User Stories across columns as features progress, real-time Task child ticking.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.0",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-azure-devops/releases/download/maqa-azure-devops-v0.1.0/maqa-azure-devops.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-azure-devops",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-azure-devops",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-azure-devops/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-azure-devops/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"azure-devops",
|
||||
"project-management",
|
||||
"multi-agent",
|
||||
"maqa",
|
||||
"kanban"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-ci": {
|
||||
"name": "MAQA CI/CD Gate",
|
||||
"id": "maqa-ci",
|
||||
"description": "CI/CD pipeline gate for the MAQA extension. Auto-detects GitHub Actions, CircleCI, GitLab CI, and Bitbucket Pipelines. Blocks QA handoff until pipeline is green.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.0",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-ci/releases/download/maqa-ci-v0.1.0/maqa-ci.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-ci",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-ci",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-ci/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-ci/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"ci-cd",
|
||||
"github-actions",
|
||||
"circleci",
|
||||
"gitlab-ci",
|
||||
"quality-gate",
|
||||
"maqa"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-github-projects": {
|
||||
"name": "MAQA GitHub Projects Integration",
|
||||
"id": "maqa-github-projects",
|
||||
"description": "GitHub Projects v2 integration for the MAQA extension. Populates draft issues from specs, moves items across Status columns as features progress, real-time task list ticking.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.0",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-github-projects/releases/download/maqa-github-projects-v0.1.0/maqa-github-projects.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-github-projects",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-github-projects",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-github-projects/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-github-projects/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"github-projects",
|
||||
"project-management",
|
||||
"multi-agent",
|
||||
"maqa",
|
||||
"kanban"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-jira": {
|
||||
"name": "MAQA Jira Integration",
|
||||
"id": "maqa-jira",
|
||||
"description": "Jira integration for the MAQA extension. Populates Stories from specs, moves issues across board columns as features progress, real-time Subtask ticking.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.0",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-jira/releases/download/maqa-jira-v0.1.0/maqa-jira.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-jira",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-jira",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-jira/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-jira/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"jira",
|
||||
"project-management",
|
||||
"multi-agent",
|
||||
"maqa",
|
||||
"kanban"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-linear": {
|
||||
"name": "MAQA Linear Integration",
|
||||
"id": "maqa-linear",
|
||||
"description": "Linear integration for the MAQA extension. Populates issues from specs, moves items across workflow states as features progress, real-time sub-issue ticking.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.0",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-linear/releases/download/maqa-linear-v0.1.0/maqa-linear.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-linear",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-linear",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-linear/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-linear/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"linear",
|
||||
"project-management",
|
||||
"multi-agent",
|
||||
"maqa",
|
||||
"kanban"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-trello": {
|
||||
"name": "MAQA Trello Integration",
|
||||
"id": "maqa-trello",
|
||||
"description": "Trello board integration for the MAQA extension. Populates board from specs, moves cards between lists as features progress, real-time checklist ticking.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.1",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-trello/releases/download/maqa-trello-v0.1.1/maqa-trello.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-trello",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-trello",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-trello/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-trello/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"trello",
|
||||
"project-management",
|
||||
"multi-agent",
|
||||
"maqa",
|
||||
"kanban"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-26T00:00:00Z",
|
||||
"updated_at": "2026-03-26T00:00:00Z"
|
||||
},
|
||||
"onboard": {
|
||||
"name": "Onboard",
|
||||
"id": "onboard",
|
||||
"description": "Contextual onboarding and progressive growth for developers new to spec-kit projects. Explains specs, maps dependencies, validates understanding, and guides the next step.",
|
||||
"author": "Rafael Sales",
|
||||
"version": "2.1.0",
|
||||
"download_url": "https://github.com/dmux/spec-kit-onboard/archive/refs/tags/v2.1.0.zip",
|
||||
"repository": "https://github.com/dmux/spec-kit-onboard",
|
||||
"homepage": "https://github.com/dmux/spec-kit-onboard",
|
||||
"documentation": "https://github.com/dmux/spec-kit-onboard/blob/main/README.md",
|
||||
"changelog": "https://github.com/dmux/spec-kit-onboard/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 7,
|
||||
"hooks": 3
|
||||
},
|
||||
"tags": [
|
||||
"onboarding",
|
||||
"learning",
|
||||
"mentoring",
|
||||
"developer-experience",
|
||||
"gamification",
|
||||
"knowledge-transfer"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-26T00:00:00Z",
|
||||
"updated_at": "2026-03-26T00:00:00Z"
|
||||
},
|
||||
"plan-review-gate": {
|
||||
"name": "Plan Review Gate",
|
||||
"id": "plan-review-gate",
|
||||
"description": "Require spec.md and plan.md to be merged via MR/PR before allowing task generation",
|
||||
"author": "luno",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/luno/spec-kit-plan-review-gate/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/luno/spec-kit-plan-review-gate",
|
||||
"homepage": "https://github.com/luno/spec-kit-plan-review-gate",
|
||||
"documentation": "https://github.com/luno/spec-kit-plan-review-gate/blob/main/README.md",
|
||||
"changelog": "https://github.com/luno/spec-kit-plan-review-gate/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 1,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": [
|
||||
"review",
|
||||
"quality",
|
||||
"workflow",
|
||||
"gate"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T08:22:30Z",
|
||||
"updated_at": "2026-03-27T08:22:30Z"
|
||||
},
|
||||
"presetify": {
|
||||
"name": "Presetify",
|
||||
"id": "presetify",
|
||||
"description": "Create and validate presets and preset catalogs.",
|
||||
"author": "mnriem",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/mnriem/spec-kit-extensions/releases/download/presetify-v1.0.0/presetify.zip",
|
||||
"repository": "https://github.com/mnriem/spec-kit-extensions",
|
||||
"homepage": "https://github.com/mnriem/spec-kit-extensions",
|
||||
"documentation": "https://github.com/mnriem/spec-kit-extensions/blob/main/presetify/README.md",
|
||||
"changelog": "https://github.com/mnriem/spec-kit-extensions/blob/main/presetify/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.2.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 4,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"presets",
|
||||
"workflow",
|
||||
"templates",
|
||||
"experimental"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-18T00:00:00Z",
|
||||
"updated_at": "2026-03-18T00:00:00Z"
|
||||
},
|
||||
"product-forge": {
|
||||
"name": "Product Forge",
|
||||
"id": "product-forge",
|
||||
"description": "Full product lifecycle: research \u2192 product spec \u2192 SpecKit \u2192 implement \u2192 verify \u2192 test",
|
||||
"author": "VaiYav",
|
||||
"version": "1.1.1",
|
||||
"download_url": "https://github.com/VaiYav/speckit-product-forge/archive/refs/tags/v1.1.1.zip",
|
||||
"repository": "https://github.com/VaiYav/speckit-product-forge",
|
||||
"homepage": "https://github.com/VaiYav/speckit-product-forge",
|
||||
"documentation": "https://github.com/VaiYav/speckit-product-forge/blob/main/README.md",
|
||||
"changelog": "https://github.com/VaiYav/speckit-product-forge/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 10,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"process",
|
||||
"research",
|
||||
"product-spec",
|
||||
"lifecycle",
|
||||
"testing"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-28T00:00:00Z",
|
||||
"updated_at": "2026-03-28T00:00:00Z"
|
||||
},
|
||||
"ralph": {
|
||||
"name": "Ralph Loop",
|
||||
"id": "ralph",
|
||||
@@ -514,6 +991,81 @@
|
||||
"created_at": "2026-03-18T00:00:00Z",
|
||||
"updated_at": "2026-03-18T00:00:00Z"
|
||||
},
|
||||
"status": {
|
||||
"name": "Project Status",
|
||||
"id": "status",
|
||||
"description": "Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary.",
|
||||
"author": "KhawarHabibKhan",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/KhawarHabibKhan/spec-kit-status/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/KhawarHabibKhan/spec-kit-status",
|
||||
"homepage": "https://github.com/KhawarHabibKhan/spec-kit-status",
|
||||
"documentation": "https://github.com/KhawarHabibKhan/spec-kit-status/blob/main/README.md",
|
||||
"changelog": "https://github.com/KhawarHabibKhan/spec-kit-status/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 1,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"status",
|
||||
"workflow",
|
||||
"progress",
|
||||
"feature-tracking",
|
||||
"task-progress"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-16T00:00:00Z",
|
||||
"updated_at": "2026-03-16T00:00:00Z"
|
||||
},
|
||||
"superb": {
|
||||
"name": "Superpowers Bridge",
|
||||
"id": "superb",
|
||||
"description": "Orchestrates obra/superpowers skills within the spec-kit SDD workflow. Thin bridge commands delegate to superpowers' authoritative SKILL.md files at runtime (with graceful fallback), while bridge-original commands provide spec-kit-native value. Eight commands cover the full lifecycle: intent clarification, TDD enforcement, task review, verification, critique, systematic debugging, branch completion, and review response. Hook-bound commands fire automatically; standalone commands are invoked when needed.",
|
||||
"author": "rbbtsn0w",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/RbBtSn0w/spec-kit-extensions/releases/download/superpowers-bridge-v1.0.0/superpowers-bridge.zip",
|
||||
"repository": "https://github.com/RbBtSn0w/spec-kit-extensions",
|
||||
"homepage": "https://github.com/RbBtSn0w/spec-kit-extensions",
|
||||
"documentation": "https://github.com/RbBtSn0w/spec-kit-extensions/blob/main/superpowers-bridge/README.md",
|
||||
"changelog": "https://github.com/RbBtSn0w/spec-kit-extensions/blob/main/superpowers-bridge/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.4.3",
|
||||
"tools": [
|
||||
{
|
||||
"name": "superpowers",
|
||||
"version": ">=5.0.0",
|
||||
"required": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"provides": {
|
||||
"commands": 8,
|
||||
"hooks": 4
|
||||
},
|
||||
"tags": [
|
||||
"methodology",
|
||||
"tdd",
|
||||
"code-review",
|
||||
"workflow",
|
||||
"superpowers",
|
||||
"brainstorming",
|
||||
"verification",
|
||||
"debugging",
|
||||
"branch-management"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-30T00:00:00Z",
|
||||
"updated_at": "2026-03-30T00:00:00Z"
|
||||
},
|
||||
"sync": {
|
||||
"name": "Spec Sync",
|
||||
"id": "sync",
|
||||
@@ -549,7 +1101,7 @@
|
||||
"understanding": {
|
||||
"name": "Understanding",
|
||||
"id": "understanding",
|
||||
"description": "Automated requirements quality analysis \u2014 validates specs against IEEE/ISO standards using 31 deterministic metrics. Catches ambiguity, missing testability, and structural issues before they reach implementation. Includes experimental energy-based ambiguity detection using local LM token perplexity.",
|
||||
"description": "Automated requirements quality analysis — validates specs against IEEE/ISO standards using 31 deterministic metrics. Catches ambiguity, missing testability, and structural issues before they reach implementation. Includes experimental energy-based ambiguity detection using local LM token perplexity.",
|
||||
"author": "Ladislav Bihari",
|
||||
"version": "3.4.0",
|
||||
"download_url": "https://github.com/Testimonial/understanding/archive/refs/tags/v3.4.0.zip",
|
||||
@@ -587,38 +1139,6 @@
|
||||
"created_at": "2026-03-07T00:00:00Z",
|
||||
"updated_at": "2026-03-07T00:00:00Z"
|
||||
},
|
||||
"status": {
|
||||
"name": "Project Status",
|
||||
"id": "status",
|
||||
"description": "Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary.",
|
||||
"author": "KhawarHabibKhan",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/KhawarHabibKhan/spec-kit-status/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/KhawarHabibKhan/spec-kit-status",
|
||||
"homepage": "https://github.com/KhawarHabibKhan/spec-kit-status",
|
||||
"documentation": "https://github.com/KhawarHabibKhan/spec-kit-status/blob/main/README.md",
|
||||
"changelog": "https://github.com/KhawarHabibKhan/spec-kit-status/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 1,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"status",
|
||||
"workflow",
|
||||
"progress",
|
||||
"feature-tracking",
|
||||
"task-progress"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-16T00:00:00Z",
|
||||
"updated_at": "2026-03-16T00:00:00Z"
|
||||
},
|
||||
"v-model": {
|
||||
"name": "V-Model Extension Pack",
|
||||
"id": "v-model",
|
||||
@@ -651,37 +1171,6 @@
|
||||
"created_at": "2026-02-20T00:00:00Z",
|
||||
"updated_at": "2026-02-22T00:00:00Z"
|
||||
},
|
||||
"learn": {
|
||||
"name": "Learning Extension",
|
||||
"id": "learn",
|
||||
"description": "Generate educational guides from implementations and enhance clarifications with mentoring context.",
|
||||
"author": "Vianca Martinez",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/imviancagrace/spec-kit-learn/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/imviancagrace/spec-kit-learn",
|
||||
"homepage": "https://github.com/imviancagrace/spec-kit-learn",
|
||||
"documentation": "https://github.com/imviancagrace/spec-kit-learn/blob/main/README.md",
|
||||
"changelog": "https://github.com/imviancagrace/spec-kit-learn/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": [
|
||||
"learning",
|
||||
"education",
|
||||
"mentoring",
|
||||
"knowledge-transfer"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-17T00:00:00Z",
|
||||
"updated_at": "2026-03-17T00:00:00Z"
|
||||
},
|
||||
"verify": {
|
||||
"name": "Verify Extension",
|
||||
"id": "verify",
|
||||
|
||||
@@ -47,8 +47,8 @@ provides:
|
||||
- name: "speckit.my-extension.example"
|
||||
file: "commands/example.md"
|
||||
description: "Example command that demonstrates functionality"
|
||||
# Optional: Add aliases for shorter command names
|
||||
aliases: ["speckit.example"]
|
||||
# Optional: Add aliases in the same namespaced format
|
||||
aliases: ["speckit.my-extension.example-short"]
|
||||
|
||||
# ADD MORE COMMANDS: Copy this block for each command
|
||||
# - name: "speckit.my-extension.another-command"
|
||||
|
||||
@@ -1,6 +1,58 @@
|
||||
{
|
||||
"schema_version": "1.0",
|
||||
"updated_at": "2026-03-09T00:00:00Z",
|
||||
"updated_at": "2026-03-24T00:00:00Z",
|
||||
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/presets/catalog.community.json",
|
||||
"presets": {}
|
||||
"presets": {
|
||||
"aide-in-place": {
|
||||
"name": "AIDE In-Place Migration",
|
||||
"id": "aide-in-place",
|
||||
"version": "1.0.0",
|
||||
"description": "Adapts the AIDE workflow for in-place technology migrations (X → Y pattern). Overrides vision, roadmap, progress, and work item commands with migration-specific guidance.",
|
||||
"author": "mnriem",
|
||||
"repository": "https://github.com/mnriem/spec-kit-presets",
|
||||
"download_url": "https://github.com/mnriem/spec-kit-presets/releases/download/aide-in-place-v1.0.0/aide-in-place.zip",
|
||||
"homepage": "https://github.com/mnriem/spec-kit-presets",
|
||||
"documentation": "https://github.com/mnriem/spec-kit-presets/blob/main/aide-in-place/README.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.2.0",
|
||||
"extensions": ["aide"]
|
||||
},
|
||||
"provides": {
|
||||
"templates": 2,
|
||||
"commands": 8
|
||||
},
|
||||
"tags": [
|
||||
"migration",
|
||||
"in-place",
|
||||
"brownfield",
|
||||
"aide"
|
||||
]
|
||||
},
|
||||
"pirate": {
|
||||
"name": "Pirate Speak (Full)",
|
||||
"id": "pirate",
|
||||
"version": "1.0.0",
|
||||
"description": "Arrr! Transforms all Spec Kit output into pirate speak. Specs, plans, and tasks be written fer scallywags.",
|
||||
"author": "mnriem",
|
||||
"repository": "https://github.com/mnriem/spec-kit-presets",
|
||||
"download_url": "https://github.com/mnriem/spec-kit-presets/releases/download/pirate-v1.0.0/pirate.zip",
|
||||
"homepage": "https://github.com/mnriem/spec-kit-presets",
|
||||
"documentation": "https://github.com/mnriem/spec-kit-presets/blob/main/pirate/README.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"templates": 6,
|
||||
"commands": 9
|
||||
},
|
||||
"tags": [
|
||||
"pirate",
|
||||
"theme",
|
||||
"fun",
|
||||
"experimental"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[project]
|
||||
name = "specify-cli"
|
||||
version = "0.4.0"
|
||||
version = "0.4.3"
|
||||
description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)."
|
||||
requires-python = ">=3.11"
|
||||
dependencies = [
|
||||
|
||||
@@ -1,15 +1,48 @@
|
||||
#!/usr/bin/env bash
|
||||
# Common functions and variables for all scripts
|
||||
|
||||
# Get repository root, with fallback for non-git repositories
|
||||
# Find repository root by searching upward for .specify directory
|
||||
# This is the primary marker for spec-kit projects
|
||||
find_specify_root() {
|
||||
local dir="${1:-$(pwd)}"
|
||||
# Normalize to absolute path to prevent infinite loop with relative paths
|
||||
# Use -- to handle paths starting with - (e.g., -P, -L)
|
||||
dir="$(cd -- "$dir" 2>/dev/null && pwd)" || return 1
|
||||
local prev_dir=""
|
||||
while true; do
|
||||
if [ -d "$dir/.specify" ]; then
|
||||
echo "$dir"
|
||||
return 0
|
||||
fi
|
||||
# Stop if we've reached filesystem root or dirname stops changing
|
||||
if [ "$dir" = "/" ] || [ "$dir" = "$prev_dir" ]; then
|
||||
break
|
||||
fi
|
||||
prev_dir="$dir"
|
||||
dir="$(dirname "$dir")"
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
# Get repository root, prioritizing .specify directory over git
|
||||
# This prevents using a parent git repo when spec-kit is initialized in a subdirectory
|
||||
get_repo_root() {
|
||||
# First, look for .specify directory (spec-kit's own marker)
|
||||
local specify_root
|
||||
if specify_root=$(find_specify_root); then
|
||||
echo "$specify_root"
|
||||
return
|
||||
fi
|
||||
|
||||
# Fallback to git if no .specify found
|
||||
if git rev-parse --show-toplevel >/dev/null 2>&1; then
|
||||
git rev-parse --show-toplevel
|
||||
else
|
||||
# Fall back to script location for non-git repos
|
||||
return
|
||||
fi
|
||||
|
||||
# Final fallback to script location for non-git repos
|
||||
local script_dir="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
(cd "$script_dir/../../.." && pwd)
|
||||
fi
|
||||
}
|
||||
|
||||
# Get current branch, with fallback for non-git repositories
|
||||
@@ -20,14 +53,14 @@ get_current_branch() {
|
||||
return
|
||||
fi
|
||||
|
||||
# Then check git if available
|
||||
if git rev-parse --abbrev-ref HEAD >/dev/null 2>&1; then
|
||||
git rev-parse --abbrev-ref HEAD
|
||||
# Then check git if available at the spec-kit root (not parent)
|
||||
local repo_root=$(get_repo_root)
|
||||
if has_git; then
|
||||
git -C "$repo_root" rev-parse --abbrev-ref HEAD
|
||||
return
|
||||
fi
|
||||
|
||||
# For non-git repos, try to find the latest feature directory
|
||||
local repo_root=$(get_repo_root)
|
||||
local specs_dir="$repo_root/specs"
|
||||
|
||||
if [[ -d "$specs_dir" ]]; then
|
||||
@@ -68,9 +101,17 @@ get_current_branch() {
|
||||
echo "main" # Final fallback
|
||||
}
|
||||
|
||||
# Check if we have git available
|
||||
# Check if we have git available at the spec-kit root level
|
||||
# Returns true only if git is installed and the repo root is inside a git work tree
|
||||
# Handles both regular repos (.git directory) and worktrees/submodules (.git file)
|
||||
has_git() {
|
||||
git rev-parse --show-toplevel >/dev/null 2>&1
|
||||
# First check if git command is available (before calling get_repo_root which may use git)
|
||||
command -v git >/dev/null 2>&1 || return 1
|
||||
local repo_root=$(get_repo_root)
|
||||
# Check if .git exists (directory or file for worktrees/submodules)
|
||||
[ -e "$repo_root/.git" ] || return 1
|
||||
# Verify it's actually a valid git work tree
|
||||
git -C "$repo_root" rev-parse --is-inside-work-tree >/dev/null 2>&1
|
||||
}
|
||||
|
||||
check_feature_branch() {
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
set -e
|
||||
|
||||
JSON_MODE=false
|
||||
ALLOW_EXISTING=false
|
||||
SHORT_NAME=""
|
||||
BRANCH_NUMBER=""
|
||||
USE_TIMESTAMP=false
|
||||
@@ -14,6 +15,9 @@ while [ $i -le $# ]; do
|
||||
--json)
|
||||
JSON_MODE=true
|
||||
;;
|
||||
--allow-existing-branch)
|
||||
ALLOW_EXISTING=true
|
||||
;;
|
||||
--short-name)
|
||||
if [ $((i + 1)) -gt $# ]; then
|
||||
echo 'Error: --short-name requires a value' >&2
|
||||
@@ -45,10 +49,11 @@ while [ $i -le $# ]; do
|
||||
USE_TIMESTAMP=true
|
||||
;;
|
||||
--help|-h)
|
||||
echo "Usage: $0 [--json] [--short-name <name>] [--number N] [--timestamp] <feature_description>"
|
||||
echo "Usage: $0 [--json] [--allow-existing-branch] [--short-name <name>] [--number N] [--timestamp] <feature_description>"
|
||||
echo ""
|
||||
echo "Options:"
|
||||
echo " --json Output in JSON format"
|
||||
echo " --allow-existing-branch Switch to branch if it already exists instead of failing"
|
||||
echo " --short-name <name> Provide a custom short name (2-4 words) for the branch"
|
||||
echo " --number N Specify branch number manually (overrides auto-detection)"
|
||||
echo " --timestamp Use timestamp prefix (YYYYMMDD-HHMMSS) instead of sequential numbering"
|
||||
@@ -69,7 +74,7 @@ done
|
||||
|
||||
FEATURE_DESCRIPTION="${ARGS[*]}"
|
||||
if [ -z "$FEATURE_DESCRIPTION" ]; then
|
||||
echo "Usage: $0 [--json] [--short-name <name>] [--number N] [--timestamp] <feature_description>" >&2
|
||||
echo "Usage: $0 [--json] [--allow-existing-branch] [--short-name <name>] [--number N] [--timestamp] <feature_description>" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -80,19 +85,6 @@ if [ -z "$FEATURE_DESCRIPTION" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Function to find the repository root by searching for existing project markers
|
||||
find_repo_root() {
|
||||
local dir="$1"
|
||||
while [ "$dir" != "/" ]; do
|
||||
if [ -d "$dir/.git" ] || [ -d "$dir/.specify" ]; then
|
||||
echo "$dir"
|
||||
return 0
|
||||
fi
|
||||
dir="$(dirname "$dir")"
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
# Function to get highest number from specs directory
|
||||
get_highest_from_specs() {
|
||||
local specs_dir="$1"
|
||||
@@ -102,9 +94,9 @@ get_highest_from_specs() {
|
||||
for dir in "$specs_dir"/*; do
|
||||
[ -d "$dir" ] || continue
|
||||
dirname=$(basename "$dir")
|
||||
# Only match sequential prefixes (###-*), skip timestamp dirs
|
||||
if echo "$dirname" | grep -q '^[0-9]\{3\}-'; then
|
||||
number=$(echo "$dirname" | grep -o '^[0-9]\{3\}')
|
||||
# Match sequential prefixes (>=3 digits), but skip timestamp dirs.
|
||||
if echo "$dirname" | grep -Eq '^[0-9]{3,}-' && ! echo "$dirname" | grep -Eq '^[0-9]{8}-[0-9]{6}-'; then
|
||||
number=$(echo "$dirname" | grep -Eo '^[0-9]+')
|
||||
number=$((10#$number))
|
||||
if [ "$number" -gt "$highest" ]; then
|
||||
highest=$number
|
||||
@@ -128,9 +120,9 @@ get_highest_from_branches() {
|
||||
# Clean branch name: remove leading markers and remote prefixes
|
||||
clean_branch=$(echo "$branch" | sed 's/^[* ]*//; s|^remotes/[^/]*/||')
|
||||
|
||||
# Extract feature number if branch matches pattern ###-*
|
||||
if echo "$clean_branch" | grep -q '^[0-9]\{3\}-'; then
|
||||
number=$(echo "$clean_branch" | grep -o '^[0-9]\{3\}' || echo "0")
|
||||
# Extract sequential feature number (>=3 digits), skip timestamp branches.
|
||||
if echo "$clean_branch" | grep -Eq '^[0-9]{3,}-' && ! echo "$clean_branch" | grep -Eq '^[0-9]{8}-[0-9]{6}-'; then
|
||||
number=$(echo "$clean_branch" | grep -Eo '^[0-9]+' || echo "0")
|
||||
number=$((10#$number))
|
||||
if [ "$number" -gt "$highest" ]; then
|
||||
highest=$number
|
||||
@@ -171,21 +163,16 @@ clean_branch_name() {
|
||||
echo "$name" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//'
|
||||
}
|
||||
|
||||
# Resolve repository root. Prefer git information when available, but fall back
|
||||
# to searching for repository markers so the workflow still functions in repositories that
|
||||
# were initialised with --no-git.
|
||||
# Resolve repository root using common.sh functions which prioritize .specify over git
|
||||
SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
source "$SCRIPT_DIR/common.sh"
|
||||
|
||||
if git rev-parse --show-toplevel >/dev/null 2>&1; then
|
||||
REPO_ROOT=$(git rev-parse --show-toplevel)
|
||||
REPO_ROOT=$(get_repo_root)
|
||||
|
||||
# Check if git is available at this repo root (not a parent)
|
||||
if has_git; then
|
||||
HAS_GIT=true
|
||||
else
|
||||
REPO_ROOT="$(find_repo_root "$SCRIPT_DIR")"
|
||||
if [ -z "$REPO_ROOT" ]; then
|
||||
echo "Error: Could not determine repository root. Please run this script from within the repository." >&2
|
||||
exit 1
|
||||
fi
|
||||
HAS_GIT=false
|
||||
fi
|
||||
|
||||
@@ -305,12 +292,19 @@ if [ "$HAS_GIT" = true ]; then
|
||||
if ! git checkout -b "$BRANCH_NAME" 2>/dev/null; then
|
||||
# Check if branch already exists
|
||||
if git branch --list "$BRANCH_NAME" | grep -q .; then
|
||||
if [ "$USE_TIMESTAMP" = true ]; then
|
||||
if [ "$ALLOW_EXISTING" = true ]; then
|
||||
# Switch to the existing branch instead of failing
|
||||
if ! git checkout "$BRANCH_NAME" 2>/dev/null; then
|
||||
>&2 echo "Error: Failed to switch to existing branch '$BRANCH_NAME'. Please resolve any local changes or conflicts and try again."
|
||||
exit 1
|
||||
fi
|
||||
elif [ "$USE_TIMESTAMP" = true ]; then
|
||||
>&2 echo "Error: Branch '$BRANCH_NAME' already exists. Rerun to get a new timestamp or use a different --short-name."
|
||||
exit 1
|
||||
else
|
||||
>&2 echo "Error: Branch '$BRANCH_NAME' already exists. Please use a different feature name or specify a different number with --number."
|
||||
fi
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
>&2 echo "Error: Failed to create git branch '$BRANCH_NAME'. Please check your git configuration and try again."
|
||||
exit 1
|
||||
@@ -323,14 +317,16 @@ fi
|
||||
FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
|
||||
mkdir -p "$FEATURE_DIR"
|
||||
|
||||
TEMPLATE=$(resolve_template "spec-template" "$REPO_ROOT") || true
|
||||
SPEC_FILE="$FEATURE_DIR/spec.md"
|
||||
if [ ! -f "$SPEC_FILE" ]; then
|
||||
TEMPLATE=$(resolve_template "spec-template" "$REPO_ROOT") || true
|
||||
if [ -n "$TEMPLATE" ] && [ -f "$TEMPLATE" ]; then
|
||||
cp "$TEMPLATE" "$SPEC_FILE"
|
||||
else
|
||||
echo "Warning: Spec template not found; created empty spec file" >&2
|
||||
touch "$SPEC_FILE"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Inform the user how to persist the feature variable in their own shell
|
||||
printf '# To persist: export SPECIFY_FEATURE=%q\n' "$BRANCH_NAME" >&2
|
||||
|
||||
@@ -63,7 +63,7 @@ AGENT_TYPE="${1:-}"
|
||||
# Agent-specific file paths
|
||||
CLAUDE_FILE="$REPO_ROOT/CLAUDE.md"
|
||||
GEMINI_FILE="$REPO_ROOT/GEMINI.md"
|
||||
COPILOT_FILE="$REPO_ROOT/.github/agents/copilot-instructions.md"
|
||||
COPILOT_FILE="$REPO_ROOT/.github/copilot-instructions.md"
|
||||
CURSOR_FILE="$REPO_ROOT/.cursor/rules/specify-rules.mdc"
|
||||
QWEN_FILE="$REPO_ROOT/QWEN.md"
|
||||
AGENTS_FILE="$REPO_ROOT/AGENTS.md"
|
||||
|
||||
@@ -1,7 +1,39 @@
|
||||
#!/usr/bin/env pwsh
|
||||
# Common PowerShell functions analogous to common.sh
|
||||
|
||||
# Find repository root by searching upward for .specify directory
|
||||
# This is the primary marker for spec-kit projects
|
||||
function Find-SpecifyRoot {
|
||||
param([string]$StartDir = (Get-Location).Path)
|
||||
|
||||
# Normalize to absolute path to prevent issues with relative paths
|
||||
# Use -LiteralPath to handle paths with wildcard characters ([, ], *, ?)
|
||||
$resolved = Resolve-Path -LiteralPath $StartDir -ErrorAction SilentlyContinue
|
||||
$current = if ($resolved) { $resolved.Path } else { $null }
|
||||
if (-not $current) { return $null }
|
||||
|
||||
while ($true) {
|
||||
if (Test-Path -LiteralPath (Join-Path $current ".specify") -PathType Container) {
|
||||
return $current
|
||||
}
|
||||
$parent = Split-Path $current -Parent
|
||||
if ([string]::IsNullOrEmpty($parent) -or $parent -eq $current) {
|
||||
return $null
|
||||
}
|
||||
$current = $parent
|
||||
}
|
||||
}
|
||||
|
||||
# Get repository root, prioritizing .specify directory over git
|
||||
# This prevents using a parent git repo when spec-kit is initialized in a subdirectory
|
||||
function Get-RepoRoot {
|
||||
# First, look for .specify directory (spec-kit's own marker)
|
||||
$specifyRoot = Find-SpecifyRoot
|
||||
if ($specifyRoot) {
|
||||
return $specifyRoot
|
||||
}
|
||||
|
||||
# Fallback to git if no .specify found
|
||||
try {
|
||||
$result = git rev-parse --show-toplevel 2>$null
|
||||
if ($LASTEXITCODE -eq 0) {
|
||||
@@ -11,8 +43,9 @@ function Get-RepoRoot {
|
||||
# Git command failed
|
||||
}
|
||||
|
||||
# Fall back to script location for non-git repos
|
||||
return (Resolve-Path (Join-Path $PSScriptRoot "../../..")).Path
|
||||
# Final fallback to script location for non-git repos
|
||||
# Use -LiteralPath to handle paths with wildcard characters
|
||||
return (Resolve-Path -LiteralPath (Join-Path $PSScriptRoot "../../..")).Path
|
||||
}
|
||||
|
||||
function Get-CurrentBranch {
|
||||
@@ -21,18 +54,20 @@ function Get-CurrentBranch {
|
||||
return $env:SPECIFY_FEATURE
|
||||
}
|
||||
|
||||
# Then check git if available
|
||||
# Then check git if available at the spec-kit root (not parent)
|
||||
$repoRoot = Get-RepoRoot
|
||||
if (Test-HasGit) {
|
||||
try {
|
||||
$result = git rev-parse --abbrev-ref HEAD 2>$null
|
||||
$result = git -C $repoRoot rev-parse --abbrev-ref HEAD 2>$null
|
||||
if ($LASTEXITCODE -eq 0) {
|
||||
return $result
|
||||
}
|
||||
} catch {
|
||||
# Git command failed
|
||||
}
|
||||
}
|
||||
|
||||
# For non-git repos, try to find the latest feature directory
|
||||
$repoRoot = Get-RepoRoot
|
||||
$specsDir = Join-Path $repoRoot "specs"
|
||||
|
||||
if (Test-Path $specsDir) {
|
||||
@@ -69,9 +104,23 @@ function Get-CurrentBranch {
|
||||
return "main"
|
||||
}
|
||||
|
||||
# Check if we have git available at the spec-kit root level
|
||||
# Returns true only if git is installed and the repo root is inside a git work tree
|
||||
# Handles both regular repos (.git directory) and worktrees/submodules (.git file)
|
||||
function Test-HasGit {
|
||||
# First check if git command is available (before calling Get-RepoRoot which may use git)
|
||||
if (-not (Get-Command git -ErrorAction SilentlyContinue)) {
|
||||
return $false
|
||||
}
|
||||
$repoRoot = Get-RepoRoot
|
||||
# Check if .git exists (directory or file for worktrees/submodules)
|
||||
# Use -LiteralPath to handle paths with wildcard characters
|
||||
if (-not (Test-Path -LiteralPath (Join-Path $repoRoot ".git"))) {
|
||||
return $false
|
||||
}
|
||||
# Verify it's actually a valid git work tree
|
||||
try {
|
||||
git rev-parse --show-toplevel 2>$null | Out-Null
|
||||
$null = git -C $repoRoot rev-parse --is-inside-work-tree 2>$null
|
||||
return ($LASTEXITCODE -eq 0)
|
||||
} catch {
|
||||
return $false
|
||||
|
||||
@@ -3,9 +3,10 @@
|
||||
[CmdletBinding()]
|
||||
param(
|
||||
[switch]$Json,
|
||||
[switch]$AllowExistingBranch,
|
||||
[string]$ShortName,
|
||||
[Parameter()]
|
||||
[int]$Number = 0,
|
||||
[long]$Number = 0,
|
||||
[switch]$Timestamp,
|
||||
[switch]$Help,
|
||||
[Parameter(Position = 0, ValueFromRemainingArguments = $true)]
|
||||
@@ -15,10 +16,11 @@ $ErrorActionPreference = 'Stop'
|
||||
|
||||
# Show help if requested
|
||||
if ($Help) {
|
||||
Write-Host "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
|
||||
Write-Host "Usage: ./create-new-feature.ps1 [-Json] [-AllowExistingBranch] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
|
||||
Write-Host ""
|
||||
Write-Host "Options:"
|
||||
Write-Host " -Json Output in JSON format"
|
||||
Write-Host " -AllowExistingBranch Switch to branch if it already exists instead of failing"
|
||||
Write-Host " -ShortName <name> Provide a custom short name (2-4 words) for the branch"
|
||||
Write-Host " -Number N Specify branch number manually (overrides auto-detection)"
|
||||
Write-Host " -Timestamp Use timestamp prefix (YYYYMMDD-HHMMSS) instead of sequential numbering"
|
||||
@@ -33,7 +35,7 @@ if ($Help) {
|
||||
|
||||
# Check if feature description provided
|
||||
if (-not $FeatureDescription -or $FeatureDescription.Count -eq 0) {
|
||||
Write-Error "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
|
||||
Write-Error "Usage: ./create-new-feature.ps1 [-Json] [-AllowExistingBranch] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
|
||||
exit 1
|
||||
}
|
||||
|
||||
@@ -45,39 +47,18 @@ if ([string]::IsNullOrWhiteSpace($featureDesc)) {
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Resolve repository root. Prefer git information when available, but fall back
|
||||
# to searching for repository markers so the workflow still functions in repositories that
|
||||
# were initialized with --no-git.
|
||||
function Find-RepositoryRoot {
|
||||
param(
|
||||
[string]$StartDir,
|
||||
[string[]]$Markers = @('.git', '.specify')
|
||||
)
|
||||
$current = Resolve-Path $StartDir
|
||||
while ($true) {
|
||||
foreach ($marker in $Markers) {
|
||||
if (Test-Path (Join-Path $current $marker)) {
|
||||
return $current
|
||||
}
|
||||
}
|
||||
$parent = Split-Path $current -Parent
|
||||
if ($parent -eq $current) {
|
||||
# Reached filesystem root without finding markers
|
||||
return $null
|
||||
}
|
||||
$current = $parent
|
||||
}
|
||||
}
|
||||
|
||||
function Get-HighestNumberFromSpecs {
|
||||
param([string]$SpecsDir)
|
||||
|
||||
$highest = 0
|
||||
[long]$highest = 0
|
||||
if (Test-Path $SpecsDir) {
|
||||
Get-ChildItem -Path $SpecsDir -Directory | ForEach-Object {
|
||||
if ($_.Name -match '^(\d{3})-') {
|
||||
$num = [int]$matches[1]
|
||||
if ($num -gt $highest) { $highest = $num }
|
||||
# Match sequential prefixes (>=3 digits), but skip timestamp dirs.
|
||||
if ($_.Name -match '^(\d{3,})-' -and $_.Name -notmatch '^\d{8}-\d{6}-') {
|
||||
[long]$num = 0
|
||||
if ([long]::TryParse($matches[1], [ref]$num) -and $num -gt $highest) {
|
||||
$highest = $num
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -87,7 +68,7 @@ function Get-HighestNumberFromSpecs {
|
||||
function Get-HighestNumberFromBranches {
|
||||
param()
|
||||
|
||||
$highest = 0
|
||||
[long]$highest = 0
|
||||
try {
|
||||
$branches = git branch -a 2>$null
|
||||
if ($LASTEXITCODE -eq 0) {
|
||||
@@ -95,10 +76,12 @@ function Get-HighestNumberFromBranches {
|
||||
# Clean branch name: remove leading markers and remote prefixes
|
||||
$cleanBranch = $branch.Trim() -replace '^\*?\s+', '' -replace '^remotes/[^/]+/', ''
|
||||
|
||||
# Extract feature number if branch matches pattern ###-*
|
||||
if ($cleanBranch -match '^(\d{3})-') {
|
||||
$num = [int]$matches[1]
|
||||
if ($num -gt $highest) { $highest = $num }
|
||||
# Extract sequential feature number (>=3 digits), skip timestamp branches.
|
||||
if ($cleanBranch -match '^(\d{3,})-' -and $cleanBranch -notmatch '^\d{8}-\d{6}-') {
|
||||
[long]$num = 0
|
||||
if ([long]::TryParse($matches[1], [ref]$num) -and $num -gt $highest) {
|
||||
$highest = $num
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -139,26 +122,14 @@ function ConvertTo-CleanBranchName {
|
||||
|
||||
return $Name.ToLower() -replace '[^a-z0-9]', '-' -replace '-{2,}', '-' -replace '^-', '' -replace '-$', ''
|
||||
}
|
||||
$fallbackRoot = (Find-RepositoryRoot -StartDir $PSScriptRoot)
|
||||
if (-not $fallbackRoot) {
|
||||
Write-Error "Error: Could not determine repository root. Please run this script from within the repository."
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Load common functions (includes Resolve-Template)
|
||||
# Load common functions (includes Get-RepoRoot, Test-HasGit, Resolve-Template)
|
||||
. "$PSScriptRoot/common.ps1"
|
||||
|
||||
try {
|
||||
$repoRoot = git rev-parse --show-toplevel 2>$null
|
||||
if ($LASTEXITCODE -eq 0) {
|
||||
$hasGit = $true
|
||||
} else {
|
||||
throw "Git not available"
|
||||
}
|
||||
} catch {
|
||||
$repoRoot = $fallbackRoot
|
||||
$hasGit = $false
|
||||
}
|
||||
# Use common.ps1 functions which prioritize .specify over git
|
||||
$repoRoot = Get-RepoRoot
|
||||
|
||||
# Check if git is available at this repo root (not a parent)
|
||||
$hasGit = Test-HasGit
|
||||
|
||||
Set-Location $repoRoot
|
||||
|
||||
@@ -282,12 +253,20 @@ if ($hasGit) {
|
||||
# Check if branch already exists
|
||||
$existingBranch = git branch --list $branchName 2>$null
|
||||
if ($existingBranch) {
|
||||
if ($Timestamp) {
|
||||
if ($AllowExistingBranch) {
|
||||
# Switch to the existing branch instead of failing
|
||||
git checkout -q $branchName 2>$null | Out-Null
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
Write-Error "Error: Branch '$branchName' exists but could not be checked out. Resolve any uncommitted changes or conflicts and try again."
|
||||
exit 1
|
||||
}
|
||||
} elseif ($Timestamp) {
|
||||
Write-Error "Error: Branch '$branchName' already exists. Rerun to get a new timestamp or use a different -ShortName."
|
||||
exit 1
|
||||
} else {
|
||||
Write-Error "Error: Branch '$branchName' already exists. Please use a different feature name or specify a different number with -Number."
|
||||
}
|
||||
exit 1
|
||||
}
|
||||
} else {
|
||||
Write-Error "Error: Failed to create git branch '$branchName'. Please check your git configuration and try again."
|
||||
exit 1
|
||||
@@ -300,13 +279,15 @@ if ($hasGit) {
|
||||
$featureDir = Join-Path $specsDir $branchName
|
||||
New-Item -ItemType Directory -Path $featureDir -Force | Out-Null
|
||||
|
||||
$template = Resolve-Template -TemplateName 'spec-template' -RepoRoot $repoRoot
|
||||
$specFile = Join-Path $featureDir 'spec.md'
|
||||
if (-not (Test-Path -PathType Leaf $specFile)) {
|
||||
$template = Resolve-Template -TemplateName 'spec-template' -RepoRoot $repoRoot
|
||||
if ($template -and (Test-Path $template)) {
|
||||
Copy-Item $template $specFile -Force
|
||||
} else {
|
||||
New-Item -ItemType File -Path $specFile | Out-Null
|
||||
}
|
||||
}
|
||||
|
||||
# Set the SPECIFY_FEATURE environment variable for the current session
|
||||
$env:SPECIFY_FEATURE = $branchName
|
||||
@@ -326,4 +307,3 @@ if ($Json) {
|
||||
Write-Output "HAS_GIT: $hasGit"
|
||||
Write-Output "SPECIFY_FEATURE environment variable set to: $branchName"
|
||||
}
|
||||
|
||||
|
||||
@@ -46,7 +46,7 @@ $NEW_PLAN = $IMPL_PLAN
|
||||
# Agent file paths
|
||||
$CLAUDE_FILE = Join-Path $REPO_ROOT 'CLAUDE.md'
|
||||
$GEMINI_FILE = Join-Path $REPO_ROOT 'GEMINI.md'
|
||||
$COPILOT_FILE = Join-Path $REPO_ROOT '.github/agents/copilot-instructions.md'
|
||||
$COPILOT_FILE = Join-Path $REPO_ROOT '.github/copilot-instructions.md'
|
||||
$CURSOR_FILE = Join-Path $REPO_ROOT '.cursor/rules/specify-rules.mdc'
|
||||
$QWEN_FILE = Join-Path $REPO_ROOT 'QWEN.md'
|
||||
$AGENTS_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
|
||||
|
||||
@@ -345,6 +345,7 @@ AI_ASSISTANT_HELP = _build_ai_assistant_help()
|
||||
SCRIPT_TYPE_CHOICES = {"sh": "POSIX Shell (bash/zsh)", "ps": "PowerShell"}
|
||||
|
||||
CLAUDE_LOCAL_PATH = Path.home() / ".claude" / "local" / "claude"
|
||||
CLAUDE_NPM_LOCAL_PATH = Path.home() / ".claude" / "local" / "node_modules" / ".bin" / "claude"
|
||||
|
||||
BANNER = """
|
||||
███████╗██████╗ ███████╗ ██████╗██╗███████╗██╗ ██╗
|
||||
@@ -605,13 +606,15 @@ def check_tool(tool: str, tracker: StepTracker = None) -> bool:
|
||||
Returns:
|
||||
True if tool is found, False otherwise
|
||||
"""
|
||||
# Special handling for Claude CLI after `claude migrate-installer`
|
||||
# Special handling for Claude CLI local installs
|
||||
# See: https://github.com/github/spec-kit/issues/123
|
||||
# The migrate-installer command REMOVES the original executable from PATH
|
||||
# and creates an alias at ~/.claude/local/claude instead
|
||||
# This path should be prioritized over other claude executables in PATH
|
||||
# See: https://github.com/github/spec-kit/issues/550
|
||||
# Claude Code can be installed in two local paths:
|
||||
# 1. ~/.claude/local/claude (after `claude migrate-installer`)
|
||||
# 2. ~/.claude/local/node_modules/.bin/claude (npm-local install, e.g. via nvm)
|
||||
# Neither path may be on the system PATH, so we check them explicitly.
|
||||
if tool == "claude":
|
||||
if CLAUDE_LOCAL_PATH.exists() and CLAUDE_LOCAL_PATH.is_file():
|
||||
if CLAUDE_LOCAL_PATH.is_file() or CLAUDE_NPM_LOCAL_PATH.is_file():
|
||||
if tracker:
|
||||
tracker.complete(tool, "available")
|
||||
return True
|
||||
@@ -1490,12 +1493,6 @@ def load_init_options(project_path: Path) -> dict[str, Any]:
|
||||
return {}
|
||||
|
||||
|
||||
# Agent-specific skill directory overrides for agents whose skills directory
|
||||
# doesn't follow the standard <agent_folder>/skills/ pattern
|
||||
AGENT_SKILLS_DIR_OVERRIDES = {
|
||||
"codex": ".agents/skills", # Codex agent layout override
|
||||
}
|
||||
|
||||
# Default skills directory for agents not in AGENT_CONFIG
|
||||
DEFAULT_SKILLS_DIR = ".agents/skills"
|
||||
|
||||
@@ -1528,13 +1525,9 @@ SKILL_DESCRIPTIONS = {
|
||||
def _get_skills_dir(project_path: Path, selected_ai: str) -> Path:
|
||||
"""Resolve the agent-specific skills directory for the given AI assistant.
|
||||
|
||||
Uses ``AGENT_SKILLS_DIR_OVERRIDES`` first, then falls back to
|
||||
``AGENT_CONFIG[agent]["folder"] + "skills"``, and finally to
|
||||
``DEFAULT_SKILLS_DIR``.
|
||||
Uses ``AGENT_CONFIG[agent]["folder"] + "skills"`` and falls back to
|
||||
``DEFAULT_SKILLS_DIR`` for unknown agents.
|
||||
"""
|
||||
if selected_ai in AGENT_SKILLS_DIR_OVERRIDES:
|
||||
return project_path / AGENT_SKILLS_DIR_OVERRIDES[selected_ai]
|
||||
|
||||
agent_config = AGENT_CONFIG.get(selected_ai, {})
|
||||
agent_folder = agent_config.get("folder", "")
|
||||
if agent_folder:
|
||||
@@ -1648,10 +1641,7 @@ def install_ai_skills(
|
||||
command_name = command_name[len("speckit."):]
|
||||
if command_name.endswith(".agent"):
|
||||
command_name = command_name[:-len(".agent")]
|
||||
if selected_ai == "kimi":
|
||||
skill_name = f"speckit.{command_name}"
|
||||
else:
|
||||
skill_name = f"speckit-{command_name}"
|
||||
skill_name = f"speckit-{command_name.replace('.', '-')}"
|
||||
|
||||
# Create skill directory (additive — never removes existing content)
|
||||
skill_dir = skills_dir / skill_name
|
||||
@@ -1730,8 +1720,64 @@ def _has_bundled_skills(project_path: Path, selected_ai: str) -> bool:
|
||||
if not skills_dir.is_dir():
|
||||
return False
|
||||
|
||||
pattern = "speckit.*/SKILL.md" if selected_ai == "kimi" else "speckit-*/SKILL.md"
|
||||
return any(skills_dir.glob(pattern))
|
||||
return any(skills_dir.glob("speckit-*/SKILL.md"))
|
||||
|
||||
|
||||
def _migrate_legacy_kimi_dotted_skills(skills_dir: Path) -> tuple[int, int]:
|
||||
"""Migrate legacy Kimi dotted skill dirs (speckit.xxx) to hyphenated format.
|
||||
|
||||
Temporary migration helper:
|
||||
- Intended removal window: after 2026-06-25.
|
||||
- Purpose: one-time cleanup for projects initialized before Kimi moved to
|
||||
hyphenated skills (speckit-xxx).
|
||||
|
||||
Returns:
|
||||
Tuple[migrated_count, removed_count]
|
||||
- migrated_count: old dotted dir renamed to hyphenated dir
|
||||
- removed_count: old dotted dir deleted when equivalent hyphenated dir existed
|
||||
"""
|
||||
if not skills_dir.is_dir():
|
||||
return (0, 0)
|
||||
|
||||
migrated_count = 0
|
||||
removed_count = 0
|
||||
|
||||
for legacy_dir in sorted(skills_dir.glob("speckit.*")):
|
||||
if not legacy_dir.is_dir():
|
||||
continue
|
||||
if not (legacy_dir / "SKILL.md").exists():
|
||||
continue
|
||||
|
||||
suffix = legacy_dir.name[len("speckit."):]
|
||||
if not suffix:
|
||||
continue
|
||||
|
||||
target_dir = skills_dir / f"speckit-{suffix.replace('.', '-')}"
|
||||
|
||||
if not target_dir.exists():
|
||||
shutil.move(str(legacy_dir), str(target_dir))
|
||||
migrated_count += 1
|
||||
continue
|
||||
|
||||
# If the new target already exists, avoid destructive cleanup unless
|
||||
# both SKILL.md files are byte-identical.
|
||||
target_skill = target_dir / "SKILL.md"
|
||||
legacy_skill = legacy_dir / "SKILL.md"
|
||||
if target_skill.is_file():
|
||||
try:
|
||||
if target_skill.read_bytes() == legacy_skill.read_bytes():
|
||||
# Preserve legacy directory when it contains extra user files.
|
||||
has_extra_entries = any(
|
||||
child.name != "SKILL.md" for child in legacy_dir.iterdir()
|
||||
)
|
||||
if not has_extra_entries:
|
||||
shutil.rmtree(legacy_dir)
|
||||
removed_count += 1
|
||||
except OSError:
|
||||
# Best-effort migration: preserve legacy dir on read failures.
|
||||
pass
|
||||
|
||||
return (migrated_count, removed_count)
|
||||
|
||||
|
||||
AGENT_SKILLS_MIGRATIONS = {
|
||||
@@ -2094,16 +2140,33 @@ def init(
|
||||
|
||||
ensure_constitution_from_template(project_path, tracker=tracker)
|
||||
|
||||
if ai_skills:
|
||||
# Determine skills directory and migrate any legacy Kimi dotted skills.
|
||||
migrated_legacy_kimi_skills = 0
|
||||
removed_legacy_kimi_skills = 0
|
||||
skills_dir: Optional[Path] = None
|
||||
if selected_ai in NATIVE_SKILLS_AGENTS:
|
||||
skills_dir = _get_skills_dir(project_path, selected_ai)
|
||||
if selected_ai == "kimi" and skills_dir.is_dir():
|
||||
(
|
||||
migrated_legacy_kimi_skills,
|
||||
removed_legacy_kimi_skills,
|
||||
) = _migrate_legacy_kimi_dotted_skills(skills_dir)
|
||||
|
||||
if ai_skills:
|
||||
if selected_ai in NATIVE_SKILLS_AGENTS:
|
||||
bundled_found = _has_bundled_skills(project_path, selected_ai)
|
||||
if bundled_found:
|
||||
detail = f"bundled skills → {skills_dir.relative_to(project_path)}"
|
||||
if migrated_legacy_kimi_skills or removed_legacy_kimi_skills:
|
||||
detail += (
|
||||
f" (migrated {migrated_legacy_kimi_skills}, "
|
||||
f"removed {removed_legacy_kimi_skills} legacy Kimi dotted skills)"
|
||||
)
|
||||
if tracker:
|
||||
tracker.start("ai-skills")
|
||||
tracker.complete("ai-skills", f"bundled skills → {skills_dir.relative_to(project_path)}")
|
||||
tracker.complete("ai-skills", detail)
|
||||
else:
|
||||
console.print(f"[green]✓[/green] Using bundled agent skills in {skills_dir.relative_to(project_path)}/")
|
||||
console.print(f"[green]✓[/green] Using {detail}")
|
||||
else:
|
||||
# Compatibility fallback: convert command templates to skills
|
||||
# when an older template archive does not include native skills.
|
||||
@@ -2288,7 +2351,7 @@ def init(
|
||||
if codex_skill_mode:
|
||||
return f"$speckit-{name}"
|
||||
if kimi_skill_mode:
|
||||
return f"/skill:speckit.{name}"
|
||||
return f"/skill:speckit-{name}"
|
||||
return f"/speckit.{name}"
|
||||
|
||||
steps_lines.append(f"{step_num}. Start using {usage_label} with your AI agent:")
|
||||
@@ -3594,6 +3657,15 @@ def extension_add(
|
||||
for cmd in manifest.commands:
|
||||
console.print(f" • {cmd['name']} - {cmd.get('description', '')}")
|
||||
|
||||
# Report agent skills registration
|
||||
reg_meta = manager.registry.get(manifest.id)
|
||||
reg_skills = reg_meta.get("registered_skills", []) if reg_meta else []
|
||||
# Normalize to guard against corrupted registry entries
|
||||
if not isinstance(reg_skills, list):
|
||||
reg_skills = []
|
||||
if reg_skills:
|
||||
console.print(f"\n[green]✓[/green] {len(reg_skills)} agent skill(s) auto-registered")
|
||||
|
||||
console.print("\n[yellow]⚠[/yellow] Configuration may be required")
|
||||
console.print(f" Check: .specify/extensions/{manifest.id}/")
|
||||
|
||||
@@ -3632,14 +3704,19 @@ def extension_remove(
|
||||
installed = manager.list_installed()
|
||||
extension_id, display_name = _resolve_installed_extension(extension, installed, "remove")
|
||||
|
||||
# Get extension info for command count
|
||||
# Get extension info for command and skill counts
|
||||
ext_manifest = manager.get_extension(extension_id)
|
||||
cmd_count = len(ext_manifest.commands) if ext_manifest else 0
|
||||
reg_meta = manager.registry.get(extension_id)
|
||||
raw_skills = reg_meta.get("registered_skills") if reg_meta else None
|
||||
skill_count = len(raw_skills) if isinstance(raw_skills, list) else 0
|
||||
|
||||
# Confirm removal
|
||||
if not force:
|
||||
console.print("\n[yellow]⚠ This will remove:[/yellow]")
|
||||
console.print(f" • {cmd_count} commands from AI agent")
|
||||
if skill_count:
|
||||
console.print(f" • {skill_count} agent skill(s)")
|
||||
console.print(f" • Extension directory: .specify/extensions/{extension_id}/")
|
||||
if not keep_config:
|
||||
console.print(" • Config files (will be backed up)")
|
||||
|
||||
@@ -10,6 +10,8 @@ from pathlib import Path
|
||||
from typing import Dict, List, Any
|
||||
|
||||
import platform
|
||||
import re
|
||||
from copy import deepcopy
|
||||
import yaml
|
||||
|
||||
|
||||
@@ -211,24 +213,52 @@ class CommandRegistrar:
|
||||
return f"---\n{yaml_str}---\n"
|
||||
|
||||
def _adjust_script_paths(self, frontmatter: dict) -> dict:
|
||||
"""Adjust script paths from extension-relative to repo-relative.
|
||||
"""Normalize script paths in frontmatter to generated project locations.
|
||||
|
||||
Rewrites known repo-relative and top-level script paths under the
|
||||
`scripts` and `agent_scripts` keys (for example `../../scripts/`,
|
||||
`../../templates/`, `../../memory/`, `scripts/`, `templates/`, and
|
||||
`memory/`) to the `.specify/...` paths used in generated projects.
|
||||
|
||||
Args:
|
||||
frontmatter: Frontmatter dictionary
|
||||
|
||||
Returns:
|
||||
Modified frontmatter with adjusted paths
|
||||
Modified frontmatter with normalized project paths
|
||||
"""
|
||||
frontmatter = deepcopy(frontmatter)
|
||||
|
||||
for script_key in ("scripts", "agent_scripts"):
|
||||
scripts = frontmatter.get(script_key)
|
||||
if not isinstance(scripts, dict):
|
||||
continue
|
||||
|
||||
for key, script_path in scripts.items():
|
||||
if isinstance(script_path, str) and script_path.startswith("../../scripts/"):
|
||||
scripts[key] = f".specify/scripts/{script_path[14:]}"
|
||||
if isinstance(script_path, str):
|
||||
scripts[key] = self._rewrite_project_relative_paths(script_path)
|
||||
return frontmatter
|
||||
|
||||
@staticmethod
|
||||
def _rewrite_project_relative_paths(text: str) -> str:
|
||||
"""Rewrite repo-relative paths to their generated project locations."""
|
||||
if not isinstance(text, str) or not text:
|
||||
return text
|
||||
|
||||
for old, new in (
|
||||
("../../memory/", ".specify/memory/"),
|
||||
("../../scripts/", ".specify/scripts/"),
|
||||
("../../templates/", ".specify/templates/"),
|
||||
):
|
||||
text = text.replace(old, new)
|
||||
|
||||
# Only rewrite top-level style references so extension-local paths like
|
||||
# ".specify/extensions/<ext>/scripts/..." remain intact.
|
||||
text = re.sub(r'(^|[\s`"\'(])(?:\.?/)?memory/', r"\1.specify/memory/", text)
|
||||
text = re.sub(r'(^|[\s`"\'(])(?:\.?/)?scripts/', r"\1.specify/scripts/", text)
|
||||
text = re.sub(r'(^|[\s`"\'(])(?:\.?/)?templates/', r"\1.specify/templates/", text)
|
||||
|
||||
return text.replace(".specify/.specify/", ".specify/").replace(".specify.specify/", ".specify/")
|
||||
|
||||
def render_markdown_command(
|
||||
self,
|
||||
frontmatter: dict,
|
||||
@@ -277,9 +307,25 @@ class CommandRegistrar:
|
||||
toml_lines.append(f"# Source: {source_id}")
|
||||
toml_lines.append("")
|
||||
|
||||
# Keep TOML output valid even when body contains triple-quote delimiters.
|
||||
# Prefer multiline forms, then fall back to escaped basic string.
|
||||
if '"""' not in body:
|
||||
toml_lines.append('prompt = """')
|
||||
toml_lines.append(body)
|
||||
toml_lines.append('"""')
|
||||
elif "'''" not in body:
|
||||
toml_lines.append("prompt = '''")
|
||||
toml_lines.append(body)
|
||||
toml_lines.append("'''")
|
||||
else:
|
||||
escaped_body = (
|
||||
body.replace("\\", "\\\\")
|
||||
.replace('"', '\\"')
|
||||
.replace("\n", "\\n")
|
||||
.replace("\r", "\\r")
|
||||
.replace("\t", "\\t")
|
||||
)
|
||||
toml_lines.append(f'prompt = "{escaped_body}"')
|
||||
|
||||
return "\n".join(toml_lines)
|
||||
|
||||
@@ -308,8 +354,8 @@ class CommandRegistrar:
|
||||
if not isinstance(frontmatter, dict):
|
||||
frontmatter = {}
|
||||
|
||||
if agent_name == "codex":
|
||||
body = self._resolve_codex_skill_placeholders(frontmatter, body, project_root)
|
||||
if agent_name in {"codex", "kimi"}:
|
||||
body = self.resolve_skill_placeholders(agent_name, frontmatter, body, project_root)
|
||||
|
||||
description = frontmatter.get("description", f"Spec-kit workflow command: {skill_name}")
|
||||
skill_frontmatter = {
|
||||
@@ -324,13 +370,8 @@ class CommandRegistrar:
|
||||
return self.render_frontmatter(skill_frontmatter) + "\n" + body
|
||||
|
||||
@staticmethod
|
||||
def _resolve_codex_skill_placeholders(frontmatter: dict, body: str, project_root: Path) -> str:
|
||||
"""Resolve script placeholders for Codex skill overrides.
|
||||
|
||||
This intentionally scopes the fix to Codex, which is the newly
|
||||
migrated runtime path in this PR. Existing Kimi behavior is left
|
||||
unchanged for now.
|
||||
"""
|
||||
def resolve_skill_placeholders(agent_name: str, frontmatter: dict, body: str, project_root: Path) -> str:
|
||||
"""Resolve script placeholders for skills-backed agents."""
|
||||
try:
|
||||
from . import load_init_options
|
||||
except ImportError:
|
||||
@@ -346,7 +387,11 @@ class CommandRegistrar:
|
||||
if not isinstance(agent_scripts, dict):
|
||||
agent_scripts = {}
|
||||
|
||||
script_variant = load_init_options(project_root).get("script")
|
||||
init_opts = load_init_options(project_root)
|
||||
if not isinstance(init_opts, dict):
|
||||
init_opts = {}
|
||||
|
||||
script_variant = init_opts.get("script")
|
||||
if script_variant not in {"sh", "ps"}:
|
||||
fallback_order = []
|
||||
default_variant = "ps" if platform.system().lower().startswith("win") else "sh"
|
||||
@@ -376,7 +421,8 @@ class CommandRegistrar:
|
||||
agent_script_command = agent_script_command.replace("{ARGS}", "$ARGUMENTS")
|
||||
body = body.replace("{AGENT_SCRIPT}", agent_script_command)
|
||||
|
||||
return body.replace("{ARGS}", "$ARGUMENTS").replace("__AGENT__", "codex")
|
||||
body = body.replace("{ARGS}", "$ARGUMENTS").replace("__AGENT__", agent_name)
|
||||
return CommandRegistrar._rewrite_project_relative_paths(body)
|
||||
|
||||
def _convert_argument_placeholder(self, content: str, from_placeholder: str, to_placeholder: str) -> str:
|
||||
"""Convert argument placeholder format.
|
||||
@@ -400,8 +446,9 @@ class CommandRegistrar:
|
||||
short_name = cmd_name
|
||||
if short_name.startswith("speckit."):
|
||||
short_name = short_name[len("speckit."):]
|
||||
short_name = short_name.replace(".", "-")
|
||||
|
||||
return f"speckit.{short_name}" if agent_name == "kimi" else f"speckit-{short_name}"
|
||||
return f"speckit-{short_name}"
|
||||
|
||||
def register_commands(
|
||||
self,
|
||||
|
||||
@@ -25,6 +25,49 @@ import yaml
|
||||
from packaging import version as pkg_version
|
||||
from packaging.specifiers import SpecifierSet, InvalidSpecifier
|
||||
|
||||
_FALLBACK_CORE_COMMAND_NAMES = frozenset({
|
||||
"analyze",
|
||||
"checklist",
|
||||
"clarify",
|
||||
"constitution",
|
||||
"implement",
|
||||
"plan",
|
||||
"specify",
|
||||
"tasks",
|
||||
"taskstoissues",
|
||||
})
|
||||
EXTENSION_COMMAND_NAME_PATTERN = re.compile(r"^speckit\.([a-z0-9-]+)\.([a-z0-9-]+)$")
|
||||
|
||||
|
||||
def _load_core_command_names() -> frozenset[str]:
|
||||
"""Discover bundled core command names from the packaged templates.
|
||||
|
||||
Prefer the wheel-time ``core_pack`` bundle when present, and fall back to
|
||||
the source checkout when running from the repository. If neither is
|
||||
available, use the baked-in fallback set so validation still works.
|
||||
"""
|
||||
candidate_dirs = [
|
||||
Path(__file__).parent / "core_pack" / "commands",
|
||||
Path(__file__).resolve().parent.parent.parent / "templates" / "commands",
|
||||
]
|
||||
|
||||
for commands_dir in candidate_dirs:
|
||||
if not commands_dir.is_dir():
|
||||
continue
|
||||
|
||||
command_names = {
|
||||
command_file.stem
|
||||
for command_file in commands_dir.iterdir()
|
||||
if command_file.is_file() and command_file.suffix == ".md"
|
||||
}
|
||||
if command_names:
|
||||
return frozenset(command_names)
|
||||
|
||||
return _FALLBACK_CORE_COMMAND_NAMES
|
||||
|
||||
|
||||
CORE_COMMAND_NAMES = _load_core_command_names()
|
||||
|
||||
|
||||
class ExtensionError(Exception):
|
||||
"""Base exception for extension-related errors."""
|
||||
@@ -149,7 +192,7 @@ class ExtensionManifest:
|
||||
raise ValidationError("Command missing 'name' or 'file'")
|
||||
|
||||
# Validate command name format
|
||||
if not re.match(r'^speckit\.[a-z0-9-]+\.[a-z0-9-]+$', cmd["name"]):
|
||||
if EXTENSION_COMMAND_NAME_PATTERN.match(cmd["name"]) is None:
|
||||
raise ValidationError(
|
||||
f"Invalid command name '{cmd['name']}': "
|
||||
"must follow pattern 'speckit.{extension}.{command}'"
|
||||
@@ -446,6 +489,126 @@ class ExtensionManager:
|
||||
self.extensions_dir = project_root / ".specify" / "extensions"
|
||||
self.registry = ExtensionRegistry(self.extensions_dir)
|
||||
|
||||
@staticmethod
|
||||
def _collect_manifest_command_names(manifest: ExtensionManifest) -> Dict[str, str]:
|
||||
"""Collect command and alias names declared by a manifest.
|
||||
|
||||
Performs install-time validation for extension-specific constraints:
|
||||
- commands and aliases must use the canonical `speckit.{extension}.{command}` shape
|
||||
- commands and aliases must use this extension's namespace
|
||||
- command namespaces must not shadow core commands
|
||||
- duplicate command/alias names inside one manifest are rejected
|
||||
|
||||
Args:
|
||||
manifest: Parsed extension manifest
|
||||
|
||||
Returns:
|
||||
Mapping of declared command/alias name -> kind ("command"/"alias")
|
||||
|
||||
Raises:
|
||||
ValidationError: If any declared name is invalid
|
||||
"""
|
||||
if manifest.id in CORE_COMMAND_NAMES:
|
||||
raise ValidationError(
|
||||
f"Extension ID '{manifest.id}' conflicts with core command namespace '{manifest.id}'"
|
||||
)
|
||||
|
||||
declared_names: Dict[str, str] = {}
|
||||
|
||||
for cmd in manifest.commands:
|
||||
primary_name = cmd["name"]
|
||||
aliases = cmd.get("aliases", [])
|
||||
|
||||
if aliases is None:
|
||||
aliases = []
|
||||
if not isinstance(aliases, list):
|
||||
raise ValidationError(
|
||||
f"Aliases for command '{primary_name}' must be a list"
|
||||
)
|
||||
|
||||
for kind, name in [("command", primary_name)] + [
|
||||
("alias", alias) for alias in aliases
|
||||
]:
|
||||
if not isinstance(name, str):
|
||||
raise ValidationError(
|
||||
f"{kind.capitalize()} for command '{primary_name}' must be a string"
|
||||
)
|
||||
|
||||
match = EXTENSION_COMMAND_NAME_PATTERN.match(name)
|
||||
if match is None:
|
||||
raise ValidationError(
|
||||
f"Invalid {kind} '{name}': "
|
||||
"must follow pattern 'speckit.{extension}.{command}'"
|
||||
)
|
||||
|
||||
namespace = match.group(1)
|
||||
if namespace != manifest.id:
|
||||
raise ValidationError(
|
||||
f"{kind.capitalize()} '{name}' must use extension namespace '{manifest.id}'"
|
||||
)
|
||||
|
||||
if namespace in CORE_COMMAND_NAMES:
|
||||
raise ValidationError(
|
||||
f"{kind.capitalize()} '{name}' conflicts with core command namespace '{namespace}'"
|
||||
)
|
||||
|
||||
if name in declared_names:
|
||||
raise ValidationError(
|
||||
f"Duplicate command or alias '{name}' in extension manifest"
|
||||
)
|
||||
|
||||
declared_names[name] = kind
|
||||
|
||||
return declared_names
|
||||
|
||||
def _get_installed_command_name_map(
|
||||
self,
|
||||
exclude_extension_id: Optional[str] = None,
|
||||
) -> Dict[str, str]:
|
||||
"""Return registered command and alias names for installed extensions."""
|
||||
installed_names: Dict[str, str] = {}
|
||||
|
||||
for ext_id in self.registry.keys():
|
||||
if ext_id == exclude_extension_id:
|
||||
continue
|
||||
|
||||
manifest = self.get_extension(ext_id)
|
||||
if manifest is None:
|
||||
continue
|
||||
|
||||
for cmd in manifest.commands:
|
||||
cmd_name = cmd.get("name")
|
||||
if isinstance(cmd_name, str):
|
||||
installed_names.setdefault(cmd_name, ext_id)
|
||||
|
||||
aliases = cmd.get("aliases", [])
|
||||
if not isinstance(aliases, list):
|
||||
continue
|
||||
|
||||
for alias in aliases:
|
||||
if isinstance(alias, str):
|
||||
installed_names.setdefault(alias, ext_id)
|
||||
|
||||
return installed_names
|
||||
|
||||
def _validate_install_conflicts(self, manifest: ExtensionManifest) -> None:
|
||||
"""Reject installs that would shadow core or installed extension commands."""
|
||||
declared_names = self._collect_manifest_command_names(manifest)
|
||||
installed_names = self._get_installed_command_name_map(
|
||||
exclude_extension_id=manifest.id
|
||||
)
|
||||
|
||||
collisions = [
|
||||
f"{name} (already provided by extension '{installed_names[name]}')"
|
||||
for name in sorted(declared_names)
|
||||
if name in installed_names
|
||||
]
|
||||
if collisions:
|
||||
raise ValidationError(
|
||||
"Extension commands conflict with installed extensions:\n- "
|
||||
+ "\n- ".join(collisions)
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _load_extensionignore(source_dir: Path) -> Optional[Callable[[str, List[str]], Set[str]]]:
|
||||
"""Load .extensionignore and return an ignore function for shutil.copytree.
|
||||
@@ -510,6 +673,283 @@ class ExtensionManager:
|
||||
|
||||
return _ignore
|
||||
|
||||
def _get_skills_dir(self) -> Optional[Path]:
|
||||
"""Return the active skills directory for extension skill registration.
|
||||
|
||||
Reads ``.specify/init-options.json`` to determine whether skills
|
||||
are enabled and which agent was selected, then delegates to
|
||||
the module-level ``_get_skills_dir()`` helper for the concrete path.
|
||||
|
||||
Kimi is treated as a native-skills agent: if ``ai == "kimi"`` and
|
||||
``.kimi/skills`` exists, extension installs should still propagate
|
||||
command skills even when ``ai_skills`` is false.
|
||||
|
||||
Returns:
|
||||
The skills directory ``Path``, or ``None`` if skills were not
|
||||
enabled and no native-skills fallback applies.
|
||||
"""
|
||||
from . import load_init_options, _get_skills_dir as resolve_skills_dir
|
||||
|
||||
opts = load_init_options(self.project_root)
|
||||
if not isinstance(opts, dict):
|
||||
opts = {}
|
||||
|
||||
agent = opts.get("ai")
|
||||
if not isinstance(agent, str) or not agent:
|
||||
return None
|
||||
|
||||
ai_skills_enabled = bool(opts.get("ai_skills"))
|
||||
if not ai_skills_enabled and agent != "kimi":
|
||||
return None
|
||||
|
||||
skills_dir = resolve_skills_dir(self.project_root, agent)
|
||||
if not skills_dir.is_dir():
|
||||
return None
|
||||
|
||||
return skills_dir
|
||||
|
||||
def _register_extension_skills(
|
||||
self,
|
||||
manifest: ExtensionManifest,
|
||||
extension_dir: Path,
|
||||
) -> List[str]:
|
||||
"""Generate SKILL.md files for extension commands as agent skills.
|
||||
|
||||
For every command in the extension manifest, creates a SKILL.md
|
||||
file in the agent's skills directory following the agentskills.io
|
||||
specification. This is only done when ``--ai-skills`` was used
|
||||
during project initialisation.
|
||||
|
||||
Args:
|
||||
manifest: Extension manifest.
|
||||
extension_dir: Installed extension directory.
|
||||
|
||||
Returns:
|
||||
List of skill names that were created (for registry storage).
|
||||
"""
|
||||
skills_dir = self._get_skills_dir()
|
||||
if not skills_dir:
|
||||
return []
|
||||
|
||||
from . import load_init_options
|
||||
from .agents import CommandRegistrar
|
||||
import yaml
|
||||
|
||||
written: List[str] = []
|
||||
opts = load_init_options(self.project_root)
|
||||
if not isinstance(opts, dict):
|
||||
opts = {}
|
||||
selected_ai = opts.get("ai")
|
||||
if not isinstance(selected_ai, str) or not selected_ai:
|
||||
return []
|
||||
registrar = CommandRegistrar()
|
||||
|
||||
for cmd_info in manifest.commands:
|
||||
cmd_name = cmd_info["name"]
|
||||
cmd_file_rel = cmd_info["file"]
|
||||
|
||||
# Guard against path traversal: reject absolute paths and ensure
|
||||
# the resolved file stays within the extension directory.
|
||||
cmd_path = Path(cmd_file_rel)
|
||||
if cmd_path.is_absolute():
|
||||
continue
|
||||
try:
|
||||
ext_root = extension_dir.resolve()
|
||||
source_file = (ext_root / cmd_path).resolve()
|
||||
source_file.relative_to(ext_root) # raises ValueError if outside
|
||||
except (OSError, ValueError):
|
||||
continue
|
||||
|
||||
if not source_file.is_file():
|
||||
continue
|
||||
|
||||
# Derive skill name from command name using the same hyphenated
|
||||
# convention as hook rendering and preset skill registration.
|
||||
short_name_raw = cmd_name
|
||||
if short_name_raw.startswith("speckit."):
|
||||
short_name_raw = short_name_raw[len("speckit."):]
|
||||
skill_name = f"speckit-{short_name_raw.replace('.', '-')}"
|
||||
|
||||
# Check if skill already exists before creating the directory
|
||||
skill_subdir = skills_dir / skill_name
|
||||
skill_file = skill_subdir / "SKILL.md"
|
||||
if skill_file.exists():
|
||||
# Do not overwrite user-customized skills
|
||||
continue
|
||||
|
||||
# Create skill directory; track whether we created it so we can clean
|
||||
# up safely if reading the source file subsequently fails.
|
||||
created_now = not skill_subdir.exists()
|
||||
skill_subdir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Parse the command file — guard against IsADirectoryError / decode errors
|
||||
try:
|
||||
content = source_file.read_text(encoding="utf-8")
|
||||
except (OSError, UnicodeDecodeError):
|
||||
if created_now:
|
||||
try:
|
||||
skill_subdir.rmdir() # undo the mkdir; dir is empty at this point
|
||||
except OSError:
|
||||
pass # best-effort cleanup
|
||||
continue
|
||||
frontmatter, body = registrar.parse_frontmatter(content)
|
||||
frontmatter = registrar._adjust_script_paths(frontmatter)
|
||||
body = registrar.resolve_skill_placeholders(
|
||||
selected_ai, frontmatter, body, self.project_root
|
||||
)
|
||||
|
||||
original_desc = frontmatter.get("description", "")
|
||||
description = original_desc or f"Extension command: {cmd_name}"
|
||||
|
||||
frontmatter_data = {
|
||||
"name": skill_name,
|
||||
"description": description,
|
||||
"compatibility": "Requires spec-kit project structure with .specify/ directory",
|
||||
"metadata": {
|
||||
"author": "github-spec-kit",
|
||||
"source": f"extension:{manifest.id}",
|
||||
},
|
||||
}
|
||||
frontmatter_text = yaml.safe_dump(frontmatter_data, sort_keys=False).strip()
|
||||
|
||||
# Derive a human-friendly title from the command name
|
||||
short_name = cmd_name
|
||||
if short_name.startswith("speckit."):
|
||||
short_name = short_name[len("speckit."):]
|
||||
title_name = short_name.replace(".", " ").replace("-", " ").title()
|
||||
|
||||
skill_content = (
|
||||
f"---\n"
|
||||
f"{frontmatter_text}\n"
|
||||
f"---\n\n"
|
||||
f"# {title_name} Skill\n\n"
|
||||
f"{body}\n"
|
||||
)
|
||||
|
||||
skill_file.write_text(skill_content, encoding="utf-8")
|
||||
written.append(skill_name)
|
||||
|
||||
return written
|
||||
|
||||
def _unregister_extension_skills(self, skill_names: List[str], extension_id: str) -> None:
|
||||
"""Remove SKILL.md directories for extension skills.
|
||||
|
||||
Called during extension removal to clean up skill files that
|
||||
were created by ``_register_extension_skills()``.
|
||||
|
||||
If ``_get_skills_dir()`` returns ``None`` (e.g. the user removed
|
||||
init-options.json or toggled ai_skills after installation), we
|
||||
fall back to scanning all known agent skills directories so that
|
||||
orphaned skill directories are still cleaned up. In that case
|
||||
each candidate directory is verified against the SKILL.md
|
||||
``metadata.source`` field before removal to avoid accidentally
|
||||
deleting user-created skills with the same name.
|
||||
|
||||
Args:
|
||||
skill_names: List of skill names to remove.
|
||||
extension_id: Extension ID used to verify ownership during
|
||||
fallback candidate scanning.
|
||||
"""
|
||||
if not skill_names:
|
||||
return
|
||||
|
||||
skills_dir = self._get_skills_dir()
|
||||
|
||||
if skills_dir:
|
||||
# Fast path: we know the exact skills directory
|
||||
for skill_name in skill_names:
|
||||
# Guard against path traversal from a corrupted registry entry:
|
||||
# reject names that are absolute, contain path separators, or
|
||||
# resolve to a path outside the skills directory.
|
||||
sn_path = Path(skill_name)
|
||||
if sn_path.is_absolute() or len(sn_path.parts) != 1:
|
||||
continue
|
||||
try:
|
||||
skill_subdir = (skills_dir / skill_name).resolve()
|
||||
skill_subdir.relative_to(skills_dir.resolve()) # raises if outside
|
||||
except (OSError, ValueError):
|
||||
continue
|
||||
if not skill_subdir.is_dir():
|
||||
continue
|
||||
# Safety check: only delete if SKILL.md exists and its
|
||||
# metadata.source matches exactly this extension — mirroring
|
||||
# the fallback branch — so a corrupted registry entry cannot
|
||||
# delete an unrelated user skill.
|
||||
skill_md = skill_subdir / "SKILL.md"
|
||||
if not skill_md.is_file():
|
||||
continue
|
||||
try:
|
||||
import yaml as _yaml
|
||||
raw = skill_md.read_text(encoding="utf-8")
|
||||
source = ""
|
||||
if raw.startswith("---"):
|
||||
parts = raw.split("---", 2)
|
||||
if len(parts) >= 3:
|
||||
fm = _yaml.safe_load(parts[1]) or {}
|
||||
source = (
|
||||
fm.get("metadata", {}).get("source", "")
|
||||
if isinstance(fm, dict)
|
||||
else ""
|
||||
)
|
||||
if source != f"extension:{extension_id}":
|
||||
continue
|
||||
except (OSError, UnicodeDecodeError, Exception):
|
||||
continue
|
||||
shutil.rmtree(skill_subdir)
|
||||
else:
|
||||
# Fallback: scan all possible agent skills directories
|
||||
from . import AGENT_CONFIG, DEFAULT_SKILLS_DIR
|
||||
|
||||
candidate_dirs: set[Path] = set()
|
||||
for cfg in AGENT_CONFIG.values():
|
||||
folder = cfg.get("folder", "")
|
||||
if folder:
|
||||
candidate_dirs.add(self.project_root / folder.rstrip("/") / "skills")
|
||||
candidate_dirs.add(self.project_root / DEFAULT_SKILLS_DIR)
|
||||
|
||||
for skills_candidate in candidate_dirs:
|
||||
if not skills_candidate.is_dir():
|
||||
continue
|
||||
for skill_name in skill_names:
|
||||
# Same path-traversal guard as the fast path above
|
||||
sn_path = Path(skill_name)
|
||||
if sn_path.is_absolute() or len(sn_path.parts) != 1:
|
||||
continue
|
||||
try:
|
||||
skill_subdir = (skills_candidate / skill_name).resolve()
|
||||
skill_subdir.relative_to(skills_candidate.resolve()) # raises if outside
|
||||
except (OSError, ValueError):
|
||||
continue
|
||||
if not skill_subdir.is_dir():
|
||||
continue
|
||||
# Safety check: only delete if SKILL.md exists and its
|
||||
# metadata.source matches exactly this extension. If the
|
||||
# file is missing or unreadable we skip to avoid deleting
|
||||
# unrelated user-created directories.
|
||||
skill_md = skill_subdir / "SKILL.md"
|
||||
if not skill_md.is_file():
|
||||
continue
|
||||
try:
|
||||
import yaml as _yaml
|
||||
raw = skill_md.read_text(encoding="utf-8")
|
||||
source = ""
|
||||
if raw.startswith("---"):
|
||||
parts = raw.split("---", 2)
|
||||
if len(parts) >= 3:
|
||||
fm = _yaml.safe_load(parts[1]) or {}
|
||||
source = (
|
||||
fm.get("metadata", {}).get("source", "")
|
||||
if isinstance(fm, dict)
|
||||
else ""
|
||||
)
|
||||
# Only remove skills explicitly created by this extension
|
||||
if source != f"extension:{extension_id}":
|
||||
continue
|
||||
except (OSError, UnicodeDecodeError, Exception):
|
||||
# If we can't verify, skip to avoid accidental deletion
|
||||
continue
|
||||
shutil.rmtree(skill_subdir)
|
||||
|
||||
def check_compatibility(
|
||||
self,
|
||||
manifest: ExtensionManifest,
|
||||
@@ -584,6 +1024,9 @@ class ExtensionManager:
|
||||
f"Use 'specify extension remove {manifest.id}' first."
|
||||
)
|
||||
|
||||
# Reject manifests that would shadow core commands or installed extensions.
|
||||
self._validate_install_conflicts(manifest)
|
||||
|
||||
# Install extension
|
||||
dest_dir = self.extensions_dir / manifest.id
|
||||
if dest_dir.exists():
|
||||
@@ -601,6 +1044,10 @@ class ExtensionManager:
|
||||
manifest, dest_dir, self.project_root
|
||||
)
|
||||
|
||||
# Auto-register extension commands as agent skills when --ai-skills
|
||||
# was used during project initialisation (feature parity).
|
||||
registered_skills = self._register_extension_skills(manifest, dest_dir)
|
||||
|
||||
# Register hooks
|
||||
hook_executor = HookExecutor(self.project_root)
|
||||
hook_executor.register_hooks(manifest)
|
||||
@@ -612,7 +1059,8 @@ class ExtensionManager:
|
||||
"manifest_hash": manifest.get_hash(),
|
||||
"enabled": True,
|
||||
"priority": priority,
|
||||
"registered_commands": registered_commands
|
||||
"registered_commands": registered_commands,
|
||||
"registered_skills": registered_skills,
|
||||
})
|
||||
|
||||
return manifest
|
||||
@@ -690,9 +1138,15 @@ class ExtensionManager:
|
||||
if not self.registry.is_installed(extension_id):
|
||||
return False
|
||||
|
||||
# Get registered commands before removal
|
||||
# Get registered commands and skills before removal
|
||||
metadata = self.registry.get(extension_id)
|
||||
registered_commands = metadata.get("registered_commands", {}) if metadata else {}
|
||||
raw_skills = metadata.get("registered_skills", []) if metadata else []
|
||||
# Normalize: must be a list of plain strings to avoid corrupted-registry errors
|
||||
if isinstance(raw_skills, list):
|
||||
registered_skills = [s for s in raw_skills if isinstance(s, str)]
|
||||
else:
|
||||
registered_skills = []
|
||||
|
||||
extension_dir = self.extensions_dir / extension_id
|
||||
|
||||
@@ -701,6 +1155,9 @@ class ExtensionManager:
|
||||
registrar = CommandRegistrar()
|
||||
registrar.unregister_commands(registered_commands, self.project_root)
|
||||
|
||||
# Unregister agent skills
|
||||
self._unregister_extension_skills(registered_skills, extension_id)
|
||||
|
||||
if keep_config:
|
||||
# Preserve config files, only remove non-config files
|
||||
if extension_dir.exists():
|
||||
@@ -1644,6 +2101,52 @@ class HookExecutor:
|
||||
self.project_root = project_root
|
||||
self.extensions_dir = project_root / ".specify" / "extensions"
|
||||
self.config_file = project_root / ".specify" / "extensions.yml"
|
||||
self._init_options_cache: Optional[Dict[str, Any]] = None
|
||||
|
||||
def _load_init_options(self) -> Dict[str, Any]:
|
||||
"""Load persisted init options used to determine invocation style.
|
||||
|
||||
Uses the shared helper from specify_cli and caches values per executor
|
||||
instance to avoid repeated filesystem reads during hook rendering.
|
||||
"""
|
||||
if self._init_options_cache is None:
|
||||
from . import load_init_options
|
||||
|
||||
payload = load_init_options(self.project_root)
|
||||
self._init_options_cache = payload if isinstance(payload, dict) else {}
|
||||
return self._init_options_cache
|
||||
|
||||
@staticmethod
|
||||
def _skill_name_from_command(command: Any) -> str:
|
||||
"""Map a command id like speckit.plan to speckit-plan skill name."""
|
||||
if not isinstance(command, str):
|
||||
return ""
|
||||
command_id = command.strip()
|
||||
if not command_id.startswith("speckit."):
|
||||
return ""
|
||||
return f"speckit-{command_id[len('speckit.'):].replace('.', '-')}"
|
||||
|
||||
def _render_hook_invocation(self, command: Any) -> str:
|
||||
"""Render an agent-specific invocation string for a hook command."""
|
||||
if not isinstance(command, str):
|
||||
return ""
|
||||
|
||||
command_id = command.strip()
|
||||
if not command_id:
|
||||
return ""
|
||||
|
||||
init_options = self._load_init_options()
|
||||
selected_ai = init_options.get("ai")
|
||||
codex_skill_mode = selected_ai == "codex" and bool(init_options.get("ai_skills"))
|
||||
kimi_skill_mode = selected_ai == "kimi"
|
||||
|
||||
skill_name = self._skill_name_from_command(command_id)
|
||||
if codex_skill_mode and skill_name:
|
||||
return f"${skill_name}"
|
||||
if kimi_skill_mode and skill_name:
|
||||
return f"/skill:{skill_name}"
|
||||
|
||||
return f"/{command_id}"
|
||||
|
||||
def get_project_config(self) -> Dict[str, Any]:
|
||||
"""Load project-level extension configuration.
|
||||
@@ -1887,21 +2390,27 @@ class HookExecutor:
|
||||
for hook in hooks:
|
||||
extension = hook.get("extension")
|
||||
command = hook.get("command")
|
||||
invocation = self._render_hook_invocation(command)
|
||||
command_text = command if isinstance(command, str) and command.strip() else "<missing command>"
|
||||
display_invocation = invocation or (
|
||||
f"/{command_text}" if command_text != "<missing command>" else "/<missing command>"
|
||||
)
|
||||
optional = hook.get("optional", True)
|
||||
prompt = hook.get("prompt", "")
|
||||
description = hook.get("description", "")
|
||||
|
||||
if optional:
|
||||
lines.append(f"\n**Optional Hook**: {extension}")
|
||||
lines.append(f"Command: `/{command}`")
|
||||
lines.append(f"Command: `{display_invocation}`")
|
||||
if description:
|
||||
lines.append(f"Description: {description}")
|
||||
lines.append(f"\nPrompt: {prompt}")
|
||||
lines.append(f"To execute: `/{command}`")
|
||||
lines.append(f"To execute: `{display_invocation}`")
|
||||
else:
|
||||
lines.append(f"\n**Automatic Hook**: {extension}")
|
||||
lines.append(f"Executing: `/{command}`")
|
||||
lines.append(f"EXECUTE_COMMAND: {command}")
|
||||
lines.append(f"Executing: `{display_invocation}`")
|
||||
lines.append(f"EXECUTE_COMMAND: {command_text}")
|
||||
lines.append(f"EXECUTE_COMMAND_INVOCATION: {display_invocation}")
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
@@ -1965,6 +2474,7 @@ class HookExecutor:
|
||||
"""
|
||||
return {
|
||||
"command": hook.get("command"),
|
||||
"invocation": self._render_hook_invocation(hook.get("command")),
|
||||
"extension": hook.get("extension"),
|
||||
"optional": hook.get("optional", True),
|
||||
"description": hook.get("description", ""),
|
||||
@@ -2008,4 +2518,3 @@ class HookExecutor:
|
||||
hook["enabled"] = False
|
||||
|
||||
self.save_project_config(config)
|
||||
|
||||
|
||||
34
src/specify_cli/integrations/__init__.py
Normal file
34
src/specify_cli/integrations/__init__.py
Normal file
@@ -0,0 +1,34 @@
|
||||
"""Integration registry for AI coding assistants.
|
||||
|
||||
Each integration is a self-contained subpackage that handles setup/teardown
|
||||
for a specific AI assistant (Copilot, Claude, Gemini, etc.).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .base import IntegrationBase
|
||||
|
||||
# Maps integration key → IntegrationBase instance.
|
||||
# Populated by later stages as integrations are migrated.
|
||||
INTEGRATION_REGISTRY: dict[str, IntegrationBase] = {}
|
||||
|
||||
|
||||
def _register(integration: IntegrationBase) -> None:
|
||||
"""Register an integration instance in the global registry.
|
||||
|
||||
Raises ``ValueError`` for falsy keys and ``KeyError`` for duplicates.
|
||||
"""
|
||||
key = integration.key
|
||||
if not key:
|
||||
raise ValueError("Cannot register integration with an empty key.")
|
||||
if key in INTEGRATION_REGISTRY:
|
||||
raise KeyError(f"Integration with key {key!r} is already registered.")
|
||||
INTEGRATION_REGISTRY[key] = integration
|
||||
|
||||
|
||||
def get_integration(key: str) -> IntegrationBase | None:
|
||||
"""Return the integration for *key*, or ``None`` if not registered."""
|
||||
return INTEGRATION_REGISTRY.get(key)
|
||||
215
src/specify_cli/integrations/base.py
Normal file
215
src/specify_cli/integrations/base.py
Normal file
@@ -0,0 +1,215 @@
|
||||
"""Base classes for AI-assistant integrations.
|
||||
|
||||
Provides:
|
||||
- ``IntegrationOption`` — declares a CLI option an integration accepts.
|
||||
- ``IntegrationBase`` — abstract base every integration must implement.
|
||||
- ``MarkdownIntegration`` — concrete base for standard Markdown-format
|
||||
integrations (the common case — subclass, set three class attrs, done).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import shutil
|
||||
from abc import ABC
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .manifest import IntegrationManifest
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# IntegrationOption
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class IntegrationOption:
|
||||
"""Declares an option that an integration accepts via ``--integration-options``.
|
||||
|
||||
Attributes:
|
||||
name: The flag name (e.g. ``"--commands-dir"``).
|
||||
is_flag: ``True`` for boolean flags (``--skills``).
|
||||
required: ``True`` if the option must be supplied.
|
||||
default: Default value when not supplied (``None`` → no default).
|
||||
help: One-line description shown in ``specify integrate info``.
|
||||
"""
|
||||
|
||||
name: str
|
||||
is_flag: bool = False
|
||||
required: bool = False
|
||||
default: Any = None
|
||||
help: str = ""
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# IntegrationBase — abstract base class
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class IntegrationBase(ABC):
|
||||
"""Abstract base class every integration must implement.
|
||||
|
||||
Subclasses must set the following class-level attributes:
|
||||
|
||||
* ``key`` — unique identifier, matches actual CLI tool name
|
||||
* ``config`` — dict compatible with ``AGENT_CONFIG`` entries
|
||||
* ``registrar_config`` — dict compatible with ``CommandRegistrar.AGENT_CONFIGS``
|
||||
|
||||
And may optionally set:
|
||||
|
||||
* ``context_file`` — path (relative to project root) of the agent
|
||||
context/instructions file (e.g. ``"CLAUDE.md"``)
|
||||
"""
|
||||
|
||||
# -- Must be set by every subclass ------------------------------------
|
||||
|
||||
key: str = ""
|
||||
"""Unique integration key — should match the actual CLI tool name."""
|
||||
|
||||
config: dict[str, Any] | None = None
|
||||
"""Metadata dict matching the ``AGENT_CONFIG`` shape."""
|
||||
|
||||
registrar_config: dict[str, Any] | None = None
|
||||
"""Registration dict matching ``CommandRegistrar.AGENT_CONFIGS`` shape."""
|
||||
|
||||
# -- Optional ---------------------------------------------------------
|
||||
|
||||
context_file: str | None = None
|
||||
"""Relative path to the agent context file (e.g. ``CLAUDE.md``)."""
|
||||
|
||||
# -- Public API -------------------------------------------------------
|
||||
|
||||
@classmethod
|
||||
def options(cls) -> list[IntegrationOption]:
|
||||
"""Return options this integration accepts. Default: none."""
|
||||
return []
|
||||
|
||||
def templates_dir(self) -> Path:
|
||||
"""Return the path to this integration's bundled templates.
|
||||
|
||||
By convention, templates live in a ``templates/`` subdirectory
|
||||
next to the file where the integration class is defined.
|
||||
"""
|
||||
import inspect
|
||||
|
||||
module_file = inspect.getfile(type(self))
|
||||
return Path(module_file).resolve().parent / "templates"
|
||||
|
||||
def setup(
|
||||
self,
|
||||
project_root: Path,
|
||||
manifest: IntegrationManifest,
|
||||
parsed_options: dict[str, Any] | None = None,
|
||||
**opts: Any,
|
||||
) -> list[Path]:
|
||||
"""Install integration files into *project_root*.
|
||||
|
||||
Returns the list of files created. The default implementation
|
||||
copies every file from ``templates_dir()`` into the commands
|
||||
directory derived from ``config``, recording each in *manifest*.
|
||||
"""
|
||||
created: list[Path] = []
|
||||
tpl_dir = self.templates_dir()
|
||||
if not tpl_dir.is_dir():
|
||||
return created
|
||||
|
||||
if not self.config:
|
||||
raise ValueError(
|
||||
f"{type(self).__name__}.config is not set; integration "
|
||||
"subclasses must define a non-empty 'config' mapping."
|
||||
)
|
||||
folder = self.config.get("folder")
|
||||
if not folder:
|
||||
raise ValueError(
|
||||
f"{type(self).__name__}.config is missing required 'folder' entry."
|
||||
)
|
||||
|
||||
project_root_resolved = project_root.resolve()
|
||||
if manifest.project_root != project_root_resolved:
|
||||
raise ValueError(
|
||||
f"manifest.project_root ({manifest.project_root}) does not match "
|
||||
f"project_root ({project_root_resolved})"
|
||||
)
|
||||
subdir = self.config.get("commands_subdir", "commands")
|
||||
dest = (project_root / folder / subdir).resolve()
|
||||
# Ensure destination stays within the project root
|
||||
try:
|
||||
dest.relative_to(project_root_resolved)
|
||||
except ValueError as exc:
|
||||
raise ValueError(
|
||||
f"Integration destination {dest} escapes "
|
||||
f"project root {project_root_resolved}"
|
||||
) from exc
|
||||
|
||||
dest.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
for src_file in sorted(tpl_dir.iterdir()):
|
||||
if src_file.is_file():
|
||||
dst_file = dest / src_file.name
|
||||
dst_resolved = dst_file.resolve()
|
||||
rel = dst_resolved.relative_to(project_root_resolved)
|
||||
shutil.copy2(src_file, dst_file)
|
||||
manifest.record_existing(rel)
|
||||
created.append(dst_file)
|
||||
|
||||
return created
|
||||
|
||||
def teardown(
|
||||
self,
|
||||
project_root: Path,
|
||||
manifest: IntegrationManifest,
|
||||
*,
|
||||
force: bool = False,
|
||||
) -> tuple[list[Path], list[Path]]:
|
||||
"""Uninstall integration files from *project_root*.
|
||||
|
||||
Delegates to ``manifest.uninstall()`` which only removes files
|
||||
whose hash still matches the recorded value (unless *force*).
|
||||
|
||||
Returns ``(removed, skipped)`` file lists.
|
||||
"""
|
||||
return manifest.uninstall(project_root, force=force)
|
||||
|
||||
# -- Convenience helpers for subclasses -------------------------------
|
||||
|
||||
def install(
|
||||
self,
|
||||
project_root: Path,
|
||||
manifest: IntegrationManifest,
|
||||
parsed_options: dict[str, Any] | None = None,
|
||||
**opts: Any,
|
||||
) -> list[Path]:
|
||||
"""High-level install — calls ``setup()`` and returns created files."""
|
||||
return self.setup(
|
||||
project_root, manifest, parsed_options=parsed_options, **opts
|
||||
)
|
||||
|
||||
def uninstall(
|
||||
self,
|
||||
project_root: Path,
|
||||
manifest: IntegrationManifest,
|
||||
*,
|
||||
force: bool = False,
|
||||
) -> tuple[list[Path], list[Path]]:
|
||||
"""High-level uninstall — calls ``teardown()``."""
|
||||
return self.teardown(project_root, manifest, force=force)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# MarkdownIntegration — covers ~20 standard agents
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class MarkdownIntegration(IntegrationBase):
|
||||
"""Concrete base for integrations that use standard Markdown commands.
|
||||
|
||||
Subclasses only need to set ``key``, ``config``, ``registrar_config``
|
||||
(and optionally ``context_file``). Everything else is inherited.
|
||||
|
||||
The default ``setup()`` from ``IntegrationBase`` copies templates
|
||||
into the agent's commands directory — which is correct for the
|
||||
standard Markdown case.
|
||||
"""
|
||||
|
||||
# MarkdownIntegration inherits IntegrationBase.setup() as-is.
|
||||
# Future stages may add markdown-specific path rewriting here.
|
||||
pass
|
||||
265
src/specify_cli/integrations/manifest.py
Normal file
265
src/specify_cli/integrations/manifest.py
Normal file
@@ -0,0 +1,265 @@
|
||||
"""Hash-tracked installation manifest for integrations.
|
||||
|
||||
Each installed integration records the files it created together with
|
||||
their SHA-256 hashes. On uninstall only files whose hash still matches
|
||||
the recorded value are removed — modified files are left in place and
|
||||
reported to the caller.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
|
||||
def _sha256(path: Path) -> str:
|
||||
"""Return the hex SHA-256 digest of *path*."""
|
||||
h = hashlib.sha256()
|
||||
with open(path, "rb") as fh:
|
||||
for chunk in iter(lambda: fh.read(8192), b""):
|
||||
h.update(chunk)
|
||||
return h.hexdigest()
|
||||
|
||||
|
||||
def _validate_rel_path(rel: Path, root: Path) -> Path:
|
||||
"""Resolve *rel* against *root* and verify it stays within *root*.
|
||||
|
||||
Raises ``ValueError`` if *rel* is absolute, contains ``..`` segments
|
||||
that escape *root*, or otherwise resolves outside the project root.
|
||||
"""
|
||||
if rel.is_absolute():
|
||||
raise ValueError(
|
||||
f"Absolute paths are not allowed in manifests: {rel}"
|
||||
)
|
||||
resolved = (root / rel).resolve()
|
||||
root_resolved = root.resolve()
|
||||
try:
|
||||
resolved.relative_to(root_resolved)
|
||||
except ValueError:
|
||||
raise ValueError(
|
||||
f"Path {rel} resolves to {resolved} which is outside "
|
||||
f"the project root {root_resolved}"
|
||||
) from None
|
||||
return resolved
|
||||
|
||||
|
||||
class IntegrationManifest:
|
||||
"""Tracks files installed by a single integration.
|
||||
|
||||
Parameters:
|
||||
key: Integration identifier (e.g. ``"copilot"``).
|
||||
project_root: Absolute path to the project directory.
|
||||
version: CLI version string recorded in the manifest.
|
||||
"""
|
||||
|
||||
def __init__(self, key: str, project_root: Path, version: str = "") -> None:
|
||||
self.key = key
|
||||
self.project_root = project_root.resolve()
|
||||
self.version = version
|
||||
self._files: dict[str, str] = {} # rel_path → sha256 hex
|
||||
self._installed_at: str = ""
|
||||
|
||||
# -- Manifest file location -------------------------------------------
|
||||
|
||||
@property
|
||||
def manifest_path(self) -> Path:
|
||||
"""Path to the on-disk manifest JSON."""
|
||||
return self.project_root / ".specify" / "integrations" / f"{self.key}.manifest.json"
|
||||
|
||||
# -- Recording files --------------------------------------------------
|
||||
|
||||
def record_file(self, rel_path: str | Path, content: bytes | str) -> Path:
|
||||
"""Write *content* to *rel_path* (relative to project root) and record its hash.
|
||||
|
||||
Creates parent directories as needed. Returns the absolute path
|
||||
of the written file.
|
||||
|
||||
Raises ``ValueError`` if *rel_path* resolves outside the project root.
|
||||
"""
|
||||
rel = Path(rel_path)
|
||||
abs_path = _validate_rel_path(rel, self.project_root)
|
||||
abs_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if isinstance(content, str):
|
||||
content = content.encode("utf-8")
|
||||
abs_path.write_bytes(content)
|
||||
|
||||
normalized = abs_path.relative_to(self.project_root).as_posix()
|
||||
self._files[normalized] = hashlib.sha256(content).hexdigest()
|
||||
return abs_path
|
||||
|
||||
def record_existing(self, rel_path: str | Path) -> None:
|
||||
"""Record the hash of an already-existing file at *rel_path*.
|
||||
|
||||
Raises ``ValueError`` if *rel_path* resolves outside the project root.
|
||||
"""
|
||||
rel = Path(rel_path)
|
||||
abs_path = _validate_rel_path(rel, self.project_root)
|
||||
normalized = abs_path.relative_to(self.project_root).as_posix()
|
||||
self._files[normalized] = _sha256(abs_path)
|
||||
|
||||
# -- Querying ---------------------------------------------------------
|
||||
|
||||
@property
|
||||
def files(self) -> dict[str, str]:
|
||||
"""Return a copy of the ``{rel_path: sha256}`` mapping."""
|
||||
return dict(self._files)
|
||||
|
||||
def check_modified(self) -> list[str]:
|
||||
"""Return relative paths of tracked files whose content changed on disk."""
|
||||
modified: list[str] = []
|
||||
for rel, expected_hash in self._files.items():
|
||||
rel_path = Path(rel)
|
||||
# Skip paths that are absolute or attempt to escape the project root
|
||||
if rel_path.is_absolute() or ".." in rel_path.parts:
|
||||
continue
|
||||
abs_path = self.project_root / rel_path
|
||||
if not abs_path.exists() and not abs_path.is_symlink():
|
||||
continue
|
||||
# Treat symlinks and non-regular-files as modified
|
||||
if abs_path.is_symlink() or not abs_path.is_file():
|
||||
modified.append(rel)
|
||||
continue
|
||||
if _sha256(abs_path) != expected_hash:
|
||||
modified.append(rel)
|
||||
return modified
|
||||
|
||||
# -- Uninstall --------------------------------------------------------
|
||||
|
||||
def uninstall(
|
||||
self,
|
||||
project_root: Path | None = None,
|
||||
*,
|
||||
force: bool = False,
|
||||
) -> tuple[list[Path], list[Path]]:
|
||||
"""Remove tracked files whose hash still matches.
|
||||
|
||||
Parameters:
|
||||
project_root: Override for the project root.
|
||||
force: If ``True``, remove files even if modified.
|
||||
|
||||
Returns:
|
||||
``(removed, skipped)`` — absolute paths.
|
||||
"""
|
||||
root = (project_root or self.project_root).resolve()
|
||||
removed: list[Path] = []
|
||||
skipped: list[Path] = []
|
||||
|
||||
for rel, expected_hash in self._files.items():
|
||||
# Use non-resolved path for deletion so symlinks themselves
|
||||
# are removed, not their targets.
|
||||
path = root / rel
|
||||
# Validate containment lexically (without following symlinks)
|
||||
# by collapsing .. segments via Path resolution on the string parts.
|
||||
try:
|
||||
normed = Path(os.path.normpath(path))
|
||||
normed.relative_to(root)
|
||||
except (ValueError, OSError):
|
||||
continue
|
||||
if not path.exists() and not path.is_symlink():
|
||||
continue
|
||||
# Skip directories — manifest only tracks files
|
||||
if not path.is_file() and not path.is_symlink():
|
||||
skipped.append(path)
|
||||
continue
|
||||
# Never follow symlinks when comparing hashes. Only remove
|
||||
# symlinks when forced, to avoid acting on tampered entries.
|
||||
if path.is_symlink():
|
||||
if not force:
|
||||
skipped.append(path)
|
||||
continue
|
||||
else:
|
||||
if not force and _sha256(path) != expected_hash:
|
||||
skipped.append(path)
|
||||
continue
|
||||
try:
|
||||
path.unlink()
|
||||
except OSError:
|
||||
skipped.append(path)
|
||||
continue
|
||||
removed.append(path)
|
||||
# Clean up empty parent directories up to project root
|
||||
parent = path.parent
|
||||
while parent != root:
|
||||
try:
|
||||
parent.rmdir() # only succeeds if empty
|
||||
except OSError:
|
||||
break
|
||||
parent = parent.parent
|
||||
|
||||
# Remove the manifest file itself
|
||||
manifest = root / ".specify" / "integrations" / f"{self.key}.manifest.json"
|
||||
if manifest.exists():
|
||||
manifest.unlink()
|
||||
parent = manifest.parent
|
||||
while parent != root:
|
||||
try:
|
||||
parent.rmdir()
|
||||
except OSError:
|
||||
break
|
||||
parent = parent.parent
|
||||
|
||||
return removed, skipped
|
||||
|
||||
# -- Persistence ------------------------------------------------------
|
||||
|
||||
def save(self) -> Path:
|
||||
"""Write the manifest to disk. Returns the manifest path."""
|
||||
self._installed_at = self._installed_at or datetime.now(timezone.utc).isoformat()
|
||||
data: dict[str, Any] = {
|
||||
"integration": self.key,
|
||||
"version": self.version,
|
||||
"installed_at": self._installed_at,
|
||||
"files": self._files,
|
||||
}
|
||||
path = self.manifest_path
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
path.write_text(json.dumps(data, indent=2) + "\n", encoding="utf-8")
|
||||
return path
|
||||
|
||||
@classmethod
|
||||
def load(cls, key: str, project_root: Path) -> IntegrationManifest:
|
||||
"""Load an existing manifest from disk.
|
||||
|
||||
Raises ``FileNotFoundError`` if the manifest does not exist.
|
||||
"""
|
||||
inst = cls(key, project_root)
|
||||
path = inst.manifest_path
|
||||
try:
|
||||
data = json.loads(path.read_text(encoding="utf-8"))
|
||||
except json.JSONDecodeError as exc:
|
||||
raise ValueError(
|
||||
f"Integration manifest at {path} contains invalid JSON"
|
||||
) from exc
|
||||
|
||||
if not isinstance(data, dict):
|
||||
raise ValueError(
|
||||
f"Integration manifest at {path} must be a JSON object, "
|
||||
f"got {type(data).__name__}"
|
||||
)
|
||||
|
||||
files = data.get("files", {})
|
||||
if not isinstance(files, dict) or not all(
|
||||
isinstance(k, str) and isinstance(v, str) for k, v in files.items()
|
||||
):
|
||||
raise ValueError(
|
||||
f"Integration manifest 'files' at {path} must be a "
|
||||
"mapping of string paths to string hashes"
|
||||
)
|
||||
|
||||
inst.version = data.get("version", "")
|
||||
inst._installed_at = data.get("installed_at", "")
|
||||
inst._files = files
|
||||
|
||||
stored_key = data.get("integration", "")
|
||||
if stored_key and stored_key != key:
|
||||
raise ValueError(
|
||||
f"Manifest at {path} belongs to integration {stored_key!r}, "
|
||||
f"not {key!r}"
|
||||
)
|
||||
|
||||
return inst
|
||||
@@ -556,24 +556,31 @@ class PresetManager:
|
||||
registrar.unregister_commands(registered_commands, self.project_root)
|
||||
|
||||
def _get_skills_dir(self) -> Optional[Path]:
|
||||
"""Return the skills directory if ``--ai-skills`` was used during init.
|
||||
"""Return the active skills directory for preset skill overrides.
|
||||
|
||||
Reads ``.specify/init-options.json`` to determine whether skills
|
||||
are enabled and which agent was selected, then delegates to
|
||||
the module-level ``_get_skills_dir()`` helper for the concrete path.
|
||||
|
||||
Kimi is treated as a native-skills agent: if ``ai == "kimi"`` and
|
||||
``.kimi/skills`` exists, presets should still propagate command
|
||||
overrides to skills even when ``ai_skills`` is false.
|
||||
|
||||
Returns:
|
||||
The skills directory ``Path``, or ``None`` if skills were not
|
||||
enabled or the init-options file is missing.
|
||||
enabled and no native-skills fallback applies.
|
||||
"""
|
||||
from . import load_init_options, _get_skills_dir
|
||||
|
||||
opts = load_init_options(self.project_root)
|
||||
if not opts.get("ai_skills"):
|
||||
if not isinstance(opts, dict):
|
||||
opts = {}
|
||||
agent = opts.get("ai")
|
||||
if not isinstance(agent, str) or not agent:
|
||||
return None
|
||||
|
||||
agent = opts.get("ai")
|
||||
if not agent:
|
||||
ai_skills_enabled = bool(opts.get("ai_skills"))
|
||||
if not ai_skills_enabled and agent != "kimi":
|
||||
return None
|
||||
|
||||
skills_dir = _get_skills_dir(self.project_root, agent)
|
||||
@@ -582,6 +589,76 @@ class PresetManager:
|
||||
|
||||
return skills_dir
|
||||
|
||||
@staticmethod
|
||||
def _skill_names_for_command(cmd_name: str) -> tuple[str, str]:
|
||||
"""Return the modern and legacy skill directory names for a command."""
|
||||
raw_short_name = cmd_name
|
||||
if raw_short_name.startswith("speckit."):
|
||||
raw_short_name = raw_short_name[len("speckit."):]
|
||||
|
||||
modern_skill_name = f"speckit-{raw_short_name.replace('.', '-')}"
|
||||
legacy_skill_name = f"speckit.{raw_short_name}"
|
||||
return modern_skill_name, legacy_skill_name
|
||||
|
||||
@staticmethod
|
||||
def _skill_title_from_command(cmd_name: str) -> str:
|
||||
"""Return a human-friendly title for a skill command name."""
|
||||
title_name = cmd_name
|
||||
if title_name.startswith("speckit."):
|
||||
title_name = title_name[len("speckit."):]
|
||||
return title_name.replace(".", " ").replace("-", " ").title()
|
||||
|
||||
def _build_extension_skill_restore_index(self) -> Dict[str, Dict[str, Any]]:
|
||||
"""Index extension-backed skill restore data by skill directory name."""
|
||||
from .extensions import ExtensionManifest, ValidationError
|
||||
|
||||
resolver = PresetResolver(self.project_root)
|
||||
extensions_dir = self.project_root / ".specify" / "extensions"
|
||||
restore_index: Dict[str, Dict[str, Any]] = {}
|
||||
|
||||
for _priority, ext_id, _metadata in resolver._get_all_extensions_by_priority():
|
||||
ext_dir = extensions_dir / ext_id
|
||||
manifest_path = ext_dir / "extension.yml"
|
||||
if not manifest_path.is_file():
|
||||
continue
|
||||
|
||||
try:
|
||||
manifest = ExtensionManifest(manifest_path)
|
||||
except ValidationError:
|
||||
continue
|
||||
|
||||
ext_root = ext_dir.resolve()
|
||||
for cmd_info in manifest.commands:
|
||||
cmd_name = cmd_info.get("name")
|
||||
cmd_file_rel = cmd_info.get("file")
|
||||
if not isinstance(cmd_name, str) or not isinstance(cmd_file_rel, str):
|
||||
continue
|
||||
|
||||
cmd_path = Path(cmd_file_rel)
|
||||
if cmd_path.is_absolute():
|
||||
continue
|
||||
|
||||
try:
|
||||
source_file = (ext_root / cmd_path).resolve()
|
||||
source_file.relative_to(ext_root)
|
||||
except (OSError, ValueError):
|
||||
continue
|
||||
|
||||
if not source_file.is_file():
|
||||
continue
|
||||
|
||||
restore_info = {
|
||||
"command_name": cmd_name,
|
||||
"source_file": source_file,
|
||||
"source": f"extension:{manifest.id}",
|
||||
}
|
||||
modern_skill_name, legacy_skill_name = self._skill_names_for_command(cmd_name)
|
||||
restore_index.setdefault(modern_skill_name, restore_info)
|
||||
if legacy_skill_name != modern_skill_name:
|
||||
restore_index.setdefault(legacy_skill_name, restore_info)
|
||||
|
||||
return restore_index
|
||||
|
||||
def _register_skills(
|
||||
self,
|
||||
manifest: "PresetManifest",
|
||||
@@ -629,9 +706,15 @@ class PresetManager:
|
||||
return []
|
||||
|
||||
from . import SKILL_DESCRIPTIONS, load_init_options
|
||||
from .agents import CommandRegistrar
|
||||
|
||||
opts = load_init_options(self.project_root)
|
||||
selected_ai = opts.get("ai", "")
|
||||
init_opts = load_init_options(self.project_root)
|
||||
if not isinstance(init_opts, dict):
|
||||
init_opts = {}
|
||||
selected_ai = init_opts.get("ai")
|
||||
if not isinstance(selected_ai, str):
|
||||
return []
|
||||
registrar = CommandRegistrar()
|
||||
|
||||
written: List[str] = []
|
||||
|
||||
@@ -643,43 +726,42 @@ class PresetManager:
|
||||
continue
|
||||
|
||||
# Derive the short command name (e.g. "specify" from "speckit.specify")
|
||||
short_name = cmd_name
|
||||
if short_name.startswith("speckit."):
|
||||
short_name = short_name[len("speckit."):]
|
||||
if selected_ai == "kimi":
|
||||
skill_name = f"speckit.{short_name}"
|
||||
else:
|
||||
skill_name = f"speckit-{short_name}"
|
||||
raw_short_name = cmd_name
|
||||
if raw_short_name.startswith("speckit."):
|
||||
raw_short_name = raw_short_name[len("speckit."):]
|
||||
short_name = raw_short_name.replace(".", "-")
|
||||
skill_name, legacy_skill_name = self._skill_names_for_command(cmd_name)
|
||||
skill_title = self._skill_title_from_command(cmd_name)
|
||||
|
||||
# Only overwrite if the skill already exists (i.e. --ai-skills was used)
|
||||
skill_subdir = skills_dir / skill_name
|
||||
if not skill_subdir.exists():
|
||||
# Only overwrite skills that already exist under skills_dir,
|
||||
# including Kimi native skills when ai_skills is false.
|
||||
# If both modern and legacy directories exist, update both.
|
||||
target_skill_names: List[str] = []
|
||||
if (skills_dir / skill_name).is_dir():
|
||||
target_skill_names.append(skill_name)
|
||||
if legacy_skill_name != skill_name and (skills_dir / legacy_skill_name).is_dir():
|
||||
target_skill_names.append(legacy_skill_name)
|
||||
if not target_skill_names:
|
||||
continue
|
||||
|
||||
# Parse the command file
|
||||
content = source_file.read_text(encoding="utf-8")
|
||||
if content.startswith("---"):
|
||||
parts = content.split("---", 2)
|
||||
if len(parts) >= 3:
|
||||
frontmatter = yaml.safe_load(parts[1])
|
||||
if not isinstance(frontmatter, dict):
|
||||
frontmatter = {}
|
||||
body = parts[2].strip()
|
||||
else:
|
||||
frontmatter = {}
|
||||
body = content
|
||||
else:
|
||||
frontmatter = {}
|
||||
body = content
|
||||
frontmatter, body = registrar.parse_frontmatter(content)
|
||||
|
||||
original_desc = frontmatter.get("description", "")
|
||||
enhanced_desc = SKILL_DESCRIPTIONS.get(
|
||||
short_name,
|
||||
original_desc or f"Spec-kit workflow command: {short_name}",
|
||||
)
|
||||
frontmatter = dict(frontmatter)
|
||||
frontmatter["description"] = enhanced_desc
|
||||
body = registrar.resolve_skill_placeholders(
|
||||
selected_ai, frontmatter, body, self.project_root
|
||||
)
|
||||
|
||||
for target_skill_name in target_skill_names:
|
||||
frontmatter_data = {
|
||||
"name": skill_name,
|
||||
"name": target_skill_name,
|
||||
"description": enhanced_desc,
|
||||
"compatibility": "Requires spec-kit project structure with .specify/ directory",
|
||||
"metadata": {
|
||||
@@ -692,13 +774,13 @@ class PresetManager:
|
||||
f"---\n"
|
||||
f"{frontmatter_text}\n"
|
||||
f"---\n\n"
|
||||
f"# Speckit {short_name.title()} Skill\n\n"
|
||||
f"# Speckit {skill_title} Skill\n\n"
|
||||
f"{body}\n"
|
||||
)
|
||||
|
||||
skill_file = skill_subdir / "SKILL.md"
|
||||
skill_file = skills_dir / target_skill_name / "SKILL.md"
|
||||
skill_file.write_text(skill_content, encoding="utf-8")
|
||||
written.append(skill_name)
|
||||
written.append(target_skill_name)
|
||||
|
||||
return written
|
||||
|
||||
@@ -720,10 +802,17 @@ class PresetManager:
|
||||
if not skills_dir:
|
||||
return
|
||||
|
||||
from . import SKILL_DESCRIPTIONS
|
||||
from . import SKILL_DESCRIPTIONS, load_init_options
|
||||
from .agents import CommandRegistrar
|
||||
|
||||
# Locate core command templates from the project's installed templates
|
||||
core_templates_dir = self.project_root / ".specify" / "templates" / "commands"
|
||||
init_opts = load_init_options(self.project_root)
|
||||
if not isinstance(init_opts, dict):
|
||||
init_opts = {}
|
||||
selected_ai = init_opts.get("ai")
|
||||
registrar = CommandRegistrar()
|
||||
extension_restore_index = self._build_extension_skill_restore_index()
|
||||
|
||||
for skill_name in skill_names:
|
||||
# Derive command name from skill name (speckit-specify -> specify)
|
||||
@@ -735,7 +824,10 @@ class PresetManager:
|
||||
|
||||
skill_subdir = skills_dir / skill_name
|
||||
skill_file = skill_subdir / "SKILL.md"
|
||||
if not skill_file.exists():
|
||||
if not skill_subdir.is_dir():
|
||||
continue
|
||||
if not skill_file.is_file():
|
||||
# Only manage directories that contain the expected skill entrypoint.
|
||||
continue
|
||||
|
||||
# Try to find the core command template
|
||||
@@ -746,19 +838,11 @@ class PresetManager:
|
||||
if core_file:
|
||||
# Restore from core template
|
||||
content = core_file.read_text(encoding="utf-8")
|
||||
if content.startswith("---"):
|
||||
parts = content.split("---", 2)
|
||||
if len(parts) >= 3:
|
||||
frontmatter = yaml.safe_load(parts[1])
|
||||
if not isinstance(frontmatter, dict):
|
||||
frontmatter = {}
|
||||
body = parts[2].strip()
|
||||
else:
|
||||
frontmatter = {}
|
||||
body = content
|
||||
else:
|
||||
frontmatter = {}
|
||||
body = content
|
||||
frontmatter, body = registrar.parse_frontmatter(content)
|
||||
if isinstance(selected_ai, str):
|
||||
body = registrar.resolve_skill_placeholders(
|
||||
selected_ai, frontmatter, body, self.project_root
|
||||
)
|
||||
|
||||
original_desc = frontmatter.get("description", "")
|
||||
enhanced_desc = SKILL_DESCRIPTIONS.get(
|
||||
@@ -776,16 +860,49 @@ class PresetManager:
|
||||
},
|
||||
}
|
||||
frontmatter_text = yaml.safe_dump(frontmatter_data, sort_keys=False).strip()
|
||||
skill_title = self._skill_title_from_command(short_name)
|
||||
skill_content = (
|
||||
f"---\n"
|
||||
f"{frontmatter_text}\n"
|
||||
f"---\n\n"
|
||||
f"# Speckit {short_name.title()} Skill\n\n"
|
||||
f"# Speckit {skill_title} Skill\n\n"
|
||||
f"{body}\n"
|
||||
)
|
||||
skill_file.write_text(skill_content, encoding="utf-8")
|
||||
continue
|
||||
|
||||
extension_restore = extension_restore_index.get(skill_name)
|
||||
if extension_restore:
|
||||
content = extension_restore["source_file"].read_text(encoding="utf-8")
|
||||
frontmatter, body = registrar.parse_frontmatter(content)
|
||||
if isinstance(selected_ai, str):
|
||||
body = registrar.resolve_skill_placeholders(
|
||||
selected_ai, frontmatter, body, self.project_root
|
||||
)
|
||||
|
||||
command_name = extension_restore["command_name"]
|
||||
title_name = self._skill_title_from_command(command_name)
|
||||
|
||||
frontmatter_data = {
|
||||
"name": skill_name,
|
||||
"description": frontmatter.get("description", f"Extension command: {command_name}"),
|
||||
"compatibility": "Requires spec-kit project structure with .specify/ directory",
|
||||
"metadata": {
|
||||
"author": "github-spec-kit",
|
||||
"source": extension_restore["source"],
|
||||
},
|
||||
}
|
||||
frontmatter_text = yaml.safe_dump(frontmatter_data, sort_keys=False).strip()
|
||||
skill_content = (
|
||||
f"---\n"
|
||||
f"{frontmatter_text}\n"
|
||||
f"---\n\n"
|
||||
f"# {title_name} Skill\n\n"
|
||||
f"{body}\n"
|
||||
)
|
||||
skill_file.write_text(skill_content, encoding="utf-8")
|
||||
else:
|
||||
# No core template — remove the skill entirely
|
||||
# No core or extension template — remove the skill entirely
|
||||
shutil.rmtree(skill_subdir)
|
||||
|
||||
def install_from_directory(
|
||||
@@ -915,17 +1032,26 @@ class PresetManager:
|
||||
if not self.registry.is_installed(pack_id):
|
||||
return False
|
||||
|
||||
# Unregister commands from AI agents
|
||||
metadata = self.registry.get(pack_id)
|
||||
registered_commands = metadata.get("registered_commands", {}) if metadata else {}
|
||||
if registered_commands:
|
||||
self._unregister_commands(registered_commands)
|
||||
|
||||
# Restore original skills when preset is removed
|
||||
registered_skills = metadata.get("registered_skills", []) if metadata else []
|
||||
registered_commands = metadata.get("registered_commands", {}) if metadata else {}
|
||||
pack_dir = self.presets_dir / pack_id
|
||||
if registered_skills:
|
||||
self._unregister_skills(registered_skills, pack_dir)
|
||||
try:
|
||||
from . import NATIVE_SKILLS_AGENTS
|
||||
except ImportError:
|
||||
NATIVE_SKILLS_AGENTS = set()
|
||||
registered_commands = {
|
||||
agent_name: cmd_names
|
||||
for agent_name, cmd_names in registered_commands.items()
|
||||
if agent_name not in NATIVE_SKILLS_AGENTS
|
||||
}
|
||||
|
||||
# Unregister non-skill command files from AI agents.
|
||||
if registered_commands:
|
||||
self._unregister_commands(registered_commands)
|
||||
|
||||
if pack_dir.exists():
|
||||
shutil.rmtree(pack_dir)
|
||||
|
||||
@@ -44,7 +44,7 @@ Load only the minimal necessary context from each artifact:
|
||||
|
||||
- Overview/Context
|
||||
- Functional Requirements
|
||||
- Non-Functional Requirements
|
||||
- Success Criteria (measurable outcomes — e.g., performance, security, availability, user success, business impact)
|
||||
- User Stories
|
||||
- Edge Cases (if present)
|
||||
|
||||
@@ -71,7 +71,7 @@ Load only the minimal necessary context from each artifact:
|
||||
|
||||
Create internal representations (do not include raw artifacts in output):
|
||||
|
||||
- **Requirements inventory**: Each functional + non-functional requirement with a stable key (derive slug based on imperative phrase; e.g., "User can upload file" → `user-can-upload-file`)
|
||||
- **Requirements inventory**: For each Functional Requirement (FR-###) and Success Criterion (SC-###), record a stable key. Use the explicit FR-/SC- identifier as the primary key when present, and optionally also derive an imperative-phrase slug for readability (e.g., "User can upload file" → `user-can-upload-file`). Include only Success Criteria items that require buildable work (e.g., load-testing infrastructure, security audit tooling), and exclude post-launch outcome metrics and business KPIs (e.g., "Reduce support tickets by 50%").
|
||||
- **User story/action inventory**: Discrete user actions with acceptance criteria
|
||||
- **Task coverage mapping**: Map each task to one or more requirements or stories (inference by keyword / explicit reference patterns like IDs or key phrases)
|
||||
- **Constitution rule set**: Extract principle names and MUST/SHOULD normative statements
|
||||
@@ -105,7 +105,7 @@ Focus on high-signal findings. Limit to 50 findings total; aggregate remainder i
|
||||
|
||||
- Requirements with zero associated tasks
|
||||
- Tasks with no mapped requirement/story
|
||||
- Non-functional requirements not reflected in tasks (e.g., performance, security)
|
||||
- Success Criteria requiring buildable work (performance, security, availability) not reflected in tasks
|
||||
|
||||
#### F. Inconsistency
|
||||
|
||||
|
||||
@@ -145,7 +145,7 @@ Execution steps:
|
||||
- Functional ambiguity → Update or add a bullet in Functional Requirements.
|
||||
- User interaction / actor distinction → Update User Stories or Actors subsection (if present) with clarified role, constraint, or scenario.
|
||||
- Data shape / entities → Update Data Model (add fields, types, relationships) preserving ordering; note added constraints succinctly.
|
||||
- Non-functional constraint → Add/modify measurable criteria in Non-Functional / Quality Attributes section (convert vague adjective to metric or explicit target).
|
||||
- Non-functional constraint → Add/modify measurable criteria in Success Criteria > Measurable Outcomes (convert vague adjective to metric or explicit target).
|
||||
- Edge case / negative flow → Add a new bullet under Edge Cases / Error Handling (or create such subsection if template provides placeholder for it).
|
||||
- Terminology conflict → Normalize term across spec; retain original only if necessary by adding `(formerly referred to as "X")` once.
|
||||
- If the clarification invalidates an earlier ambiguous statement, replace that statement instead of duplicating; leave no obsolete contradictory text.
|
||||
|
||||
@@ -113,3 +113,16 @@
|
||||
- **SC-002**: [Measurable metric, e.g., "System handles 1000 concurrent users without degradation"]
|
||||
- **SC-003**: [User satisfaction metric, e.g., "90% of users successfully complete primary task on first attempt"]
|
||||
- **SC-004**: [Business metric, e.g., "Reduce support tickets related to [X] by 50%"]
|
||||
|
||||
## Assumptions
|
||||
|
||||
<!--
|
||||
ACTION REQUIRED: The content in this section represents placeholders.
|
||||
Fill them out with the right assumptions based on reasonable defaults
|
||||
chosen when the feature description did not specify certain details.
|
||||
-->
|
||||
|
||||
- [Assumption about target users, e.g., "Users have stable internet connectivity"]
|
||||
- [Assumption about scope boundaries, e.g., "Mobile support is out of scope for v1"]
|
||||
- [Assumption about data/environment, e.g., "Existing authentication system will be reused"]
|
||||
- [Dependency on existing system/service, e.g., "Requires access to the existing user profile API"]
|
||||
|
||||
@@ -24,8 +24,8 @@ import specify_cli
|
||||
|
||||
from specify_cli import (
|
||||
_get_skills_dir,
|
||||
_migrate_legacy_kimi_dotted_skills,
|
||||
install_ai_skills,
|
||||
AGENT_SKILLS_DIR_OVERRIDES,
|
||||
DEFAULT_SKILLS_DIR,
|
||||
SKILL_DESCRIPTIONS,
|
||||
AGENT_CONFIG,
|
||||
@@ -169,8 +169,8 @@ class TestGetSkillsDir:
|
||||
result = _get_skills_dir(project_dir, "copilot")
|
||||
assert result == project_dir / ".github" / "skills"
|
||||
|
||||
def test_codex_uses_override(self, project_dir):
|
||||
"""Codex should use the AGENT_SKILLS_DIR_OVERRIDES value."""
|
||||
def test_codex_skills_dir_from_agent_config(self, project_dir):
|
||||
"""Codex should resolve skills directory from AGENT_CONFIG folder."""
|
||||
result = _get_skills_dir(project_dir, "codex")
|
||||
assert result == project_dir / ".agents" / "skills"
|
||||
|
||||
@@ -203,12 +203,71 @@ class TestGetSkillsDir:
|
||||
# Should always end with "skills"
|
||||
assert result.name == "skills"
|
||||
|
||||
def test_override_takes_precedence_over_config(self, project_dir):
|
||||
"""AGENT_SKILLS_DIR_OVERRIDES should take precedence over AGENT_CONFIG."""
|
||||
for agent_key in AGENT_SKILLS_DIR_OVERRIDES:
|
||||
result = _get_skills_dir(project_dir, agent_key)
|
||||
expected = project_dir / AGENT_SKILLS_DIR_OVERRIDES[agent_key]
|
||||
assert result == expected
|
||||
class TestKimiLegacySkillMigration:
|
||||
"""Test temporary migration from Kimi dotted skill names to hyphenated names."""
|
||||
|
||||
def test_migrates_legacy_dotted_skill_directory(self, project_dir):
|
||||
skills_dir = project_dir / ".kimi" / "skills"
|
||||
legacy_dir = skills_dir / "speckit.plan"
|
||||
legacy_dir.mkdir(parents=True)
|
||||
(legacy_dir / "SKILL.md").write_text("legacy")
|
||||
|
||||
migrated, removed = _migrate_legacy_kimi_dotted_skills(skills_dir)
|
||||
|
||||
assert migrated == 1
|
||||
assert removed == 0
|
||||
assert not legacy_dir.exists()
|
||||
assert (skills_dir / "speckit-plan" / "SKILL.md").exists()
|
||||
|
||||
def test_removes_legacy_dir_when_hyphenated_target_exists_with_same_content(self, project_dir):
|
||||
skills_dir = project_dir / ".kimi" / "skills"
|
||||
legacy_dir = skills_dir / "speckit.plan"
|
||||
legacy_dir.mkdir(parents=True)
|
||||
(legacy_dir / "SKILL.md").write_text("legacy")
|
||||
target_dir = skills_dir / "speckit-plan"
|
||||
target_dir.mkdir(parents=True)
|
||||
(target_dir / "SKILL.md").write_text("legacy")
|
||||
|
||||
migrated, removed = _migrate_legacy_kimi_dotted_skills(skills_dir)
|
||||
|
||||
assert migrated == 0
|
||||
assert removed == 1
|
||||
assert not legacy_dir.exists()
|
||||
assert (target_dir / "SKILL.md").read_text() == "legacy"
|
||||
|
||||
def test_keeps_legacy_dir_when_hyphenated_target_differs(self, project_dir):
|
||||
skills_dir = project_dir / ".kimi" / "skills"
|
||||
legacy_dir = skills_dir / "speckit.plan"
|
||||
legacy_dir.mkdir(parents=True)
|
||||
(legacy_dir / "SKILL.md").write_text("legacy")
|
||||
target_dir = skills_dir / "speckit-plan"
|
||||
target_dir.mkdir(parents=True)
|
||||
(target_dir / "SKILL.md").write_text("new")
|
||||
|
||||
migrated, removed = _migrate_legacy_kimi_dotted_skills(skills_dir)
|
||||
|
||||
assert migrated == 0
|
||||
assert removed == 0
|
||||
assert legacy_dir.exists()
|
||||
assert (legacy_dir / "SKILL.md").read_text() == "legacy"
|
||||
assert (target_dir / "SKILL.md").read_text() == "new"
|
||||
|
||||
def test_keeps_legacy_dir_when_matching_target_but_extra_files_exist(self, project_dir):
|
||||
skills_dir = project_dir / ".kimi" / "skills"
|
||||
legacy_dir = skills_dir / "speckit.plan"
|
||||
legacy_dir.mkdir(parents=True)
|
||||
(legacy_dir / "SKILL.md").write_text("legacy")
|
||||
(legacy_dir / "notes.txt").write_text("custom")
|
||||
target_dir = skills_dir / "speckit-plan"
|
||||
target_dir.mkdir(parents=True)
|
||||
(target_dir / "SKILL.md").write_text("legacy")
|
||||
|
||||
migrated, removed = _migrate_legacy_kimi_dotted_skills(skills_dir)
|
||||
|
||||
assert migrated == 0
|
||||
assert removed == 0
|
||||
assert legacy_dir.exists()
|
||||
assert (legacy_dir / "notes.txt").read_text() == "custom"
|
||||
|
||||
|
||||
# ===== install_ai_skills Tests =====
|
||||
@@ -473,8 +532,7 @@ class TestInstallAiSkills:
|
||||
skills_dir = _get_skills_dir(proj, agent_key)
|
||||
assert skills_dir.exists()
|
||||
skill_dirs = [d.name for d in skills_dir.iterdir() if d.is_dir()]
|
||||
# Kimi uses dotted skill names; other agents use hyphen-separated names.
|
||||
expected_skill_name = "speckit.specify" if agent_key == "kimi" else "speckit-specify"
|
||||
expected_skill_name = "speckit-specify"
|
||||
assert expected_skill_name in skill_dirs
|
||||
assert (skills_dir / expected_skill_name / "SKILL.md").exists()
|
||||
|
||||
@@ -773,6 +831,32 @@ class TestNewProjectCommandSkip:
|
||||
mock_skills.assert_called_once()
|
||||
assert mock_skills.call_args.kwargs.get("overwrite_existing") is True
|
||||
|
||||
def test_kimi_legacy_migration_runs_without_ai_skills_flag(self, tmp_path):
|
||||
"""Kimi init should migrate dotted legacy skills even when --ai-skills is not set."""
|
||||
from typer.testing import CliRunner
|
||||
|
||||
runner = CliRunner()
|
||||
target = tmp_path / "kimi-legacy-no-ai-skills"
|
||||
|
||||
def fake_download(project_path, *args, **kwargs):
|
||||
legacy_dir = project_path / ".kimi" / "skills" / "speckit.plan"
|
||||
legacy_dir.mkdir(parents=True, exist_ok=True)
|
||||
(legacy_dir / "SKILL.md").write_text("---\nname: speckit.plan\n---\n\nlegacy\n")
|
||||
|
||||
with patch("specify_cli.download_and_extract_template", side_effect=fake_download), \
|
||||
patch("specify_cli.ensure_executable_scripts"), \
|
||||
patch("specify_cli.ensure_constitution_from_template"), \
|
||||
patch("specify_cli.is_git_repo", return_value=False), \
|
||||
patch("specify_cli.shutil.which", return_value="/usr/bin/kimi"):
|
||||
result = runner.invoke(
|
||||
app,
|
||||
["init", str(target), "--ai", "kimi", "--script", "sh", "--no-git"],
|
||||
)
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert not (target / ".kimi" / "skills" / "speckit.plan").exists()
|
||||
assert (target / ".kimi" / "skills" / "speckit-plan" / "SKILL.md").exists()
|
||||
|
||||
def test_codex_ai_skills_here_mode_preserves_existing_codex_dir(self, tmp_path, monkeypatch):
|
||||
"""Codex --here skills init should not delete a pre-existing .codex directory."""
|
||||
from typer.testing import CliRunner
|
||||
@@ -1118,12 +1202,12 @@ class TestCliValidation:
|
||||
assert "Optional skills that you can use for your specs" in result.output
|
||||
|
||||
def test_kimi_next_steps_show_skill_invocation(self, monkeypatch):
|
||||
"""Kimi next-steps guidance should display /skill:speckit.* usage."""
|
||||
"""Kimi next-steps guidance should display /skill:speckit-* usage."""
|
||||
from typer.testing import CliRunner
|
||||
|
||||
def _fake_download(*args, **kwargs):
|
||||
project_path = Path(args[0])
|
||||
skill_dir = project_path / ".kimi" / "skills" / "speckit.specify"
|
||||
skill_dir = project_path / ".kimi" / "skills" / "speckit-specify"
|
||||
skill_dir.mkdir(parents=True, exist_ok=True)
|
||||
(skill_dir / "SKILL.md").write_text("---\ndescription: Test skill\n---\n\nBody.\n")
|
||||
|
||||
@@ -1137,7 +1221,7 @@ class TestCliValidation:
|
||||
)
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "/skill:speckit.constitution" in result.output
|
||||
assert "/skill:speckit-constitution" in result.output
|
||||
assert "/speckit.constitution" not in result.output
|
||||
assert "Optional skills that you can use for your specs" in result.output
|
||||
|
||||
|
||||
96
tests/test_check_tool.py
Normal file
96
tests/test_check_tool.py
Normal file
@@ -0,0 +1,96 @@
|
||||
"""Tests for check_tool() — Claude Code CLI detection across install methods.
|
||||
|
||||
Covers issue https://github.com/github/spec-kit/issues/550:
|
||||
`specify check` reports "Claude Code CLI (not found)" even when claude is
|
||||
installed via npm-local (the default `claude` installer path).
|
||||
"""
|
||||
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
from specify_cli import check_tool
|
||||
|
||||
|
||||
class TestCheckToolClaude:
|
||||
"""Claude CLI detection must work for all install methods."""
|
||||
|
||||
def test_detected_via_migrate_installer_path(self, tmp_path):
|
||||
"""claude migrate-installer puts binary at ~/.claude/local/claude."""
|
||||
fake_claude = tmp_path / "claude"
|
||||
fake_claude.touch()
|
||||
|
||||
# Ensure npm-local path is missing so we only exercise migrate-installer path
|
||||
fake_missing = tmp_path / "nonexistent" / "claude"
|
||||
|
||||
with patch("specify_cli.CLAUDE_LOCAL_PATH", fake_claude), \
|
||||
patch("specify_cli.CLAUDE_NPM_LOCAL_PATH", fake_missing), \
|
||||
patch("shutil.which", return_value=None):
|
||||
assert check_tool("claude") is True
|
||||
|
||||
def test_detected_via_npm_local_path(self, tmp_path):
|
||||
"""npm-local install puts binary at ~/.claude/local/node_modules/.bin/claude."""
|
||||
fake_npm_claude = tmp_path / "node_modules" / ".bin" / "claude"
|
||||
fake_npm_claude.parent.mkdir(parents=True)
|
||||
fake_npm_claude.touch()
|
||||
|
||||
# Neither the migrate-installer path nor PATH has claude
|
||||
fake_migrate = tmp_path / "nonexistent" / "claude"
|
||||
|
||||
with patch("specify_cli.CLAUDE_LOCAL_PATH", fake_migrate), \
|
||||
patch("specify_cli.CLAUDE_NPM_LOCAL_PATH", fake_npm_claude), \
|
||||
patch("shutil.which", return_value=None):
|
||||
assert check_tool("claude") is True
|
||||
|
||||
def test_detected_via_path(self, tmp_path):
|
||||
"""claude on PATH (global npm install) should still work."""
|
||||
fake_missing = tmp_path / "nonexistent" / "claude"
|
||||
|
||||
with patch("specify_cli.CLAUDE_LOCAL_PATH", fake_missing), \
|
||||
patch("specify_cli.CLAUDE_NPM_LOCAL_PATH", fake_missing), \
|
||||
patch("shutil.which", return_value="/usr/local/bin/claude"):
|
||||
assert check_tool("claude") is True
|
||||
|
||||
def test_not_found_when_nowhere(self, tmp_path):
|
||||
"""Should return False when claude is genuinely not installed."""
|
||||
fake_missing = tmp_path / "nonexistent" / "claude"
|
||||
|
||||
with patch("specify_cli.CLAUDE_LOCAL_PATH", fake_missing), \
|
||||
patch("specify_cli.CLAUDE_NPM_LOCAL_PATH", fake_missing), \
|
||||
patch("shutil.which", return_value=None):
|
||||
assert check_tool("claude") is False
|
||||
|
||||
def test_tracker_updated_on_npm_local_detection(self, tmp_path):
|
||||
"""StepTracker should be marked 'available' for npm-local installs."""
|
||||
fake_npm_claude = tmp_path / "node_modules" / ".bin" / "claude"
|
||||
fake_npm_claude.parent.mkdir(parents=True)
|
||||
fake_npm_claude.touch()
|
||||
|
||||
fake_missing = tmp_path / "nonexistent" / "claude"
|
||||
tracker = MagicMock()
|
||||
|
||||
with patch("specify_cli.CLAUDE_LOCAL_PATH", fake_missing), \
|
||||
patch("specify_cli.CLAUDE_NPM_LOCAL_PATH", fake_npm_claude), \
|
||||
patch("shutil.which", return_value=None):
|
||||
result = check_tool("claude", tracker=tracker)
|
||||
|
||||
assert result is True
|
||||
tracker.complete.assert_called_once_with("claude", "available")
|
||||
|
||||
|
||||
class TestCheckToolOther:
|
||||
"""Non-Claude tools should be unaffected by the fix."""
|
||||
|
||||
def test_git_detected_via_path(self):
|
||||
with patch("shutil.which", return_value="/usr/bin/git"):
|
||||
assert check_tool("git") is True
|
||||
|
||||
def test_missing_tool(self):
|
||||
with patch("shutil.which", return_value=None):
|
||||
assert check_tool("nonexistent-tool") is False
|
||||
|
||||
def test_kiro_fallback(self):
|
||||
"""kiro-cli detection should try both kiro-cli and kiro."""
|
||||
def fake_which(name):
|
||||
return "/usr/bin/kiro" if name == "kiro" else None
|
||||
|
||||
with patch("shutil.which", side_effect=fake_which):
|
||||
assert check_tool("kiro-cli") is True
|
||||
@@ -142,7 +142,7 @@ def _expected_cmd_dir(project_path: Path, agent: str) -> Path:
|
||||
|
||||
# Agents whose commands are laid out as <skills_dir>/<name>/SKILL.md.
|
||||
# Maps agent -> separator used in skill directory names.
|
||||
_SKILL_AGENTS: dict[str, str] = {"codex": "-", "kimi": "."}
|
||||
_SKILL_AGENTS: dict[str, str] = {"codex": "-", "kimi": "-"}
|
||||
|
||||
|
||||
def _expected_ext(agent: str) -> str:
|
||||
|
||||
741
tests/test_extension_skills.py
Normal file
741
tests/test_extension_skills.py
Normal file
@@ -0,0 +1,741 @@
|
||||
"""
|
||||
Unit tests for extension skill auto-registration.
|
||||
|
||||
Tests cover:
|
||||
- SKILL.md generation when --ai-skills was used during init
|
||||
- No skills created when ai_skills not active
|
||||
- SKILL.md content correctness
|
||||
- Existing user-modified skills not overwritten
|
||||
- Skill cleanup on extension removal
|
||||
- Registry metadata includes registered_skills
|
||||
"""
|
||||
|
||||
import json
|
||||
import pytest
|
||||
import tempfile
|
||||
import shutil
|
||||
import yaml
|
||||
from pathlib import Path
|
||||
|
||||
from specify_cli.extensions import (
|
||||
ExtensionManifest,
|
||||
ExtensionManager,
|
||||
ExtensionError,
|
||||
)
|
||||
|
||||
|
||||
# ===== Helpers =====
|
||||
|
||||
def _create_init_options(project_root: Path, ai: str = "claude", ai_skills: bool = True):
|
||||
"""Write a .specify/init-options.json file."""
|
||||
opts_dir = project_root / ".specify"
|
||||
opts_dir.mkdir(parents=True, exist_ok=True)
|
||||
opts_file = opts_dir / "init-options.json"
|
||||
opts_file.write_text(json.dumps({
|
||||
"ai": ai,
|
||||
"ai_skills": ai_skills,
|
||||
"script": "sh",
|
||||
}))
|
||||
|
||||
|
||||
def _create_skills_dir(project_root: Path, ai: str = "claude") -> Path:
|
||||
"""Create and return the expected skills directory for the given agent."""
|
||||
# Match the logic in _get_skills_dir() from specify_cli
|
||||
from specify_cli import AGENT_CONFIG, DEFAULT_SKILLS_DIR
|
||||
|
||||
agent_config = AGENT_CONFIG.get(ai, {})
|
||||
agent_folder = agent_config.get("folder", "")
|
||||
if agent_folder:
|
||||
skills_dir = project_root / agent_folder.rstrip("/") / "skills"
|
||||
else:
|
||||
skills_dir = project_root / DEFAULT_SKILLS_DIR
|
||||
|
||||
skills_dir.mkdir(parents=True, exist_ok=True)
|
||||
return skills_dir
|
||||
|
||||
|
||||
def _create_extension_dir(temp_dir: Path, ext_id: str = "test-ext") -> Path:
|
||||
"""Create a complete extension directory with manifest and command files."""
|
||||
ext_dir = temp_dir / ext_id
|
||||
ext_dir.mkdir()
|
||||
|
||||
manifest_data = {
|
||||
"schema_version": "1.0",
|
||||
"extension": {
|
||||
"id": ext_id,
|
||||
"name": "Test Extension",
|
||||
"version": "1.0.0",
|
||||
"description": "A test extension for skill registration",
|
||||
},
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0",
|
||||
},
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": f"speckit.{ext_id}.hello",
|
||||
"file": "commands/hello.md",
|
||||
"description": "Test hello command",
|
||||
},
|
||||
{
|
||||
"name": f"speckit.{ext_id}.world",
|
||||
"file": "commands/world.md",
|
||||
"description": "Test world command",
|
||||
},
|
||||
]
|
||||
},
|
||||
}
|
||||
|
||||
with open(ext_dir / "extension.yml", "w") as f:
|
||||
yaml.dump(manifest_data, f)
|
||||
|
||||
commands_dir = ext_dir / "commands"
|
||||
commands_dir.mkdir()
|
||||
|
||||
(commands_dir / "hello.md").write_text(
|
||||
"---\n"
|
||||
"description: \"Test hello command\"\n"
|
||||
"---\n"
|
||||
"\n"
|
||||
"# Hello Command\n"
|
||||
"\n"
|
||||
"Run this to say hello.\n"
|
||||
"$ARGUMENTS\n"
|
||||
)
|
||||
|
||||
(commands_dir / "world.md").write_text(
|
||||
"---\n"
|
||||
"description: \"Test world command\"\n"
|
||||
"---\n"
|
||||
"\n"
|
||||
"# World Command\n"
|
||||
"\n"
|
||||
"Run this to greet the world.\n"
|
||||
)
|
||||
|
||||
return ext_dir
|
||||
|
||||
|
||||
# ===== Fixtures =====
|
||||
|
||||
@pytest.fixture
|
||||
def temp_dir():
|
||||
"""Create a temporary directory for tests."""
|
||||
tmpdir = tempfile.mkdtemp()
|
||||
yield Path(tmpdir)
|
||||
shutil.rmtree(tmpdir)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def project_dir(temp_dir):
|
||||
"""Create a mock spec-kit project directory."""
|
||||
proj_dir = temp_dir / "project"
|
||||
proj_dir.mkdir()
|
||||
|
||||
# Create .specify directory
|
||||
specify_dir = proj_dir / ".specify"
|
||||
specify_dir.mkdir()
|
||||
|
||||
return proj_dir
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def extension_dir(temp_dir):
|
||||
"""Create a complete extension directory."""
|
||||
return _create_extension_dir(temp_dir)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def skills_project(project_dir):
|
||||
"""Create a project with --ai-skills enabled and skills directory."""
|
||||
_create_init_options(project_dir, ai="claude", ai_skills=True)
|
||||
skills_dir = _create_skills_dir(project_dir, ai="claude")
|
||||
return project_dir, skills_dir
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def no_skills_project(project_dir):
|
||||
"""Create a project without --ai-skills."""
|
||||
_create_init_options(project_dir, ai="claude", ai_skills=False)
|
||||
return project_dir
|
||||
|
||||
|
||||
# ===== ExtensionManager._get_skills_dir Tests =====
|
||||
|
||||
class TestExtensionManagerGetSkillsDir:
    """Tests for ``ExtensionManager._get_skills_dir()``."""

    def test_returns_skills_dir_when_active(self, skills_project):
        """Should return the skills dir when ai_skills is true and the dir exists."""
        proj, expected_dir = skills_project
        mgr = ExtensionManager(proj)
        assert mgr._get_skills_dir() == expected_dir

    def test_returns_none_when_no_ai_skills(self, no_skills_project):
        """Should return None when ai_skills is false."""
        mgr = ExtensionManager(no_skills_project)
        assert mgr._get_skills_dir() is None

    def test_returns_none_when_no_init_options(self, project_dir):
        """Should return None when init-options.json is missing."""
        mgr = ExtensionManager(project_dir)
        assert mgr._get_skills_dir() is None

    def test_returns_none_when_skills_dir_missing(self, project_dir):
        """Should return None when the skills dir doesn't exist on disk."""
        _create_init_options(project_dir, ai="claude", ai_skills=True)
        # Deliberately skip creating the skills directory itself.
        mgr = ExtensionManager(project_dir)
        assert mgr._get_skills_dir() is None

    def test_returns_kimi_skills_dir_when_ai_skills_disabled(self, project_dir):
        """Kimi should still use its native skills dir when ai_skills is false."""
        _create_init_options(project_dir, ai="kimi", ai_skills=False)
        expected_dir = _create_skills_dir(project_dir, ai="kimi")
        mgr = ExtensionManager(project_dir)
        assert mgr._get_skills_dir() == expected_dir

    def test_returns_none_for_non_dict_init_options(self, project_dir):
        """Corrupted-but-parseable init-options should not crash skill-dir lookup."""
        opts_file = project_dir / ".specify" / "init-options.json"
        opts_file.parent.mkdir(parents=True, exist_ok=True)
        opts_file.write_text("[]")  # valid JSON, but not an options mapping
        _create_skills_dir(project_dir, ai="claude")
        mgr = ExtensionManager(project_dir)
        assert mgr._get_skills_dir() is None
|
||||
|
||||
|
||||
# ===== Extension Skill Registration Tests =====
|
||||
|
||||
class TestExtensionSkillRegistration:
    """Test _register_extension_skills() on ExtensionManager.

    Covers the happy path (skills created, registry updated), content and
    YAML validity of generated SKILL.md files, opt-out behavior, and
    graceful handling of pre-existing skills and missing command files.
    """

    def test_skills_created_when_ai_skills_active(self, skills_project, extension_dir):
        """Skills should be created when ai_skills is enabled."""
        project_dir, skills_dir = skills_project
        manager = ExtensionManager(project_dir)
        # Return value is not needed here; assertions inspect the skills dir.
        manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        # Check that skill directories were created
        skill_dirs = sorted([d.name for d in skills_dir.iterdir() if d.is_dir()])
        assert "speckit-test-ext-hello" in skill_dirs
        assert "speckit-test-ext-world" in skill_dirs

    def test_skill_md_content_correct(self, skills_project, extension_dir):
        """SKILL.md should have correct agentskills.io structure."""
        project_dir, skills_dir = skills_project
        manager = ExtensionManager(project_dir)
        manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        skill_file = skills_dir / "speckit-test-ext-hello" / "SKILL.md"
        assert skill_file.exists()
        content = skill_file.read_text()

        # Check structure: frontmatter delimiters, identity fields, and body.
        assert content.startswith("---\n")
        assert "name: speckit-test-ext-hello" in content
        assert "description:" in content
        assert "Test hello command" in content
        assert "source: extension:test-ext" in content
        assert "author: github-spec-kit" in content
        assert "compatibility:" in content
        assert "Run this to say hello." in content

    def test_skill_md_has_parseable_yaml(self, skills_project, extension_dir):
        """Generated SKILL.md should contain valid, parseable YAML frontmatter."""
        project_dir, skills_dir = skills_project
        manager = ExtensionManager(project_dir)
        manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        skill_file = skills_dir / "speckit-test-ext-hello" / "SKILL.md"
        content = skill_file.read_text()

        assert content.startswith("---\n")
        # Split into (empty, frontmatter, body); the frontmatter must parse.
        parts = content.split("---", 2)
        assert len(parts) >= 3
        parsed = yaml.safe_load(parts[1])
        assert isinstance(parsed, dict)
        assert parsed["name"] == "speckit-test-ext-hello"
        assert "description" in parsed

    def test_no_skills_when_ai_skills_disabled(self, no_skills_project, extension_dir):
        """No skills should be created when ai_skills is false."""
        manager = ExtensionManager(no_skills_project)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        # Verify registry records an empty skills list.
        metadata = manager.registry.get(manifest.id)
        assert metadata["registered_skills"] == []

    def test_no_skills_when_init_options_missing(self, project_dir, extension_dir):
        """No skills should be created when init-options.json is absent."""
        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        metadata = manager.registry.get(manifest.id)
        assert metadata["registered_skills"] == []

    def test_existing_skill_not_overwritten(self, skills_project, extension_dir):
        """Pre-existing SKILL.md should not be overwritten."""
        project_dir, skills_dir = skills_project

        # Pre-create a custom skill under the name the extension would use.
        custom_dir = skills_dir / "speckit-test-ext-hello"
        custom_dir.mkdir(parents=True)
        custom_content = "# My Custom Hello Skill\nUser-modified content\n"
        (custom_dir / "SKILL.md").write_text(custom_content)

        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        # Custom skill should be untouched
        assert (custom_dir / "SKILL.md").read_text() == custom_content

        # But the other skill should still be created
        metadata = manager.registry.get(manifest.id)
        assert "speckit-test-ext-world" in metadata["registered_skills"]
        # The pre-existing one should NOT be in registered_skills (it was skipped)
        assert "speckit-test-ext-hello" not in metadata["registered_skills"]

    def test_registered_skills_in_registry(self, skills_project, extension_dir):
        """Registry should contain registered_skills list."""
        project_dir, skills_dir = skills_project
        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        metadata = manager.registry.get(manifest.id)
        assert "registered_skills" in metadata
        assert len(metadata["registered_skills"]) == 2
        assert "speckit-test-ext-hello" in metadata["registered_skills"]
        assert "speckit-test-ext-world" in metadata["registered_skills"]

    def test_kimi_uses_hyphenated_skill_names(self, project_dir, temp_dir):
        """Kimi agent should use the same hyphenated skill names as hooks."""
        _create_init_options(project_dir, ai="kimi", ai_skills=True)
        _create_skills_dir(project_dir, ai="kimi")
        ext_dir = _create_extension_dir(temp_dir, ext_id="test-ext")

        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            ext_dir, "0.1.0", register_commands=False
        )

        metadata = manager.registry.get(manifest.id)
        assert "speckit-test-ext-hello" in metadata["registered_skills"]
        assert "speckit-test-ext-world" in metadata["registered_skills"]

    def test_kimi_creates_skills_when_ai_skills_disabled(self, project_dir, temp_dir):
        """Kimi should still auto-register extension skills in native-skills mode."""
        _create_init_options(project_dir, ai="kimi", ai_skills=False)
        skills_dir = _create_skills_dir(project_dir, ai="kimi")
        ext_dir = _create_extension_dir(temp_dir, ext_id="test-ext")

        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            ext_dir, "0.1.0", register_commands=False
        )

        metadata = manager.registry.get(manifest.id)
        assert "speckit-test-ext-hello" in metadata["registered_skills"]
        assert "speckit-test-ext-world" in metadata["registered_skills"]
        assert (skills_dir / "speckit-test-ext-hello" / "SKILL.md").exists()

    def test_skill_registration_resolves_script_placeholders(self, project_dir, temp_dir):
        """Auto-registered extension skills should resolve script placeholders."""
        _create_init_options(project_dir, ai="claude", ai_skills=True)
        skills_dir = _create_skills_dir(project_dir, ai="claude")

        # Build an extension whose command uses {SCRIPT}/{AGENT_SCRIPT}/{ARGS}
        # placeholders plus __AGENT__ tokens and project-relative paths.
        ext_dir = temp_dir / "scripted-ext"
        ext_dir.mkdir()
        manifest_data = {
            "schema_version": "1.0",
            "extension": {
                "id": "scripted-ext",
                "name": "Scripted Extension",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.scripted-ext.plan",
                        "file": "commands/plan.md",
                        "description": "Scripted plan command",
                    }
                ]
            },
        }
        with open(ext_dir / "extension.yml", "w") as f:
            yaml.dump(manifest_data, f)

        (ext_dir / "commands").mkdir()
        (ext_dir / "commands" / "plan.md").write_text(
            "---\n"
            "description: Scripted plan command\n"
            "scripts:\n"
            "  sh: ../../scripts/bash/setup-plan.sh --json \"{ARGS}\"\n"
            "agent_scripts:\n"
            "  sh: ../../scripts/bash/update-agent-context.sh __AGENT__\n"
            "---\n\n"
            "Run {SCRIPT}\n"
            "Then {AGENT_SCRIPT}\n"
            "Review templates/checklist.md and memory/constitution.md for __AGENT__.\n"
        )

        manager = ExtensionManager(project_dir)
        manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)

        content = (skills_dir / "speckit-scripted-ext-plan" / "SKILL.md").read_text()
        # All placeholders must be resolved away...
        assert "{SCRIPT}" not in content
        assert "{AGENT_SCRIPT}" not in content
        assert "{ARGS}" not in content
        assert "__AGENT__" not in content
        # ...into concrete project paths and the claude agent name.
        assert '.specify/scripts/bash/setup-plan.sh --json "$ARGUMENTS"' in content
        assert ".specify/scripts/bash/update-agent-context.sh claude" in content
        assert ".specify/templates/checklist.md" in content
        assert ".specify/memory/constitution.md" in content

    def test_missing_command_file_skipped(self, skills_project, temp_dir):
        """Commands with missing source files should be skipped gracefully."""
        project_dir, skills_dir = skills_project

        ext_dir = temp_dir / "missing-cmd-ext"
        ext_dir.mkdir()
        manifest_data = {
            "schema_version": "1.0",
            "extension": {
                "id": "missing-cmd-ext",
                "name": "Missing Cmd Extension",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.missing-cmd-ext.exists",
                        "file": "commands/exists.md",
                        "description": "Exists",
                    },
                    {
                        "name": "speckit.missing-cmd-ext.ghost",
                        "file": "commands/ghost.md",
                        "description": "Does not exist",
                    },
                ]
            },
        }
        with open(ext_dir / "extension.yml", "w") as f:
            yaml.dump(manifest_data, f)

        (ext_dir / "commands").mkdir()
        (ext_dir / "commands" / "exists.md").write_text(
            "---\ndescription: Exists\n---\n\n# Exists\n\nBody.\n"
        )
        # Intentionally do NOT create ghost.md

        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            ext_dir, "0.1.0", register_commands=False
        )

        metadata = manager.registry.get(manifest.id)
        assert "speckit-missing-cmd-ext-exists" in metadata["registered_skills"]
        assert "speckit-missing-cmd-ext-ghost" not in metadata["registered_skills"]
|
||||
|
||||
|
||||
# ===== Extension Skill Unregistration Tests =====
|
||||
|
||||
class TestExtensionSkillUnregistration:
    """Tests for ``_unregister_extension_skills()`` on ExtensionManager."""

    def test_skills_removed_on_extension_remove(self, skills_project, extension_dir):
        """Removing an extension should clean up its skill directories."""
        proj, skills_dir = skills_project
        mgr = ExtensionManager(proj)
        installed = mgr.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        # Both skills must exist right after install.
        for skill in ("speckit-test-ext-hello", "speckit-test-ext-world"):
            assert (skills_dir / skill / "SKILL.md").exists()

        # Uninstall, then verify the skill directories are gone.
        assert mgr.remove(installed.id, keep_config=False) is True
        for skill in ("speckit-test-ext-hello", "speckit-test-ext-world"):
            assert not (skills_dir / skill).exists()

    def test_other_skills_preserved_on_remove(self, skills_project, extension_dir):
        """Non-extension skills should not be affected by extension removal."""
        proj, skills_dir = skills_project

        # A hand-made skill that belongs to no extension.
        custom_dir = skills_dir / "my-custom-skill"
        custom_dir.mkdir(parents=True)
        (custom_dir / "SKILL.md").write_text("# My Custom Skill\n")

        mgr = ExtensionManager(proj)
        installed = mgr.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )
        mgr.remove(installed.id, keep_config=False)

        # The hand-made skill survives with its content intact.
        assert (custom_dir / "SKILL.md").exists()
        assert (custom_dir / "SKILL.md").read_text() == "# My Custom Skill\n"

    def test_remove_handles_already_deleted_skills(self, skills_project, extension_dir):
        """Gracefully handle case where skill dirs were already deleted."""
        proj, skills_dir = skills_project
        mgr = ExtensionManager(proj)
        installed = mgr.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        # Simulate out-of-band deletion of the skill directories.
        shutil.rmtree(skills_dir / "speckit-test-ext-hello")
        shutil.rmtree(skills_dir / "speckit-test-ext-world")

        # remove() must tolerate the missing directories without raising.
        assert mgr.remove(installed.id, keep_config=False) is True

    def test_remove_no_skills_when_not_active(self, no_skills_project, extension_dir):
        """Removal without active skills should not attempt skill cleanup."""
        mgr = ExtensionManager(no_skills_project)
        installed = mgr.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )
        # No skills were ever created; removal must still succeed.
        assert mgr.remove(installed.id, keep_config=False) is True
|
||||
|
||||
|
||||
# ===== Command File Without Frontmatter =====
|
||||
|
||||
class TestExtensionSkillEdgeCases:
    """Test edge cases in extension skill registration.

    Covers corrupted init-options payloads, commands with missing or
    malformed frontmatter, alternative agents (gemini), multi-extension
    independence, and cleanup fallbacks when configuration changes
    between install and remove.
    """

    def test_install_with_non_dict_init_options_does_not_crash(self, project_dir, extension_dir):
        """Corrupted init-options payloads should disable skill registration, not crash install."""
        opts_file = project_dir / ".specify" / "init-options.json"
        opts_file.parent.mkdir(parents=True, exist_ok=True)
        opts_file.write_text("[]")  # valid JSON, but not an options mapping
        _create_skills_dir(project_dir, ai="claude")

        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        metadata = manager.registry.get(manifest.id)
        assert metadata["registered_skills"] == []

    def test_command_without_frontmatter(self, skills_project, temp_dir):
        """Commands without YAML frontmatter should still produce valid skills."""
        project_dir, skills_dir = skills_project

        ext_dir = temp_dir / "nofm-ext"
        ext_dir.mkdir()
        manifest_data = {
            "schema_version": "1.0",
            "extension": {
                "id": "nofm-ext",
                "name": "No Frontmatter Extension",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.nofm-ext.plain",
                        "file": "commands/plain.md",
                        "description": "Plain command",
                    }
                ]
            },
        }
        with open(ext_dir / "extension.yml", "w") as f:
            yaml.dump(manifest_data, f)

        (ext_dir / "commands").mkdir()
        (ext_dir / "commands" / "plain.md").write_text(
            "# Plain Command\n\nBody without frontmatter.\n"
        )

        manager = ExtensionManager(project_dir)
        # Return value unused; assertions inspect the generated skill file.
        manager.install_from_directory(
            ext_dir, "0.1.0", register_commands=False
        )

        skill_file = skills_dir / "speckit-nofm-ext-plain" / "SKILL.md"
        assert skill_file.exists()
        content = skill_file.read_text()
        assert "name: speckit-nofm-ext-plain" in content
        # Fallback description when no frontmatter description
        assert "Extension command: speckit.nofm-ext.plain" in content
        assert "Body without frontmatter." in content

    def test_gemini_agent_skills(self, project_dir, temp_dir):
        """Gemini agent should use .gemini/skills/ for skill directory."""
        _create_init_options(project_dir, ai="gemini", ai_skills=True)
        _create_skills_dir(project_dir, ai="gemini")
        ext_dir = _create_extension_dir(temp_dir, ext_id="test-ext")

        manager = ExtensionManager(project_dir)
        # Return value unused; assertions inspect the gemini skills dir.
        manager.install_from_directory(
            ext_dir, "0.1.0", register_commands=False
        )

        skills_dir = project_dir / ".gemini" / "skills"
        assert (skills_dir / "speckit-test-ext-hello" / "SKILL.md").exists()
        assert (skills_dir / "speckit-test-ext-world" / "SKILL.md").exists()

    def test_multiple_extensions_independent_skills(self, skills_project, temp_dir):
        """Installing and removing different extensions should be independent."""
        project_dir, skills_dir = skills_project

        ext_dir_a = _create_extension_dir(temp_dir, ext_id="ext-a")
        ext_dir_b = _create_extension_dir(temp_dir, ext_id="ext-b")

        manager = ExtensionManager(project_dir)
        # Return values unused; removal below uses the known extension id.
        manager.install_from_directory(
            ext_dir_a, "0.1.0", register_commands=False
        )
        manager.install_from_directory(
            ext_dir_b, "0.1.0", register_commands=False
        )

        # Both should have skills
        assert (skills_dir / "speckit-ext-a-hello" / "SKILL.md").exists()
        assert (skills_dir / "speckit-ext-b-hello" / "SKILL.md").exists()

        # Remove ext-a
        manager.remove("ext-a", keep_config=False)

        # ext-a skills gone, ext-b skills preserved
        assert not (skills_dir / "speckit-ext-a-hello").exists()
        assert (skills_dir / "speckit-ext-b-hello" / "SKILL.md").exists()

    def test_malformed_frontmatter_handled(self, skills_project, temp_dir):
        """Commands with invalid YAML frontmatter should still produce valid skills."""
        project_dir, skills_dir = skills_project

        ext_dir = temp_dir / "badfm-ext"
        ext_dir.mkdir()
        manifest_data = {
            "schema_version": "1.0",
            "extension": {
                "id": "badfm-ext",
                "name": "Bad Frontmatter Extension",
                "version": "1.0.0",
                "description": "Test",
            },
            "requires": {"speckit_version": ">=0.1.0"},
            "provides": {
                "commands": [
                    {
                        "name": "speckit.badfm-ext.broken",
                        "file": "commands/broken.md",
                        "description": "Broken frontmatter",
                    }
                ]
            },
        }
        with open(ext_dir / "extension.yml", "w") as f:
            yaml.dump(manifest_data, f)

        (ext_dir / "commands").mkdir()
        # Malformed YAML: invalid key-value syntax
        (ext_dir / "commands" / "broken.md").write_text(
            "---\n"
            "description: [invalid yaml\n"
            " unclosed: bracket\n"
            "---\n"
            "\n"
            "# Broken Command\n"
            "\n"
            "This body should still be used.\n"
        )

        manager = ExtensionManager(project_dir)
        # Should not raise despite the broken frontmatter.
        manager.install_from_directory(
            ext_dir, "0.1.0", register_commands=False
        )

        skill_file = skills_dir / "speckit-badfm-ext-broken" / "SKILL.md"
        assert skill_file.exists()
        content = skill_file.read_text()
        # Fallback description since frontmatter was invalid
        assert "Extension command: speckit.badfm-ext.broken" in content
        assert "This body should still be used." in content

    def test_remove_cleans_up_when_init_options_deleted(self, skills_project, extension_dir):
        """Skills should be cleaned up even if init-options.json is deleted after install."""
        project_dir, skills_dir = skills_project
        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        # Verify skills exist
        assert (skills_dir / "speckit-test-ext-hello" / "SKILL.md").exists()

        # Delete init-options.json to simulate user change
        init_opts = project_dir / ".specify" / "init-options.json"
        init_opts.unlink()

        # Remove should still clean up via fallback scan
        result = manager.remove(manifest.id, keep_config=False)
        assert result is True
        assert not (skills_dir / "speckit-test-ext-hello").exists()
        assert not (skills_dir / "speckit-test-ext-world").exists()

    def test_remove_cleans_up_when_ai_skills_toggled(self, skills_project, extension_dir):
        """Skills should be cleaned up even if ai_skills is toggled to false after install."""
        project_dir, skills_dir = skills_project
        manager = ExtensionManager(project_dir)
        manifest = manager.install_from_directory(
            extension_dir, "0.1.0", register_commands=False
        )

        # Verify skills exist
        assert (skills_dir / "speckit-test-ext-hello" / "SKILL.md").exists()

        # Toggle ai_skills to false
        _create_init_options(project_dir, ai="claude", ai_skills=False)

        # Remove should still clean up via fallback scan
        result = manager.remove(manifest.id, keep_config=False)
        assert result is True
        assert not (skills_dir / "speckit-test-ext-hello").exists()
        assert not (skills_dir / "speckit-test-ext-world").exists()
|
||||
@@ -18,10 +18,12 @@ from datetime import datetime, timezone
|
||||
|
||||
from specify_cli.extensions import (
|
||||
CatalogEntry,
|
||||
CORE_COMMAND_NAMES,
|
||||
ExtensionManifest,
|
||||
ExtensionRegistry,
|
||||
ExtensionManager,
|
||||
CommandRegistrar,
|
||||
HookExecutor,
|
||||
ExtensionCatalog,
|
||||
ExtensionError,
|
||||
ValidationError,
|
||||
@@ -62,7 +64,7 @@ def valid_manifest_data():
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.test.hello",
|
||||
"name": "speckit.test-ext.hello",
|
||||
"file": "commands/hello.md",
|
||||
"description": "Test command",
|
||||
}
|
||||
@@ -70,7 +72,7 @@ def valid_manifest_data():
|
||||
},
|
||||
"hooks": {
|
||||
"after_tasks": {
|
||||
"command": "speckit.test.hello",
|
||||
"command": "speckit.test-ext.hello",
|
||||
"optional": True,
|
||||
"prompt": "Run test?",
|
||||
}
|
||||
@@ -188,7 +190,18 @@ class TestExtensionManifest:
|
||||
assert manifest.version == "1.0.0"
|
||||
assert manifest.description == "A test extension"
|
||||
assert len(manifest.commands) == 1
|
||||
assert manifest.commands[0]["name"] == "speckit.test.hello"
|
||||
assert manifest.commands[0]["name"] == "speckit.test-ext.hello"
|
||||
|
||||
def test_core_command_names_match_bundled_templates(self):
|
||||
"""Core command reservations should stay aligned with bundled templates."""
|
||||
commands_dir = Path(__file__).resolve().parent.parent / "templates" / "commands"
|
||||
expected = {
|
||||
command_file.stem
|
||||
for command_file in commands_dir.iterdir()
|
||||
if command_file.is_file() and command_file.suffix == ".md"
|
||||
}
|
||||
|
||||
assert CORE_COMMAND_NAMES == expected
|
||||
|
||||
def test_missing_required_field(self, temp_dir):
|
||||
"""Test manifest missing required field."""
|
||||
@@ -588,6 +601,172 @@ class TestExtensionManager:
|
||||
with pytest.raises(ExtensionError, match="already installed"):
|
||||
manager.install_from_directory(extension_dir, "0.1.0", register_commands=False)
|
||||
|
||||
def test_install_rejects_extension_id_in_core_namespace(self, temp_dir, project_dir):
|
||||
"""Install should reject extension IDs that shadow core commands."""
|
||||
import yaml
|
||||
|
||||
ext_dir = temp_dir / "analyze-ext"
|
||||
ext_dir.mkdir()
|
||||
(ext_dir / "commands").mkdir()
|
||||
|
||||
manifest_data = {
|
||||
"schema_version": "1.0",
|
||||
"extension": {
|
||||
"id": "analyze",
|
||||
"name": "Analyze Extension",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.analyze.extra",
|
||||
"file": "commands/cmd.md",
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
|
||||
(ext_dir / "extension.yml").write_text(yaml.dump(manifest_data))
|
||||
(ext_dir / "commands" / "cmd.md").write_text("---\ndescription: Test\n---\n\nBody")
|
||||
|
||||
manager = ExtensionManager(project_dir)
|
||||
with pytest.raises(ValidationError, match="conflicts with core command namespace"):
|
||||
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
|
||||
|
||||
def test_install_rejects_alias_without_extension_namespace(self, temp_dir, project_dir):
|
||||
"""Install should reject legacy short aliases that can shadow core commands."""
|
||||
import yaml
|
||||
|
||||
ext_dir = temp_dir / "alias-shortcut"
|
||||
ext_dir.mkdir()
|
||||
(ext_dir / "commands").mkdir()
|
||||
|
||||
manifest_data = {
|
||||
"schema_version": "1.0",
|
||||
"extension": {
|
||||
"id": "alias-shortcut",
|
||||
"name": "Alias Shortcut",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.alias-shortcut.cmd",
|
||||
"file": "commands/cmd.md",
|
||||
"aliases": ["speckit.shortcut"],
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
|
||||
(ext_dir / "extension.yml").write_text(yaml.dump(manifest_data))
|
||||
(ext_dir / "commands" / "cmd.md").write_text("---\ndescription: Test\n---\n\nBody")
|
||||
|
||||
manager = ExtensionManager(project_dir)
|
||||
with pytest.raises(ValidationError, match="Invalid alias 'speckit.shortcut'"):
|
||||
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
|
||||
|
||||
def test_install_rejects_namespace_squatting(self, temp_dir, project_dir):
|
||||
"""Install should reject commands and aliases outside the extension namespace."""
|
||||
import yaml
|
||||
|
||||
ext_dir = temp_dir / "squat-ext"
|
||||
ext_dir.mkdir()
|
||||
(ext_dir / "commands").mkdir()
|
||||
|
||||
manifest_data = {
|
||||
"schema_version": "1.0",
|
||||
"extension": {
|
||||
"id": "squat-ext",
|
||||
"name": "Squat Extension",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.other-ext.cmd",
|
||||
"file": "commands/cmd.md",
|
||||
"aliases": ["speckit.squat-ext.ok"],
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
|
||||
(ext_dir / "extension.yml").write_text(yaml.dump(manifest_data))
|
||||
(ext_dir / "commands" / "cmd.md").write_text("---\ndescription: Test\n---\n\nBody")
|
||||
|
||||
manager = ExtensionManager(project_dir)
|
||||
with pytest.raises(ValidationError, match="must use extension namespace 'squat-ext'"):
|
||||
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
|
||||
|
||||
def test_install_rejects_command_collision_with_installed_extension(self, temp_dir, project_dir):
|
||||
"""Install should reject names already claimed by an installed legacy extension."""
|
||||
import yaml
|
||||
|
||||
first_dir = temp_dir / "ext-one"
|
||||
first_dir.mkdir()
|
||||
(first_dir / "commands").mkdir()
|
||||
first_manifest = {
|
||||
"schema_version": "1.0",
|
||||
"extension": {
|
||||
"id": "ext-one",
|
||||
"name": "Extension One",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.ext-one.sync",
|
||||
"file": "commands/cmd.md",
|
||||
"aliases": ["speckit.shared.sync"],
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
(first_dir / "extension.yml").write_text(yaml.dump(first_manifest))
|
||||
(first_dir / "commands" / "cmd.md").write_text("---\ndescription: Test\n---\n\nBody")
|
||||
installed_ext_dir = project_dir / ".specify" / "extensions" / "ext-one"
|
||||
installed_ext_dir.parent.mkdir(parents=True, exist_ok=True)
|
||||
shutil.copytree(first_dir, installed_ext_dir)
|
||||
|
||||
second_dir = temp_dir / "ext-two"
|
||||
second_dir.mkdir()
|
||||
(second_dir / "commands").mkdir()
|
||||
second_manifest = {
|
||||
"schema_version": "1.0",
|
||||
"extension": {
|
||||
"id": "shared",
|
||||
"name": "Shared Extension",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.shared.sync",
|
||||
"file": "commands/cmd.md",
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
(second_dir / "extension.yml").write_text(yaml.dump(second_manifest))
|
||||
(second_dir / "commands" / "cmd.md").write_text("---\ndescription: Test\n---\n\nBody")
|
||||
|
||||
manager = ExtensionManager(project_dir)
|
||||
manager.registry.add("ext-one", {"version": "1.0.0", "source": "local"})
|
||||
|
||||
with pytest.raises(ValidationError, match="already provided by extension 'ext-one'"):
|
||||
manager.install_from_directory(second_dir, "0.1.0", register_commands=False)
|
||||
|
||||
def test_remove_extension(self, extension_dir, project_dir):
|
||||
"""Test removing an installed extension."""
|
||||
manager = ExtensionManager(project_dir)
|
||||
@@ -759,6 +938,81 @@ $ARGUMENTS
|
||||
assert "Prüfe Konformität" in output
|
||||
assert "\\u" not in output
|
||||
|
||||
def test_adjust_script_paths_does_not_mutate_input(self):
|
||||
"""Path adjustments should not mutate caller-owned frontmatter dicts."""
|
||||
from specify_cli.agents import CommandRegistrar as AgentCommandRegistrar
|
||||
registrar = AgentCommandRegistrar()
|
||||
original = {
|
||||
"scripts": {
|
||||
"sh": "../../scripts/bash/setup-plan.sh {ARGS}",
|
||||
"ps": "../../scripts/powershell/setup-plan.ps1 {ARGS}",
|
||||
}
|
||||
}
|
||||
before = json.loads(json.dumps(original))
|
||||
|
||||
adjusted = registrar._adjust_script_paths(original)
|
||||
|
||||
assert original == before
|
||||
assert adjusted["scripts"]["sh"] == ".specify/scripts/bash/setup-plan.sh {ARGS}"
|
||||
assert adjusted["scripts"]["ps"] == ".specify/scripts/powershell/setup-plan.ps1 {ARGS}"
|
||||
|
||||
def test_adjust_script_paths_preserves_extension_local_paths(self):
|
||||
"""Extension-local script paths should not be rewritten into .specify/.specify."""
|
||||
from specify_cli.agents import CommandRegistrar as AgentCommandRegistrar
|
||||
registrar = AgentCommandRegistrar()
|
||||
original = {
|
||||
"scripts": {
|
||||
"sh": ".specify/extensions/test-ext/scripts/setup.sh {ARGS}",
|
||||
"ps": "scripts/powershell/setup-plan.ps1 {ARGS}",
|
||||
}
|
||||
}
|
||||
|
||||
adjusted = registrar._adjust_script_paths(original)
|
||||
|
||||
assert adjusted["scripts"]["sh"] == ".specify/extensions/test-ext/scripts/setup.sh {ARGS}"
|
||||
assert adjusted["scripts"]["ps"] == ".specify/scripts/powershell/setup-plan.ps1 {ARGS}"
|
||||
|
||||
def test_rewrite_project_relative_paths_preserves_extension_local_body_paths(self):
|
||||
"""Body rewrites should preserve extension-local assets while fixing top-level refs."""
|
||||
from specify_cli.agents import CommandRegistrar as AgentCommandRegistrar
|
||||
|
||||
body = (
|
||||
"Read `.specify/extensions/test-ext/templates/spec.md`\n"
|
||||
"Run scripts/bash/setup-plan.sh\n"
|
||||
)
|
||||
|
||||
rewritten = AgentCommandRegistrar._rewrite_project_relative_paths(body)
|
||||
|
||||
assert ".specify/extensions/test-ext/templates/spec.md" in rewritten
|
||||
assert ".specify/scripts/bash/setup-plan.sh" in rewritten
|
||||
|
||||
def test_render_toml_command_handles_embedded_triple_double_quotes(self):
|
||||
"""TOML renderer should stay valid when body includes triple double-quotes."""
|
||||
from specify_cli.agents import CommandRegistrar as AgentCommandRegistrar
|
||||
registrar = AgentCommandRegistrar()
|
||||
output = registrar.render_toml_command(
|
||||
{"description": "x"},
|
||||
'line1\n"""danger"""\nline2',
|
||||
"extension:test-ext",
|
||||
)
|
||||
|
||||
assert "prompt = '''" in output
|
||||
assert '"""danger"""' in output
|
||||
|
||||
def test_render_toml_command_escapes_when_both_triple_quote_styles_exist(self):
|
||||
"""If body has both triple quote styles, fall back to escaped basic string."""
|
||||
from specify_cli.agents import CommandRegistrar as AgentCommandRegistrar
|
||||
registrar = AgentCommandRegistrar()
|
||||
output = registrar.render_toml_command(
|
||||
{"description": "x"},
|
||||
'a """ b\nc \'\'\' d',
|
||||
"extension:test-ext",
|
||||
)
|
||||
|
||||
assert 'prompt = "' in output
|
||||
assert "\\n" in output
|
||||
assert "\\\"\\\"\\\"" in output
|
||||
|
||||
def test_register_commands_for_claude(self, extension_dir, project_dir):
|
||||
"""Test registering commands for Claude agent."""
|
||||
# Create .claude directory
|
||||
@@ -776,10 +1030,10 @@ $ARGUMENTS
|
||||
)
|
||||
|
||||
assert len(registered) == 1
|
||||
assert "speckit.test.hello" in registered
|
||||
assert "speckit.test-ext.hello" in registered
|
||||
|
||||
# Check command file was created
|
||||
cmd_file = claude_dir / "speckit.test.hello.md"
|
||||
cmd_file = claude_dir / "speckit.test-ext.hello.md"
|
||||
assert cmd_file.exists()
|
||||
|
||||
content = cmd_file.read_text()
|
||||
@@ -809,9 +1063,9 @@ $ARGUMENTS
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.alias.cmd",
|
||||
"name": "speckit.ext-alias.cmd",
|
||||
"file": "commands/cmd.md",
|
||||
"aliases": ["speckit.shortcut"],
|
||||
"aliases": ["speckit.ext-alias.shortcut"],
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -831,10 +1085,10 @@ $ARGUMENTS
|
||||
registered = registrar.register_commands_for_claude(manifest, ext_dir, project_dir)
|
||||
|
||||
assert len(registered) == 2
|
||||
assert "speckit.alias.cmd" in registered
|
||||
assert "speckit.shortcut" in registered
|
||||
assert (claude_dir / "speckit.alias.cmd.md").exists()
|
||||
assert (claude_dir / "speckit.shortcut.md").exists()
|
||||
assert "speckit.ext-alias.cmd" in registered
|
||||
assert "speckit.ext-alias.shortcut" in registered
|
||||
assert (claude_dir / "speckit.ext-alias.cmd.md").exists()
|
||||
assert (claude_dir / "speckit.ext-alias.shortcut.md").exists()
|
||||
|
||||
def test_unregister_commands_for_codex_skills_uses_mapped_names(self, project_dir):
|
||||
"""Codex skill cleanup should use the same mapped names as registration."""
|
||||
@@ -875,11 +1129,11 @@ $ARGUMENTS
|
||||
registrar = CommandRegistrar()
|
||||
registrar.register_commands_for_agent("codex", manifest, extension_dir, project_dir)
|
||||
|
||||
skill_file = skills_dir / "speckit-test.hello" / "SKILL.md"
|
||||
skill_file = skills_dir / "speckit-test-ext-hello" / "SKILL.md"
|
||||
assert skill_file.exists()
|
||||
|
||||
content = skill_file.read_text()
|
||||
assert "name: speckit-test.hello" in content
|
||||
assert "name: speckit-test-ext-hello" in content
|
||||
assert "description: Test hello command" in content
|
||||
assert "compatibility:" in content
|
||||
assert "metadata:" in content
|
||||
@@ -906,7 +1160,7 @@ $ARGUMENTS
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.test.plan",
|
||||
"name": "speckit.ext-scripted.plan",
|
||||
"file": "commands/plan.md",
|
||||
"description": "Scripted command",
|
||||
}
|
||||
@@ -944,7 +1198,7 @@ Agent __AGENT__
|
||||
registrar = CommandRegistrar()
|
||||
registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)
|
||||
|
||||
skill_file = skills_dir / "speckit-test.plan" / "SKILL.md"
|
||||
skill_file = skills_dir / "speckit-ext-scripted-plan" / "SKILL.md"
|
||||
assert skill_file.exists()
|
||||
|
||||
content = skill_file.read_text()
|
||||
@@ -975,9 +1229,9 @@ Agent __AGENT__
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.alias.cmd",
|
||||
"name": "speckit.ext-alias-skill.cmd",
|
||||
"file": "commands/cmd.md",
|
||||
"aliases": ["speckit.shortcut"],
|
||||
"aliases": ["speckit.ext-alias-skill.shortcut"],
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -994,13 +1248,13 @@ Agent __AGENT__
|
||||
registrar = CommandRegistrar()
|
||||
registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)
|
||||
|
||||
primary = skills_dir / "speckit-alias.cmd" / "SKILL.md"
|
||||
alias = skills_dir / "speckit-shortcut" / "SKILL.md"
|
||||
primary = skills_dir / "speckit-ext-alias-skill-cmd" / "SKILL.md"
|
||||
alias = skills_dir / "speckit-ext-alias-skill-shortcut" / "SKILL.md"
|
||||
|
||||
assert primary.exists()
|
||||
assert alias.exists()
|
||||
assert "name: speckit-alias.cmd" in primary.read_text()
|
||||
assert "name: speckit-shortcut" in alias.read_text()
|
||||
assert "name: speckit-ext-alias-skill-cmd" in primary.read_text()
|
||||
assert "name: speckit-ext-alias-skill-shortcut" in alias.read_text()
|
||||
|
||||
def test_codex_skill_registration_uses_fallback_script_variant_without_init_options(
|
||||
self, project_dir, temp_dir
|
||||
@@ -1024,7 +1278,7 @@ Agent __AGENT__
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.fallback.plan",
|
||||
"name": "speckit.ext-script-fallback.plan",
|
||||
"file": "commands/plan.md",
|
||||
}
|
||||
]
|
||||
@@ -1056,7 +1310,7 @@ Then {AGENT_SCRIPT}
|
||||
registrar = CommandRegistrar()
|
||||
registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)
|
||||
|
||||
skill_file = skills_dir / "speckit-fallback.plan" / "SKILL.md"
|
||||
skill_file = skills_dir / "speckit-ext-script-fallback-plan" / "SKILL.md"
|
||||
assert skill_file.exists()
|
||||
|
||||
content = skill_file.read_text()
|
||||
@@ -1065,6 +1319,62 @@ Then {AGENT_SCRIPT}
|
||||
assert '.specify/scripts/bash/setup-plan.sh --json "$ARGUMENTS"' in content
|
||||
assert ".specify/scripts/bash/update-agent-context.sh codex" in content
|
||||
|
||||
def test_codex_skill_registration_handles_non_dict_init_options(
|
||||
self, project_dir, temp_dir
|
||||
):
|
||||
"""Non-dict init-options payloads should not crash skill placeholder resolution."""
|
||||
import yaml
|
||||
|
||||
ext_dir = temp_dir / "ext-script-list-init"
|
||||
ext_dir.mkdir()
|
||||
(ext_dir / "commands").mkdir()
|
||||
|
||||
manifest_data = {
|
||||
"schema_version": "1.0",
|
||||
"extension": {
|
||||
"id": "ext-script-list-init",
|
||||
"name": "List init options",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.ext-script-list-init.plan",
|
||||
"file": "commands/plan.md",
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
with open(ext_dir / "extension.yml", "w") as f:
|
||||
yaml.dump(manifest_data, f)
|
||||
|
||||
(ext_dir / "commands" / "plan.md").write_text(
|
||||
"""---
|
||||
description: "List init scripted command"
|
||||
scripts:
|
||||
sh: ../../scripts/bash/setup-plan.sh --json "{ARGS}"
|
||||
---
|
||||
|
||||
Run {SCRIPT}
|
||||
"""
|
||||
)
|
||||
|
||||
init_options = project_dir / ".specify" / "init-options.json"
|
||||
init_options.parent.mkdir(parents=True, exist_ok=True)
|
||||
init_options.write_text("[]")
|
||||
|
||||
skills_dir = project_dir / ".agents" / "skills"
|
||||
skills_dir.mkdir(parents=True)
|
||||
|
||||
manifest = ExtensionManifest(ext_dir / "extension.yml")
|
||||
registrar = CommandRegistrar()
|
||||
registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)
|
||||
|
||||
content = (skills_dir / "speckit-ext-script-list-init-plan" / "SKILL.md").read_text()
|
||||
assert '.specify/scripts/bash/setup-plan.sh --json "$ARGUMENTS"' in content
|
||||
|
||||
def test_codex_skill_registration_fallback_prefers_powershell_on_windows(
|
||||
self, project_dir, temp_dir, monkeypatch
|
||||
):
|
||||
@@ -1089,7 +1399,7 @@ Then {AGENT_SCRIPT}
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.windows.plan",
|
||||
"name": "speckit.ext-script-windows-fallback.plan",
|
||||
"file": "commands/plan.md",
|
||||
}
|
||||
]
|
||||
@@ -1121,7 +1431,7 @@ Then {AGENT_SCRIPT}
|
||||
registrar = CommandRegistrar()
|
||||
registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)
|
||||
|
||||
skill_file = skills_dir / "speckit-windows.plan" / "SKILL.md"
|
||||
skill_file = skills_dir / "speckit-ext-script-windows-fallback-plan" / "SKILL.md"
|
||||
assert skill_file.exists()
|
||||
|
||||
content = skill_file.read_text()
|
||||
@@ -1143,14 +1453,14 @@ Then {AGENT_SCRIPT}
|
||||
)
|
||||
|
||||
assert len(registered) == 1
|
||||
assert "speckit.test.hello" in registered
|
||||
assert "speckit.test-ext.hello" in registered
|
||||
|
||||
# Verify command file uses .agent.md extension
|
||||
cmd_file = agents_dir / "speckit.test.hello.agent.md"
|
||||
cmd_file = agents_dir / "speckit.test-ext.hello.agent.md"
|
||||
assert cmd_file.exists()
|
||||
|
||||
# Verify NO plain .md file was created
|
||||
plain_md_file = agents_dir / "speckit.test.hello.md"
|
||||
plain_md_file = agents_dir / "speckit.test-ext.hello.md"
|
||||
assert not plain_md_file.exists()
|
||||
|
||||
content = cmd_file.read_text()
|
||||
@@ -1170,12 +1480,12 @@ Then {AGENT_SCRIPT}
|
||||
)
|
||||
|
||||
# Verify companion .prompt.md file exists
|
||||
prompt_file = project_dir / ".github" / "prompts" / "speckit.test.hello.prompt.md"
|
||||
prompt_file = project_dir / ".github" / "prompts" / "speckit.test-ext.hello.prompt.md"
|
||||
assert prompt_file.exists()
|
||||
|
||||
# Verify content has correct agent frontmatter
|
||||
content = prompt_file.read_text()
|
||||
assert content == "---\nagent: speckit.test.hello\n---\n"
|
||||
assert content == "---\nagent: speckit.test-ext.hello\n---\n"
|
||||
|
||||
def test_copilot_aliases_get_companion_prompts(self, project_dir, temp_dir):
|
||||
"""Test that aliases also get companion .prompt.md files for Copilot."""
|
||||
@@ -1196,9 +1506,9 @@ Then {AGENT_SCRIPT}
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.alias-copilot.cmd",
|
||||
"name": "speckit.ext-alias-copilot.cmd",
|
||||
"file": "commands/cmd.md",
|
||||
"aliases": ["speckit.shortcut-copilot"],
|
||||
"aliases": ["speckit.ext-alias-copilot.shortcut"],
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1225,8 +1535,8 @@ Then {AGENT_SCRIPT}
|
||||
|
||||
# Both primary and alias get companion .prompt.md
|
||||
prompts_dir = project_dir / ".github" / "prompts"
|
||||
assert (prompts_dir / "speckit.alias-copilot.cmd.prompt.md").exists()
|
||||
assert (prompts_dir / "speckit.shortcut-copilot.prompt.md").exists()
|
||||
assert (prompts_dir / "speckit.ext-alias-copilot.cmd.prompt.md").exists()
|
||||
assert (prompts_dir / "speckit.ext-alias-copilot.shortcut.prompt.md").exists()
|
||||
|
||||
def test_non_copilot_agent_no_companion_file(self, extension_dir, project_dir):
|
||||
"""Test that non-copilot agents do NOT create .prompt.md files."""
|
||||
@@ -1299,7 +1609,7 @@ class TestIntegration:
|
||||
assert installed[0]["id"] == "test-ext"
|
||||
|
||||
# Verify command registered
|
||||
cmd_file = project_dir / ".claude" / "commands" / "speckit.test.hello.md"
|
||||
cmd_file = project_dir / ".claude" / "commands" / "speckit.test-ext.hello.md"
|
||||
assert cmd_file.exists()
|
||||
|
||||
# Verify registry has registered commands (now a dict keyed by agent)
|
||||
@@ -1307,7 +1617,7 @@ class TestIntegration:
|
||||
registered_commands = metadata["registered_commands"]
|
||||
# Check that the command is registered for at least one agent
|
||||
assert any(
|
||||
"speckit.test.hello" in cmds
|
||||
"speckit.test-ext.hello" in cmds
|
||||
for cmds in registered_commands.values()
|
||||
)
|
||||
|
||||
@@ -1333,8 +1643,8 @@ class TestIntegration:
|
||||
assert "copilot" in metadata["registered_commands"]
|
||||
|
||||
# Verify files exist before cleanup
|
||||
agent_file = agents_dir / "speckit.test.hello.agent.md"
|
||||
prompt_file = project_dir / ".github" / "prompts" / "speckit.test.hello.prompt.md"
|
||||
agent_file = agents_dir / "speckit.test-ext.hello.agent.md"
|
||||
prompt_file = project_dir / ".github" / "prompts" / "speckit.test-ext.hello.prompt.md"
|
||||
assert agent_file.exists()
|
||||
assert prompt_file.exists()
|
||||
|
||||
@@ -2644,7 +2954,7 @@ class TestExtensionUpdateCLI:
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.test.hello",
|
||||
"name": "speckit.test-ext.hello",
|
||||
"file": "commands/hello.md",
|
||||
"description": "Test command",
|
||||
}
|
||||
@@ -2652,7 +2962,7 @@ class TestExtensionUpdateCLI:
|
||||
},
|
||||
"hooks": {
|
||||
"after_tasks": {
|
||||
"command": "speckit.test.hello",
|
||||
"command": "speckit.test-ext.hello",
|
||||
"optional": True,
|
||||
}
|
||||
},
|
||||
@@ -2681,7 +2991,7 @@ class TestExtensionUpdateCLI:
|
||||
"description": "A test extension",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {"commands": [{"name": "speckit.test.hello", "file": "commands/hello.md"}]},
|
||||
"provides": {"commands": [{"name": "speckit.test-ext.hello", "file": "commands/hello.md"}]},
|
||||
}
|
||||
|
||||
with zipfile.ZipFile(zip_path, "w") as zf:
|
||||
@@ -3231,3 +3541,128 @@ class TestExtensionPriorityBackwardsCompatibility:
|
||||
assert result[0][0] == "ext-with-priority"
|
||||
assert result[1][0] == "legacy-ext"
|
||||
assert result[2][0] == "ext-low-priority"
|
||||
|
||||
|
||||
class TestHookInvocationRendering:
|
||||
"""Test hook invocation formatting for different agent modes."""
|
||||
|
||||
def test_kimi_hooks_render_skill_invocation(self, project_dir):
|
||||
"""Kimi projects should render /skill:speckit-* invocations."""
|
||||
init_options = project_dir / ".specify" / "init-options.json"
|
||||
init_options.parent.mkdir(parents=True, exist_ok=True)
|
||||
init_options.write_text(json.dumps({"ai": "kimi", "ai_skills": False}))
|
||||
|
||||
hook_executor = HookExecutor(project_dir)
|
||||
message = hook_executor.format_hook_message(
|
||||
"before_plan",
|
||||
[
|
||||
{
|
||||
"extension": "test-ext",
|
||||
"command": "speckit.plan",
|
||||
"optional": False,
|
||||
}
|
||||
],
|
||||
)
|
||||
|
||||
assert "Executing: `/skill:speckit-plan`" in message
|
||||
assert "EXECUTE_COMMAND: speckit.plan" in message
|
||||
assert "EXECUTE_COMMAND_INVOCATION: /skill:speckit-plan" in message
|
||||
|
||||
def test_codex_hooks_render_dollar_skill_invocation(self, project_dir):
|
||||
"""Codex projects with --ai-skills should render $speckit-* invocations."""
|
||||
init_options = project_dir / ".specify" / "init-options.json"
|
||||
init_options.parent.mkdir(parents=True, exist_ok=True)
|
||||
init_options.write_text(json.dumps({"ai": "codex", "ai_skills": True}))
|
||||
|
||||
hook_executor = HookExecutor(project_dir)
|
||||
execution = hook_executor.execute_hook(
|
||||
{
|
||||
"extension": "test-ext",
|
||||
"command": "speckit.tasks",
|
||||
"optional": False,
|
||||
}
|
||||
)
|
||||
|
||||
assert execution["command"] == "speckit.tasks"
|
||||
assert execution["invocation"] == "$speckit-tasks"
|
||||
|
||||
def test_non_skill_command_keeps_slash_invocation(self, project_dir):
|
||||
"""Custom hook commands should keep slash invocation style."""
|
||||
init_options = project_dir / ".specify" / "init-options.json"
|
||||
init_options.parent.mkdir(parents=True, exist_ok=True)
|
||||
init_options.write_text(json.dumps({"ai": "kimi", "ai_skills": False}))
|
||||
|
||||
hook_executor = HookExecutor(project_dir)
|
||||
message = hook_executor.format_hook_message(
|
||||
"before_tasks",
|
||||
[
|
||||
{
|
||||
"extension": "test-ext",
|
||||
"command": "pre_tasks_test",
|
||||
"optional": False,
|
||||
}
|
||||
],
|
||||
)
|
||||
|
||||
assert "Executing: `/pre_tasks_test`" in message
|
||||
assert "EXECUTE_COMMAND: pre_tasks_test" in message
|
||||
assert "EXECUTE_COMMAND_INVOCATION: /pre_tasks_test" in message
|
||||
|
||||
def test_extension_command_uses_hyphenated_skill_invocation(self, project_dir):
|
||||
"""Multi-segment extension command ids should map to hyphenated skills."""
|
||||
init_options = project_dir / ".specify" / "init-options.json"
|
||||
init_options.parent.mkdir(parents=True, exist_ok=True)
|
||||
init_options.write_text(json.dumps({"ai": "kimi", "ai_skills": False}))
|
||||
|
||||
hook_executor = HookExecutor(project_dir)
|
||||
message = hook_executor.format_hook_message(
|
||||
"after_tasks",
|
||||
[
|
||||
{
|
||||
"extension": "test-ext",
|
||||
"command": "speckit.test-ext.hello",
|
||||
"optional": False,
|
||||
}
|
||||
],
|
||||
)
|
||||
|
||||
assert "Executing: `/skill:speckit-test-ext-hello`" in message
|
||||
assert "EXECUTE_COMMAND: speckit.test-ext.hello" in message
|
||||
assert "EXECUTE_COMMAND_INVOCATION: /skill:speckit-test-ext-hello" in message
|
||||
|
||||
def test_hook_executor_caches_init_options_lookup(self, project_dir, monkeypatch):
|
||||
"""Init options should be loaded once per executor instance."""
|
||||
calls = {"count": 0}
|
||||
|
||||
def fake_load_init_options(_project_root):
|
||||
calls["count"] += 1
|
||||
return {"ai": "kimi", "ai_skills": False}
|
||||
|
||||
monkeypatch.setattr("specify_cli.load_init_options", fake_load_init_options)
|
||||
|
||||
hook_executor = HookExecutor(project_dir)
|
||||
assert hook_executor._render_hook_invocation("speckit.plan") == "/skill:speckit-plan"
|
||||
assert hook_executor._render_hook_invocation("speckit.tasks") == "/skill:speckit-tasks"
|
||||
assert calls["count"] == 1
|
||||
|
||||
def test_hook_message_falls_back_when_invocation_is_empty(self, project_dir):
|
||||
"""Hook messages should still render actionable command placeholders."""
|
||||
init_options = project_dir / ".specify" / "init-options.json"
|
||||
init_options.parent.mkdir(parents=True, exist_ok=True)
|
||||
init_options.write_text(json.dumps({"ai": "kimi", "ai_skills": False}))
|
||||
|
||||
hook_executor = HookExecutor(project_dir)
|
||||
message = hook_executor.format_hook_message(
|
||||
"after_tasks",
|
||||
[
|
||||
{
|
||||
"extension": "test-ext",
|
||||
"command": None,
|
||||
"optional": False,
|
||||
}
|
||||
],
|
||||
)
|
||||
|
||||
assert "Executing: `/<missing command>`" in message
|
||||
assert "EXECUTE_COMMAND: <missing command>" in message
|
||||
assert "EXECUTE_COMMAND_INVOCATION: /<missing command>" in message
|
||||
|
||||
460
tests/test_integrations.py
Normal file
460
tests/test_integrations.py
Normal file
@@ -0,0 +1,460 @@
|
||||
"""Tests for the integrations foundation (Stage 1).
|
||||
|
||||
Covers:
|
||||
- IntegrationOption dataclass
|
||||
- IntegrationBase ABC and MarkdownIntegration base class
|
||||
- IntegrationManifest — record, hash, save, load, uninstall, modified detection
|
||||
- INTEGRATION_REGISTRY basics
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
|
||||
import pytest
|
||||
|
||||
from specify_cli.integrations import (
|
||||
INTEGRATION_REGISTRY,
|
||||
_register,
|
||||
get_integration,
|
||||
)
|
||||
from specify_cli.integrations.base import (
|
||||
IntegrationBase,
|
||||
IntegrationOption,
|
||||
MarkdownIntegration,
|
||||
)
|
||||
from specify_cli.integrations.manifest import IntegrationManifest, _sha256
|
||||
|
||||
|
||||
# ── helpers ──────────────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
class _StubIntegration(MarkdownIntegration):
|
||||
"""Minimal concrete integration for testing."""
|
||||
|
||||
key = "stub"
|
||||
config = {
|
||||
"name": "Stub Agent",
|
||||
"folder": ".stub/",
|
||||
"commands_subdir": "commands",
|
||||
"install_url": None,
|
||||
"requires_cli": False,
|
||||
}
|
||||
registrar_config = {
|
||||
"dir": ".stub/commands",
|
||||
"format": "markdown",
|
||||
"args": "$ARGUMENTS",
|
||||
"extension": ".md",
|
||||
}
|
||||
context_file = "STUB.md"
|
||||
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
# IntegrationOption
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
|
||||
class TestIntegrationOption:
|
||||
def test_defaults(self):
|
||||
opt = IntegrationOption(name="--flag")
|
||||
assert opt.name == "--flag"
|
||||
assert opt.is_flag is False
|
||||
assert opt.required is False
|
||||
assert opt.default is None
|
||||
assert opt.help == ""
|
||||
|
||||
def test_flag_option(self):
|
||||
opt = IntegrationOption(name="--skills", is_flag=True, default=True, help="Enable skills")
|
||||
assert opt.is_flag is True
|
||||
assert opt.default is True
|
||||
assert opt.help == "Enable skills"
|
||||
|
||||
def test_required_option(self):
|
||||
opt = IntegrationOption(name="--commands-dir", required=True, help="Dir path")
|
||||
assert opt.required is True
|
||||
|
||||
def test_frozen(self):
|
||||
opt = IntegrationOption(name="--x")
|
||||
with pytest.raises(AttributeError):
|
||||
opt.name = "--y" # type: ignore[misc]
|
||||
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
# IntegrationBase / MarkdownIntegration
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
|
||||
class TestIntegrationBase:
|
||||
def test_key_and_config(self):
|
||||
i = _StubIntegration()
|
||||
assert i.key == "stub"
|
||||
assert i.config["name"] == "Stub Agent"
|
||||
assert i.registrar_config["format"] == "markdown"
|
||||
assert i.context_file == "STUB.md"
|
||||
|
||||
def test_options_default_empty(self):
|
||||
assert _StubIntegration.options() == []
|
||||
|
||||
def test_templates_dir(self):
|
||||
i = _StubIntegration()
|
||||
td = i.templates_dir()
|
||||
# Should point to a templates/ dir next to this test module.
|
||||
# It won't exist, but the path should be well-formed.
|
||||
assert td.name == "templates"
|
||||
|
||||
def test_setup_no_templates_returns_empty(self, tmp_path):
|
||||
"""setup() gracefully returns empty list when templates dir is missing."""
|
||||
i = _StubIntegration()
|
||||
manifest = IntegrationManifest("stub", tmp_path)
|
||||
created = i.setup(tmp_path, manifest)
|
||||
assert created == []
|
||||
|
||||
def test_setup_copies_templates(self, tmp_path, monkeypatch):
|
||||
"""setup() copies template files and records them in the manifest."""
|
||||
# Create templates under tmp_path so we don't mutate the source tree
|
||||
tpl = tmp_path / "_templates"
|
||||
tpl.mkdir()
|
||||
(tpl / "speckit.plan.md").write_text("plan content", encoding="utf-8")
|
||||
(tpl / "speckit.specify.md").write_text("spec content", encoding="utf-8")
|
||||
|
||||
i = _StubIntegration()
|
||||
monkeypatch.setattr(type(i), "templates_dir", lambda self: tpl)
|
||||
|
||||
project = tmp_path / "project"
|
||||
project.mkdir()
|
||||
created = i.setup(project, IntegrationManifest("stub", project))
|
||||
assert len(created) == 2
|
||||
assert (project / ".stub" / "commands" / "speckit.plan.md").exists()
|
||||
assert (project / ".stub" / "commands" / "speckit.specify.md").exists()
|
||||
|
||||
def test_install_delegates_to_setup(self, tmp_path):
|
||||
i = _StubIntegration()
|
||||
manifest = IntegrationManifest("stub", tmp_path)
|
||||
result = i.install(tmp_path, manifest)
|
||||
assert result == [] # no templates dir → empty
|
||||
|
||||
def test_uninstall_delegates_to_teardown(self, tmp_path):
|
||||
i = _StubIntegration()
|
||||
manifest = IntegrationManifest("stub", tmp_path)
|
||||
removed, skipped = i.uninstall(tmp_path, manifest)
|
||||
assert removed == []
|
||||
assert skipped == []
|
||||
|
||||
|
||||
class TestMarkdownIntegration:
|
||||
def test_is_subclass_of_base(self):
|
||||
assert issubclass(MarkdownIntegration, IntegrationBase)
|
||||
|
||||
def test_stub_is_markdown(self):
|
||||
assert isinstance(_StubIntegration(), MarkdownIntegration)
|
||||
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
# IntegrationManifest
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
|
||||
class TestManifestRecordFile:
|
||||
def test_record_file_writes_and_hashes(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
content = "hello world"
|
||||
abs_path = m.record_file("a/b.txt", content)
|
||||
|
||||
assert abs_path == tmp_path / "a" / "b.txt"
|
||||
assert abs_path.read_text(encoding="utf-8") == content
|
||||
expected_hash = hashlib.sha256(content.encode()).hexdigest()
|
||||
assert m.files["a/b.txt"] == expected_hash
|
||||
|
||||
def test_record_file_bytes(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
data = b"\x00\x01\x02"
|
||||
abs_path = m.record_file("bin.dat", data)
|
||||
assert abs_path.read_bytes() == data
|
||||
assert m.files["bin.dat"] == hashlib.sha256(data).hexdigest()
|
||||
|
||||
def test_record_existing(self, tmp_path):
|
||||
f = tmp_path / "existing.txt"
|
||||
f.write_text("content", encoding="utf-8")
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_existing("existing.txt")
|
||||
assert m.files["existing.txt"] == _sha256(f)
|
||||
|
||||
|
||||
class TestManifestPathTraversal:
|
||||
def test_record_file_rejects_parent_traversal(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
with pytest.raises(ValueError, match="outside"):
|
||||
m.record_file("../escape.txt", "bad")
|
||||
|
||||
def test_record_file_rejects_absolute_path(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
with pytest.raises(ValueError, match="Absolute paths"):
|
||||
m.record_file("/tmp/escape.txt", "bad")
|
||||
|
||||
def test_record_existing_rejects_parent_traversal(self, tmp_path):
|
||||
# Create a file outside the project root
|
||||
escape = tmp_path.parent / "escape.txt"
|
||||
escape.write_text("evil", encoding="utf-8")
|
||||
try:
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
with pytest.raises(ValueError, match="outside"):
|
||||
m.record_existing("../escape.txt")
|
||||
finally:
|
||||
escape.unlink(missing_ok=True)
|
||||
|
||||
def test_uninstall_skips_traversal_paths(self, tmp_path):
|
||||
"""If a manifest is corrupted with traversal paths, uninstall ignores them."""
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("safe.txt", "good")
|
||||
# Manually inject a traversal path into the manifest
|
||||
m._files["../outside.txt"] = "fakehash"
|
||||
m.save()
|
||||
|
||||
removed, skipped = m.uninstall()
|
||||
# Only the safe file should have been removed
|
||||
assert len(removed) == 1
|
||||
assert removed[0].name == "safe.txt"
|
||||
|
||||
|
||||
class TestManifestCheckModified:
|
||||
def test_unmodified_file(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "original")
|
||||
assert m.check_modified() == []
|
||||
|
||||
def test_modified_file(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "original")
|
||||
(tmp_path / "f.txt").write_text("changed", encoding="utf-8")
|
||||
assert m.check_modified() == ["f.txt"]
|
||||
|
||||
def test_deleted_file_not_reported(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "original")
|
||||
(tmp_path / "f.txt").unlink()
|
||||
assert m.check_modified() == []
|
||||
|
||||
def test_symlink_treated_as_modified(self, tmp_path):
|
||||
"""A tracked file replaced with a symlink is reported as modified."""
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "original")
|
||||
target = tmp_path / "target.txt"
|
||||
target.write_text("target", encoding="utf-8")
|
||||
(tmp_path / "f.txt").unlink()
|
||||
(tmp_path / "f.txt").symlink_to(target)
|
||||
assert m.check_modified() == ["f.txt"]
|
||||
|
||||
|
||||
class TestManifestUninstall:
|
||||
def test_removes_unmodified(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("d/f.txt", "content")
|
||||
m.save()
|
||||
|
||||
removed, skipped = m.uninstall()
|
||||
assert len(removed) == 1
|
||||
assert not (tmp_path / "d" / "f.txt").exists()
|
||||
# Parent dir cleaned up because empty
|
||||
assert not (tmp_path / "d").exists()
|
||||
assert skipped == []
|
||||
|
||||
def test_skips_modified(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "original")
|
||||
m.save()
|
||||
(tmp_path / "f.txt").write_text("modified", encoding="utf-8")
|
||||
|
||||
removed, skipped = m.uninstall()
|
||||
assert removed == []
|
||||
assert len(skipped) == 1
|
||||
assert (tmp_path / "f.txt").exists()
|
||||
|
||||
def test_force_removes_modified(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "original")
|
||||
m.save()
|
||||
(tmp_path / "f.txt").write_text("modified", encoding="utf-8")
|
||||
|
||||
removed, skipped = m.uninstall(force=True)
|
||||
assert len(removed) == 1
|
||||
assert skipped == []
|
||||
assert not (tmp_path / "f.txt").exists()
|
||||
|
||||
def test_already_deleted_file(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "content")
|
||||
m.save()
|
||||
(tmp_path / "f.txt").unlink()
|
||||
|
||||
removed, skipped = m.uninstall()
|
||||
assert removed == []
|
||||
assert skipped == []
|
||||
|
||||
def test_removes_manifest_file(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path, version="1.0")
|
||||
m.record_file("f.txt", "content")
|
||||
m.save()
|
||||
assert m.manifest_path.exists()
|
||||
|
||||
m.uninstall()
|
||||
assert not m.manifest_path.exists()
|
||||
|
||||
def test_cleans_empty_parent_dirs(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("a/b/c/f.txt", "content")
|
||||
m.save()
|
||||
|
||||
m.uninstall()
|
||||
assert not (tmp_path / "a" / "b" / "c").exists()
|
||||
assert not (tmp_path / "a" / "b").exists()
|
||||
assert not (tmp_path / "a").exists()
|
||||
|
||||
def test_preserves_nonempty_parent_dirs(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("a/b/tracked.txt", "content")
|
||||
# Create an untracked sibling
|
||||
(tmp_path / "a" / "b" / "other.txt").write_text("keep", encoding="utf-8")
|
||||
m.save()
|
||||
|
||||
m.uninstall()
|
||||
assert not (tmp_path / "a" / "b" / "tracked.txt").exists()
|
||||
assert (tmp_path / "a" / "b" / "other.txt").exists()
|
||||
assert (tmp_path / "a" / "b").is_dir()
|
||||
|
||||
def test_symlink_skipped_without_force(self, tmp_path):
|
||||
"""A tracked file replaced with a symlink is skipped unless force."""
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "original")
|
||||
m.save()
|
||||
target = tmp_path / "target.txt"
|
||||
target.write_text("target", encoding="utf-8")
|
||||
(tmp_path / "f.txt").unlink()
|
||||
(tmp_path / "f.txt").symlink_to(target)
|
||||
|
||||
removed, skipped = m.uninstall()
|
||||
assert removed == []
|
||||
assert len(skipped) == 1
|
||||
assert (tmp_path / "f.txt").is_symlink() # still there
|
||||
|
||||
def test_symlink_removed_with_force(self, tmp_path):
|
||||
"""A tracked file replaced with a symlink is removed with force."""
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "original")
|
||||
m.save()
|
||||
target = tmp_path / "target.txt"
|
||||
target.write_text("target", encoding="utf-8")
|
||||
(tmp_path / "f.txt").unlink()
|
||||
(tmp_path / "f.txt").symlink_to(target)
|
||||
|
||||
removed, skipped = m.uninstall(force=True)
|
||||
assert len(removed) == 1
|
||||
assert not (tmp_path / "f.txt").exists()
|
||||
assert target.exists() # target not deleted
|
||||
|
||||
|
||||
class TestManifestPersistence:
|
||||
def test_save_and_load_roundtrip(self, tmp_path):
|
||||
m = IntegrationManifest("myagent", tmp_path, version="2.0.1")
|
||||
m.record_file("dir/file.md", "# Hello")
|
||||
m.save()
|
||||
|
||||
loaded = IntegrationManifest.load("myagent", tmp_path)
|
||||
assert loaded.key == "myagent"
|
||||
assert loaded.version == "2.0.1"
|
||||
assert loaded.files == m.files
|
||||
assert loaded._installed_at == m._installed_at
|
||||
|
||||
def test_manifest_path(self, tmp_path):
|
||||
m = IntegrationManifest("copilot", tmp_path)
|
||||
assert m.manifest_path == tmp_path / ".specify" / "integrations" / "copilot.manifest.json"
|
||||
|
||||
def test_load_missing_raises(self, tmp_path):
|
||||
with pytest.raises(FileNotFoundError):
|
||||
IntegrationManifest.load("nonexistent", tmp_path)
|
||||
|
||||
def test_save_creates_directories(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "content")
|
||||
path = m.save()
|
||||
assert path.exists()
|
||||
data = json.loads(path.read_text(encoding="utf-8"))
|
||||
assert data["integration"] == "test"
|
||||
assert "installed_at" in data
|
||||
assert "f.txt" in data["files"]
|
||||
|
||||
def test_save_preserves_installed_at(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "content")
|
||||
m.save()
|
||||
first_ts = m._installed_at
|
||||
|
||||
# Save again — timestamp should not change
|
||||
m.save()
|
||||
assert m._installed_at == first_ts
|
||||
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
# Registry
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
|
||||
class TestRegistry:
|
||||
def test_registry_starts_empty(self):
|
||||
# Registry may have been populated by other tests; at minimum
|
||||
# it should be a dict.
|
||||
assert isinstance(INTEGRATION_REGISTRY, dict)
|
||||
|
||||
def test_register_and_get(self):
|
||||
stub = _StubIntegration()
|
||||
_register(stub)
|
||||
try:
|
||||
assert get_integration("stub") is stub
|
||||
finally:
|
||||
INTEGRATION_REGISTRY.pop("stub", None)
|
||||
|
||||
def test_get_missing_returns_none(self):
|
||||
assert get_integration("nonexistent-xyz") is None
|
||||
|
||||
def test_register_empty_key_raises(self):
|
||||
class EmptyKey(MarkdownIntegration):
|
||||
key = ""
|
||||
with pytest.raises(ValueError, match="empty key"):
|
||||
_register(EmptyKey())
|
||||
|
||||
def test_register_duplicate_raises(self):
|
||||
stub = _StubIntegration()
|
||||
_register(stub)
|
||||
try:
|
||||
with pytest.raises(KeyError, match="already registered"):
|
||||
_register(_StubIntegration())
|
||||
finally:
|
||||
INTEGRATION_REGISTRY.pop("stub", None)
|
||||
|
||||
|
||||
class TestManifestLoadValidation:
|
||||
def test_load_non_dict_raises(self, tmp_path):
|
||||
path = tmp_path / ".specify" / "integrations" / "bad.manifest.json"
|
||||
path.parent.mkdir(parents=True)
|
||||
path.write_text('"just a string"', encoding="utf-8")
|
||||
with pytest.raises(ValueError, match="JSON object"):
|
||||
IntegrationManifest.load("bad", tmp_path)
|
||||
|
||||
def test_load_bad_files_type_raises(self, tmp_path):
|
||||
path = tmp_path / ".specify" / "integrations" / "bad.manifest.json"
|
||||
path.parent.mkdir(parents=True)
|
||||
path.write_text(json.dumps({"files": ["not", "a", "dict"]}), encoding="utf-8")
|
||||
with pytest.raises(ValueError, match="mapping"):
|
||||
IntegrationManifest.load("bad", tmp_path)
|
||||
|
||||
def test_load_bad_files_values_raises(self, tmp_path):
|
||||
path = tmp_path / ".specify" / "integrations" / "bad.manifest.json"
|
||||
path.parent.mkdir(parents=True)
|
||||
path.write_text(json.dumps({"files": {"a.txt": 123}}), encoding="utf-8")
|
||||
with pytest.raises(ValueError, match="mapping"):
|
||||
IntegrationManifest.load("bad", tmp_path)
|
||||
|
||||
def test_load_invalid_json_raises(self, tmp_path):
|
||||
path = tmp_path / ".specify" / "integrations" / "bad.manifest.json"
|
||||
path.parent.mkdir(parents=True)
|
||||
path.write_text("{not valid json", encoding="utf-8")
|
||||
with pytest.raises(ValueError, match="invalid JSON"):
|
||||
IntegrationManifest.load("bad", tmp_path)
|
||||
@@ -1170,8 +1170,12 @@ class TestPresetCatalog:
|
||||
assert not catalog.cache_file.exists()
|
||||
assert not catalog.cache_metadata_file.exists()
|
||||
|
||||
def test_search_with_cached_data(self, project_dir):
|
||||
def test_search_with_cached_data(self, project_dir, monkeypatch):
|
||||
"""Test search with cached catalog data."""
|
||||
from unittest.mock import patch
|
||||
|
||||
# Only use the default catalog to prevent fetching the community catalog from the network
|
||||
monkeypatch.setenv("SPECKIT_PRESET_CATALOG_URL", PresetCatalog.DEFAULT_CATALOG_URL)
|
||||
catalog = PresetCatalog(project_dir)
|
||||
catalog.cache_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
@@ -1200,6 +1204,9 @@ class TestPresetCatalog:
|
||||
"cached_at": datetime.now(timezone.utc).isoformat(),
|
||||
}))
|
||||
|
||||
# Isolate from community catalog so results are deterministic
|
||||
default_only = [PresetCatalogEntry(url=catalog.DEFAULT_CATALOG_URL, name="default", priority=1, install_allowed=True)]
|
||||
with patch.object(catalog, "get_active_catalogs", return_value=default_only):
|
||||
# Search by query
|
||||
results = catalog.search(query="agile")
|
||||
assert len(results) == 1
|
||||
@@ -1935,10 +1942,10 @@ class TestInitOptions:
|
||||
class TestPresetSkills:
|
||||
"""Tests for preset skill registration and unregistration."""
|
||||
|
||||
def _write_init_options(self, project_dir, ai="claude", ai_skills=True):
|
||||
def _write_init_options(self, project_dir, ai="claude", ai_skills=True, script="sh"):
|
||||
from specify_cli import save_init_options
|
||||
|
||||
save_init_options(project_dir, {"ai": ai, "ai_skills": ai_skills})
|
||||
save_init_options(project_dir, {"ai": ai, "ai_skills": ai_skills, "script": script})
|
||||
|
||||
def _create_skill(self, skills_dir, skill_name, body="original body"):
|
||||
skill_dir = skills_dir / skill_name
|
||||
@@ -1988,6 +1995,26 @@ class TestPresetSkills:
|
||||
content = skill_file.read_text()
|
||||
assert "untouched" in content, "Skill should not be modified when ai_skills=False"
|
||||
|
||||
def test_get_skills_dir_returns_none_for_non_string_ai(self, project_dir):
|
||||
"""Corrupted init-options ai values should not crash preset skill resolution."""
|
||||
init_options = project_dir / ".specify" / "init-options.json"
|
||||
init_options.parent.mkdir(parents=True, exist_ok=True)
|
||||
init_options.write_text('{"ai":["codex"],"ai_skills":true,"script":"sh"}')
|
||||
|
||||
manager = PresetManager(project_dir)
|
||||
|
||||
assert manager._get_skills_dir() is None
|
||||
|
||||
def test_get_skills_dir_returns_none_for_non_dict_init_options(self, project_dir):
|
||||
"""Corrupted non-dict init-options payloads should fail closed."""
|
||||
init_options = project_dir / ".specify" / "init-options.json"
|
||||
init_options.parent.mkdir(parents=True, exist_ok=True)
|
||||
init_options.write_text("[]")
|
||||
|
||||
manager = PresetManager(project_dir)
|
||||
|
||||
assert manager._get_skills_dir() is None
|
||||
|
||||
def test_skill_not_updated_without_init_options(self, project_dir, temp_dir):
|
||||
"""When no init-options.json exists, preset install should not touch skills."""
|
||||
skills_dir = project_dir / ".claude" / "skills"
|
||||
@@ -2033,6 +2060,52 @@ class TestPresetSkills:
|
||||
assert "preset:self-test" not in content, "Preset content should be gone"
|
||||
assert "templates/commands/specify.md" in content, "Should reference core template"
|
||||
|
||||
def test_skill_restored_on_remove_resolves_script_placeholders(self, project_dir):
|
||||
"""Core restore should resolve {SCRIPT}/{ARGS} placeholders like other skill paths."""
|
||||
self._write_init_options(project_dir, ai="claude", ai_skills=True, script="sh")
|
||||
skills_dir = project_dir / ".claude" / "skills"
|
||||
self._create_skill(skills_dir, "speckit-specify", body="old")
|
||||
(project_dir / ".claude" / "commands").mkdir(parents=True, exist_ok=True)
|
||||
|
||||
core_cmds = project_dir / ".specify" / "templates" / "commands"
|
||||
core_cmds.mkdir(parents=True, exist_ok=True)
|
||||
(core_cmds / "specify.md").write_text(
|
||||
"---\n"
|
||||
"description: Core specify command\n"
|
||||
"scripts:\n"
|
||||
" sh: .specify/scripts/bash/create-new-feature.sh --json \"{ARGS}\"\n"
|
||||
"---\n\n"
|
||||
"Run:\n"
|
||||
"{SCRIPT}\n"
|
||||
)
|
||||
|
||||
manager = PresetManager(project_dir)
|
||||
SELF_TEST_DIR = Path(__file__).parent.parent / "presets" / "self-test"
|
||||
manager.install_from_directory(SELF_TEST_DIR, "0.1.5")
|
||||
manager.remove("self-test")
|
||||
|
||||
content = (skills_dir / "speckit-specify" / "SKILL.md").read_text()
|
||||
assert "{SCRIPT}" not in content
|
||||
assert "{ARGS}" not in content
|
||||
assert ".specify/scripts/bash/create-new-feature.sh --json \"$ARGUMENTS\"" in content
|
||||
|
||||
def test_skill_not_overridden_when_skill_path_is_file(self, project_dir):
|
||||
"""Preset install should skip non-directory skill targets."""
|
||||
self._write_init_options(project_dir, ai="claude")
|
||||
skills_dir = project_dir / ".claude" / "skills"
|
||||
skills_dir.mkdir(parents=True, exist_ok=True)
|
||||
(skills_dir / "speckit-specify").write_text("not-a-directory")
|
||||
|
||||
(project_dir / ".claude" / "commands").mkdir(parents=True, exist_ok=True)
|
||||
|
||||
manager = PresetManager(project_dir)
|
||||
SELF_TEST_DIR = Path(__file__).parent.parent / "presets" / "self-test"
|
||||
manager.install_from_directory(SELF_TEST_DIR, "0.1.5")
|
||||
|
||||
assert (skills_dir / "speckit-specify").is_file()
|
||||
metadata = manager.registry.get("self-test")
|
||||
assert "speckit-specify" not in metadata.get("registered_skills", [])
|
||||
|
||||
def test_no_skills_registered_when_no_skill_dir_exists(self, project_dir, temp_dir):
|
||||
"""Skills should not be created when no existing skill dir is found."""
|
||||
self._write_init_options(project_dir, ai="claude")
|
||||
@@ -2047,6 +2120,304 @@ class TestPresetSkills:
|
||||
metadata = manager.registry.get("self-test")
|
||||
assert metadata.get("registered_skills", []) == []
|
||||
|
||||
def test_extension_skill_override_matches_hyphenated_multisegment_name(self, project_dir, temp_dir):
|
||||
"""Preset overrides for speckit.<ext>.<cmd> should target speckit-<ext>-<cmd> skills."""
|
||||
self._write_init_options(project_dir, ai="codex")
|
||||
skills_dir = project_dir / ".agents" / "skills"
|
||||
self._create_skill(skills_dir, "speckit-fakeext-cmd", body="untouched")
|
||||
(project_dir / ".specify" / "extensions" / "fakeext").mkdir(parents=True, exist_ok=True)
|
||||
|
||||
preset_dir = temp_dir / "ext-skill-override"
|
||||
preset_dir.mkdir()
|
||||
(preset_dir / "commands").mkdir()
|
||||
(preset_dir / "commands" / "speckit.fakeext.cmd.md").write_text(
|
||||
"---\ndescription: Override fakeext cmd\n---\n\npreset:ext-skill-override\n"
|
||||
)
|
||||
manifest_data = {
|
||||
"schema_version": "1.0",
|
||||
"preset": {
|
||||
"id": "ext-skill-override",
|
||||
"name": "Ext Skill Override",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"templates": [
|
||||
{
|
||||
"type": "command",
|
||||
"name": "speckit.fakeext.cmd",
|
||||
"file": "commands/speckit.fakeext.cmd.md",
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
with open(preset_dir / "preset.yml", "w") as f:
|
||||
yaml.dump(manifest_data, f)
|
||||
|
||||
manager = PresetManager(project_dir)
|
||||
manager.install_from_directory(preset_dir, "0.1.5")
|
||||
|
||||
skill_file = skills_dir / "speckit-fakeext-cmd" / "SKILL.md"
|
||||
assert skill_file.exists()
|
||||
content = skill_file.read_text()
|
||||
assert "preset:ext-skill-override" in content
|
||||
assert "name: speckit-fakeext-cmd" in content
|
||||
assert "# Speckit Fakeext Cmd Skill" in content
|
||||
|
||||
metadata = manager.registry.get("ext-skill-override")
|
||||
assert "speckit-fakeext-cmd" in metadata.get("registered_skills", [])
|
||||
|
||||
def test_extension_skill_restored_on_preset_remove(self, project_dir, temp_dir):
|
||||
"""Preset removal should restore an extension-backed skill instead of deleting it."""
|
||||
self._write_init_options(project_dir, ai="codex")
|
||||
skills_dir = project_dir / ".agents" / "skills"
|
||||
self._create_skill(skills_dir, "speckit-fakeext-cmd", body="original extension skill")
|
||||
|
||||
extension_dir = project_dir / ".specify" / "extensions" / "fakeext"
|
||||
(extension_dir / "commands").mkdir(parents=True, exist_ok=True)
|
||||
(extension_dir / "commands" / "cmd.md").write_text(
|
||||
"---\n"
|
||||
"description: Extension fakeext cmd\n"
|
||||
"scripts:\n"
|
||||
" sh: ../../scripts/bash/setup-plan.sh --json \"{ARGS}\"\n"
|
||||
"---\n\n"
|
||||
"extension:fakeext\n"
|
||||
"Run {SCRIPT}\n"
|
||||
)
|
||||
extension_manifest = {
|
||||
"schema_version": "1.0",
|
||||
"extension": {
|
||||
"id": "fakeext",
|
||||
"name": "Fake Extension",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.fakeext.cmd",
|
||||
"file": "commands/cmd.md",
|
||||
"description": "Fake extension command",
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
with open(extension_dir / "extension.yml", "w") as f:
|
||||
yaml.dump(extension_manifest, f)
|
||||
|
||||
preset_dir = temp_dir / "ext-skill-restore"
|
||||
preset_dir.mkdir()
|
||||
(preset_dir / "commands").mkdir()
|
||||
(preset_dir / "commands" / "speckit.fakeext.cmd.md").write_text(
|
||||
"---\ndescription: Override fakeext cmd\n---\n\npreset:ext-skill-restore\n"
|
||||
)
|
||||
preset_manifest = {
|
||||
"schema_version": "1.0",
|
||||
"preset": {
|
||||
"id": "ext-skill-restore",
|
||||
"name": "Ext Skill Restore",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"templates": [
|
||||
{
|
||||
"type": "command",
|
||||
"name": "speckit.fakeext.cmd",
|
||||
"file": "commands/speckit.fakeext.cmd.md",
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
with open(preset_dir / "preset.yml", "w") as f:
|
||||
yaml.dump(preset_manifest, f)
|
||||
|
||||
manager = PresetManager(project_dir)
|
||||
manager.install_from_directory(preset_dir, "0.1.5")
|
||||
|
||||
skill_file = skills_dir / "speckit-fakeext-cmd" / "SKILL.md"
|
||||
assert "preset:ext-skill-restore" in skill_file.read_text()
|
||||
|
||||
manager.remove("ext-skill-restore")
|
||||
|
||||
assert skill_file.exists()
|
||||
content = skill_file.read_text()
|
||||
assert "preset:ext-skill-restore" not in content
|
||||
assert "source: extension:fakeext" in content
|
||||
assert "extension:fakeext" in content
|
||||
assert '.specify/scripts/bash/setup-plan.sh --json "$ARGUMENTS"' in content
|
||||
assert "# Fakeext Cmd Skill" in content
|
||||
|
||||
def test_preset_remove_skips_skill_dir_without_skill_file(self, project_dir, temp_dir):
|
||||
"""Preset removal should not delete arbitrary directories missing SKILL.md."""
|
||||
self._write_init_options(project_dir, ai="codex")
|
||||
skills_dir = project_dir / ".agents" / "skills"
|
||||
stray_skill_dir = skills_dir / "speckit-fakeext-cmd"
|
||||
stray_skill_dir.mkdir(parents=True, exist_ok=True)
|
||||
note_file = stray_skill_dir / "notes.txt"
|
||||
note_file.write_text("user content", encoding="utf-8")
|
||||
|
||||
preset_dir = temp_dir / "ext-skill-missing-file"
|
||||
preset_dir.mkdir()
|
||||
(preset_dir / "commands").mkdir()
|
||||
(preset_dir / "commands" / "speckit.fakeext.cmd.md").write_text(
|
||||
"---\ndescription: Override fakeext cmd\n---\n\npreset:ext-skill-missing-file\n"
|
||||
)
|
||||
preset_manifest = {
|
||||
"schema_version": "1.0",
|
||||
"preset": {
|
||||
"id": "ext-skill-missing-file",
|
||||
"name": "Ext Skill Missing File",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"templates": [
|
||||
{
|
||||
"type": "command",
|
||||
"name": "speckit.fakeext.cmd",
|
||||
"file": "commands/speckit.fakeext.cmd.md",
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
with open(preset_dir / "preset.yml", "w") as f:
|
||||
yaml.dump(preset_manifest, f)
|
||||
|
||||
manager = PresetManager(project_dir)
|
||||
installed_preset_dir = manager.presets_dir / "ext-skill-missing-file"
|
||||
shutil.copytree(preset_dir, installed_preset_dir)
|
||||
manager.registry.add(
|
||||
"ext-skill-missing-file",
|
||||
{
|
||||
"version": "1.0.0",
|
||||
"source": str(preset_dir),
|
||||
"provides_templates": ["speckit.fakeext.cmd"],
|
||||
"registered_skills": ["speckit-fakeext-cmd"],
|
||||
"priority": 10,
|
||||
},
|
||||
)
|
||||
|
||||
manager.remove("ext-skill-missing-file")
|
||||
|
||||
assert stray_skill_dir.is_dir()
|
||||
assert note_file.read_text(encoding="utf-8") == "user content"
|
||||
|
||||
def test_kimi_legacy_dotted_skill_override_still_applies(self, project_dir, temp_dir):
|
||||
"""Preset overrides should still target legacy dotted Kimi skill directories."""
|
||||
self._write_init_options(project_dir, ai="kimi")
|
||||
skills_dir = project_dir / ".kimi" / "skills"
|
||||
self._create_skill(skills_dir, "speckit.specify", body="untouched")
|
||||
|
||||
(project_dir / ".kimi" / "commands").mkdir(parents=True, exist_ok=True)
|
||||
|
||||
manager = PresetManager(project_dir)
|
||||
self_test_dir = Path(__file__).parent.parent / "presets" / "self-test"
|
||||
manager.install_from_directory(self_test_dir, "0.1.5")
|
||||
|
||||
skill_file = skills_dir / "speckit.specify" / "SKILL.md"
|
||||
assert skill_file.exists()
|
||||
content = skill_file.read_text()
|
||||
assert "preset:self-test" in content
|
||||
assert "name: speckit.specify" in content
|
||||
|
||||
metadata = manager.registry.get("self-test")
|
||||
assert "speckit.specify" in metadata.get("registered_skills", [])
|
||||
|
||||
def test_kimi_skill_updated_even_when_ai_skills_disabled(self, project_dir, temp_dir):
|
||||
"""Kimi presets should still propagate command overrides to existing skills."""
|
||||
self._write_init_options(project_dir, ai="kimi", ai_skills=False)
|
||||
skills_dir = project_dir / ".kimi" / "skills"
|
||||
self._create_skill(skills_dir, "speckit-specify", body="untouched")
|
||||
|
||||
(project_dir / ".kimi" / "commands").mkdir(parents=True, exist_ok=True)
|
||||
|
||||
manager = PresetManager(project_dir)
|
||||
self_test_dir = Path(__file__).parent.parent / "presets" / "self-test"
|
||||
manager.install_from_directory(self_test_dir, "0.1.5")
|
||||
|
||||
skill_file = skills_dir / "speckit-specify" / "SKILL.md"
|
||||
assert skill_file.exists()
|
||||
content = skill_file.read_text()
|
||||
assert "preset:self-test" in content
|
||||
assert "name: speckit-specify" in content
|
||||
|
||||
metadata = manager.registry.get("self-test")
|
||||
assert "speckit-specify" in metadata.get("registered_skills", [])
|
||||
|
||||
def test_kimi_preset_skill_override_resolves_script_placeholders(self, project_dir, temp_dir):
|
||||
"""Kimi preset skill overrides should resolve placeholders and rewrite project paths."""
|
||||
self._write_init_options(project_dir, ai="kimi", ai_skills=False, script="sh")
|
||||
skills_dir = project_dir / ".kimi" / "skills"
|
||||
self._create_skill(skills_dir, "speckit-specify", body="untouched")
|
||||
(project_dir / ".kimi" / "commands").mkdir(parents=True, exist_ok=True)
|
||||
|
||||
preset_dir = temp_dir / "kimi-placeholder-override"
|
||||
preset_dir.mkdir()
|
||||
(preset_dir / "commands").mkdir()
|
||||
(preset_dir / "commands" / "speckit.specify.md").write_text(
|
||||
"---\n"
|
||||
"description: Kimi placeholder override\n"
|
||||
"scripts:\n"
|
||||
" sh: scripts/bash/create-new-feature.sh --json \"{ARGS}\"\n"
|
||||
"---\n\n"
|
||||
"Execute `{SCRIPT}` for __AGENT__\n"
|
||||
"Review templates/checklist.md and memory/constitution.md\n"
|
||||
)
|
||||
manifest_data = {
|
||||
"schema_version": "1.0",
|
||||
"preset": {
|
||||
"id": "kimi-placeholder-override",
|
||||
"name": "Kimi Placeholder Override",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"templates": [
|
||||
{
|
||||
"type": "command",
|
||||
"name": "speckit.specify",
|
||||
"file": "commands/speckit.specify.md",
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
with open(preset_dir / "preset.yml", "w") as f:
|
||||
yaml.dump(manifest_data, f)
|
||||
|
||||
manager = PresetManager(project_dir)
|
||||
manager.install_from_directory(preset_dir, "0.1.5")
|
||||
|
||||
content = (skills_dir / "speckit-specify" / "SKILL.md").read_text()
|
||||
assert "{SCRIPT}" not in content
|
||||
assert "__AGENT__" not in content
|
||||
assert ".specify/scripts/bash/create-new-feature.sh --json \"$ARGUMENTS\"" in content
|
||||
assert ".specify/templates/checklist.md" in content
|
||||
assert ".specify/memory/constitution.md" in content
|
||||
assert "for kimi" in content
|
||||
|
||||
def test_preset_skill_registration_handles_non_dict_init_options(self, project_dir, temp_dir):
|
||||
"""Non-dict init-options payloads should not crash preset install/remove flows."""
|
||||
init_options = project_dir / ".specify" / "init-options.json"
|
||||
init_options.parent.mkdir(parents=True, exist_ok=True)
|
||||
init_options.write_text("[]")
|
||||
|
||||
skills_dir = project_dir / ".claude" / "skills"
|
||||
self._create_skill(skills_dir, "speckit-specify", body="untouched")
|
||||
(project_dir / ".claude" / "commands").mkdir(parents=True, exist_ok=True)
|
||||
|
||||
manager = PresetManager(project_dir)
|
||||
self_test_dir = Path(__file__).parent.parent / "presets" / "self-test"
|
||||
manager.install_from_directory(self_test_dir, "0.1.5")
|
||||
|
||||
content = (skills_dir / "speckit-specify" / "SKILL.md").read_text()
|
||||
assert "untouched" in content
|
||||
|
||||
|
||||
class TestPresetSetPriority:
|
||||
"""Test preset set-priority CLI command."""
|
||||
|
||||
@@ -14,6 +14,7 @@ import pytest
|
||||
|
||||
PROJECT_ROOT = Path(__file__).resolve().parent.parent
|
||||
CREATE_FEATURE = PROJECT_ROOT / "scripts" / "bash" / "create-new-feature.sh"
|
||||
CREATE_FEATURE_PS = PROJECT_ROOT / "scripts" / "powershell" / "create-new-feature.ps1"
|
||||
COMMON_SH = PROJECT_ROOT / "scripts" / "bash" / "common.sh"
|
||||
|
||||
|
||||
@@ -147,6 +148,24 @@ class TestSequentialBranch:
|
||||
branch = line.split(":", 1)[1].strip()
|
||||
assert branch == "003-next-feat", f"expected 003-next-feat, got: {branch}"
|
||||
|
||||
def test_sequential_supports_four_digit_prefixes(self, git_repo: Path):
|
||||
"""Sequential numbering should continue past 999 without truncation."""
|
||||
(git_repo / "specs" / "999-last-3digit").mkdir(parents=True)
|
||||
(git_repo / "specs" / "1000-first-4digit").mkdir(parents=True)
|
||||
result = run_script(git_repo, "--short-name", "next-feat", "Next feature")
|
||||
assert result.returncode == 0, result.stderr
|
||||
branch = None
|
||||
for line in result.stdout.splitlines():
|
||||
if line.startswith("BRANCH_NAME:"):
|
||||
branch = line.split(":", 1)[1].strip()
|
||||
assert branch == "1001-next-feat", f"expected 1001-next-feat, got: {branch}"
|
||||
|
||||
def test_powershell_scanner_uses_long_tryparse_for_large_prefixes(self):
|
||||
"""PowerShell scanner should parse large prefixes without [int] casts."""
|
||||
content = CREATE_FEATURE_PS.read_text(encoding="utf-8")
|
||||
assert "[long]::TryParse($matches[1], [ref]$num)" in content
|
||||
assert "$num = [int]$matches[1]" not in content
|
||||
|
||||
|
||||
# ── check_feature_branch Tests ───────────────────────────────────────────────
|
||||
|
||||
@@ -250,3 +269,146 @@ class TestE2EFlow:
|
||||
assert (git_repo / "specs" / branch).is_dir()
|
||||
val = source_and_call(f'check_feature_branch "{branch}" "true"')
|
||||
assert val.returncode == 0
|
||||
|
||||
|
||||
# ── Allow Existing Branch Tests ──────────────────────────────────────────────
|
||||
|
||||
|
||||
class TestAllowExistingBranch:
|
||||
def test_allow_existing_switches_to_branch(self, git_repo: Path):
|
||||
"""T006: Pre-create branch, verify script switches to it."""
|
||||
subprocess.run(
|
||||
["git", "checkout", "-b", "004-pre-exist"],
|
||||
cwd=git_repo, check=True, capture_output=True,
|
||||
)
|
||||
subprocess.run(
|
||||
["git", "checkout", "-"],
|
||||
cwd=git_repo, check=True, capture_output=True,
|
||||
)
|
||||
result = run_script(
|
||||
git_repo, "--allow-existing-branch", "--short-name", "pre-exist",
|
||||
"--number", "4", "Pre-existing feature",
|
||||
)
|
||||
assert result.returncode == 0, result.stderr
|
||||
current = subprocess.run(
|
||||
["git", "rev-parse", "--abbrev-ref", "HEAD"],
|
||||
cwd=git_repo, capture_output=True, text=True,
|
||||
).stdout.strip()
|
||||
assert current == "004-pre-exist", f"expected 004-pre-exist, got {current}"
|
||||
|
||||
def test_allow_existing_already_on_branch(self, git_repo: Path):
|
||||
"""T007: Verify success when already on the target branch."""
|
||||
subprocess.run(
|
||||
["git", "checkout", "-b", "005-already-on"],
|
||||
cwd=git_repo, check=True, capture_output=True,
|
||||
)
|
||||
result = run_script(
|
||||
git_repo, "--allow-existing-branch", "--short-name", "already-on",
|
||||
"--number", "5", "Already on branch",
|
||||
)
|
||||
assert result.returncode == 0, result.stderr
|
||||
|
||||
def test_allow_existing_creates_spec_dir(self, git_repo: Path):
|
||||
"""T008: Verify spec directory created on existing branch."""
|
||||
subprocess.run(
|
||||
["git", "checkout", "-b", "006-spec-dir"],
|
||||
cwd=git_repo, check=True, capture_output=True,
|
||||
)
|
||||
subprocess.run(
|
||||
["git", "checkout", "-"],
|
||||
cwd=git_repo, check=True, capture_output=True,
|
||||
)
|
||||
result = run_script(
|
||||
git_repo, "--allow-existing-branch", "--short-name", "spec-dir",
|
||||
"--number", "6", "Spec dir feature",
|
||||
)
|
||||
assert result.returncode == 0, result.stderr
|
||||
assert (git_repo / "specs" / "006-spec-dir").is_dir()
|
||||
assert (git_repo / "specs" / "006-spec-dir" / "spec.md").exists()
|
||||
|
||||
def test_without_flag_still_errors(self, git_repo: Path):
|
||||
"""T009: Verify backwards compatibility (error without flag)."""
|
||||
subprocess.run(
|
||||
["git", "checkout", "-b", "007-no-flag"],
|
||||
cwd=git_repo, check=True, capture_output=True,
|
||||
)
|
||||
subprocess.run(
|
||||
["git", "checkout", "-"],
|
||||
cwd=git_repo, check=True, capture_output=True,
|
||||
)
|
||||
result = run_script(
|
||||
git_repo, "--short-name", "no-flag", "--number", "7", "No flag feature",
|
||||
)
|
||||
assert result.returncode != 0, "should fail without --allow-existing-branch"
|
||||
assert "already exists" in result.stderr
|
||||
|
||||
def test_allow_existing_no_overwrite_spec(self, git_repo: Path):
|
||||
"""T010: Pre-create spec.md with content, verify it is preserved."""
|
||||
subprocess.run(
|
||||
["git", "checkout", "-b", "008-no-overwrite"],
|
||||
cwd=git_repo, check=True, capture_output=True,
|
||||
)
|
||||
spec_dir = git_repo / "specs" / "008-no-overwrite"
|
||||
spec_dir.mkdir(parents=True)
|
||||
spec_file = spec_dir / "spec.md"
|
||||
spec_file.write_text("# My custom spec content\n")
|
||||
subprocess.run(
|
||||
["git", "checkout", "-"],
|
||||
cwd=git_repo, check=True, capture_output=True,
|
||||
)
|
||||
result = run_script(
|
||||
git_repo, "--allow-existing-branch", "--short-name", "no-overwrite",
|
||||
"--number", "8", "No overwrite feature",
|
||||
)
|
||||
assert result.returncode == 0, result.stderr
|
||||
assert spec_file.read_text() == "# My custom spec content\n"
|
||||
|
||||
def test_allow_existing_creates_branch_if_not_exists(self, git_repo: Path):
|
||||
"""T011: Verify normal creation when branch doesn't exist."""
|
||||
result = run_script(
|
||||
git_repo, "--allow-existing-branch", "--short-name", "new-branch",
|
||||
"New branch feature",
|
||||
)
|
||||
assert result.returncode == 0, result.stderr
|
||||
current = subprocess.run(
|
||||
["git", "rev-parse", "--abbrev-ref", "HEAD"],
|
||||
cwd=git_repo, capture_output=True, text=True,
|
||||
).stdout.strip()
|
||||
assert "new-branch" in current
|
||||
|
||||
def test_allow_existing_with_json(self, git_repo: Path):
|
||||
"""T012: Verify JSON output is correct."""
|
||||
import json
|
||||
|
||||
subprocess.run(
|
||||
["git", "checkout", "-b", "009-json-test"],
|
||||
cwd=git_repo, check=True, capture_output=True,
|
||||
)
|
||||
subprocess.run(
|
||||
["git", "checkout", "-"],
|
||||
cwd=git_repo, check=True, capture_output=True,
|
||||
)
|
||||
result = run_script(
|
||||
git_repo, "--allow-existing-branch", "--json", "--short-name", "json-test",
|
||||
"--number", "9", "JSON test",
|
||||
)
|
||||
assert result.returncode == 0, result.stderr
|
||||
data = json.loads(result.stdout)
|
||||
assert data["BRANCH_NAME"] == "009-json-test"
|
||||
|
||||
def test_allow_existing_no_git(self, no_git_dir: Path):
|
||||
"""T013: Verify flag is silently ignored in non-git repos."""
|
||||
result = run_script(
|
||||
no_git_dir, "--allow-existing-branch", "--short-name", "no-git",
|
||||
"No git feature",
|
||||
)
|
||||
assert result.returncode == 0, result.stderr
|
||||
|
||||
|
||||
class TestAllowExistingBranchPowerShell:
|
||||
def test_powershell_supports_allow_existing_branch_flag(self):
|
||||
"""Static guard: PS script exposes and uses -AllowExistingBranch."""
|
||||
contents = CREATE_FEATURE_PS.read_text(encoding="utf-8")
|
||||
assert "-AllowExistingBranch" in contents
|
||||
# Ensure the flag is referenced in script logic, not just declared
|
||||
assert "AllowExistingBranch" in contents.replace("-AllowExistingBranch", "")
|
||||
|
||||
Reference in New Issue
Block a user