Mirror of https://github.com/github/spec-kit.git (synced 2026-04-01 10:13:08 +00:00)

Compare commits: 1 commit (aff14ea5a0)
@@ -12,7 +12,7 @@ body:
- Review the [Extension Publishing Guide](https://github.com/github/spec-kit/blob/main/extensions/EXTENSION-PUBLISHING-GUIDE.md)
- Ensure your extension has a valid `extension.yml` manifest
- Create a GitHub release with a version tag (e.g., v1.0.0)
- Test installation: `specify extension add <extension-name> --from <your-release-url>`
- Test installation: `specify extension add --from <your-release-url>`

- type: input
id: extension-id

@@ -229,7 +229,7 @@ body:
placeholder: |
```bash
# Install extension
specify extension add <extension-name> --from https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip
specify extension add --from https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip

# Use a command
/speckit.your-extension.command-name arg1 arg2

.github/workflows/docs.yml (vendored): 2 lines changed

@@ -64,5 +64,5 @@ jobs:
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v5
uses: actions/deploy-pages@v4

.github/workflows/lint.yml (vendored): 2 lines changed

@@ -15,7 +15,7 @@ jobs:
uses: actions/checkout@v6

- name: Run markdownlint-cli2
uses: DavidAnson/markdownlint-cli2-action@ce4853d43830c74c1753b39f3cf40f71c2031eb9 # v23
uses: DavidAnson/markdownlint-cli2-action@v19
with:
globs: |
'**/*.md'

.github/workflows/release-trigger.yml (vendored): 23 lines changed

@@ -139,22 +139,6 @@ jobs:
git push origin "${{ steps.version.outputs.tag }}"
echo "Branch ${{ env.branch }} and tag ${{ steps.version.outputs.tag }} pushed"

- name: Bump to dev version
id: dev_version
run: |
IFS='.' read -r MAJOR MINOR PATCH <<< "${{ steps.version.outputs.version }}"
NEXT_DEV="$MAJOR.$MINOR.$((PATCH + 1)).dev0"
echo "dev_version=$NEXT_DEV" >> $GITHUB_OUTPUT
sed -i "s/version = \".*\"/version = \"$NEXT_DEV\"/" pyproject.toml
git add pyproject.toml
if git diff --cached --quiet; then
echo "No dev version changes to commit"
else
git commit -m "chore: begin $NEXT_DEV development"
git push origin "${{ env.branch }}"
echo "Bumped to dev version $NEXT_DEV"
fi

- name: Open pull request
env:
GITHUB_TOKEN: ${{ secrets.RELEASE_PAT }}

@@ -162,17 +146,16 @@ jobs:
gh pr create \
--base main \
--head "${{ env.branch }}" \
--title "chore: release ${{ steps.version.outputs.version }}, begin ${{ steps.dev_version.outputs.dev_version }} development" \
--body "Automated release of ${{ steps.version.outputs.version }}.
--title "chore: bump version to ${{ steps.version.outputs.version }}" \
--body "Automated version bump to ${{ steps.version.outputs.version }}.

This PR was created by the Release Trigger workflow. The git tag \`${{ steps.version.outputs.tag }}\` has already been pushed and the release artifacts are being built.

Merging this PR will set \`main\` to \`${{ steps.dev_version.outputs.dev_version }}\` so that development installs are clearly marked as pre-release."
Merge this PR to record the version bump and changelog update on \`main\`."

- name: Summary
run: |
echo "✅ Version bumped to ${{ steps.version.outputs.version }}"
echo "✅ Tag ${{ steps.version.outputs.tag }} created and pushed"
echo "✅ Dev version set to ${{ steps.dev_version.outputs.dev_version }}"
echo "✅ PR opened to merge version bump into main"
echo "🚀 Release workflow is building artifacts from the tag"

@@ -497,13 +497,13 @@ $AllAgents = @('claude', 'gemini', 'copilot', 'cursor-agent', 'qwen', 'opencode',
$AllScripts = @('sh', 'ps')

function Normalize-List {
param([string]$Value)
param([string]$Input)

if ([string]::IsNullOrEmpty($Value)) {
if ([string]::IsNullOrEmpty($Input)) {
return @()
}

$items = $Value -split '[,\s]+' | Where-Object { $_ } | Select-Object -Unique
$items = $Input -split '[,\s]+' | Where-Object { $_ } | Select-Object -Unique
return $items
}

@@ -526,7 +526,7 @@ function Validate-Subset {

# Determine agent list
if (-not [string]::IsNullOrEmpty($Agents)) {
$AgentList = Normalize-List -Value $Agents
$AgentList = Normalize-List -Input $Agents
if (-not (Validate-Subset -Type 'agent' -Allowed $AllAgents -Items $AgentList)) {
exit 1
}

@@ -536,7 +536,7 @@ if (-not [string]::IsNullOrEmpty($Agents)) {

# Determine script list
if (-not [string]::IsNullOrEmpty($Scripts)) {
$ScriptList = Normalize-List -Value $Scripts
$ScriptList = Normalize-List -Input $Scripts
if (-not (Validate-Subset -Type 'script' -Allowed $AllScripts -Items $ScriptList)) {
exit 1
}

.github/workflows/test.yml (vendored): 4 lines changed

@@ -16,7 +16,7 @@ jobs:
uses: actions/checkout@v4

- name: Install uv
uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7
uses: astral-sh/setup-uv@v7

- name: Set up Python
uses: actions/setup-python@v6

@@ -36,7 +36,7 @@ jobs:
uses: actions/checkout@v4

- name: Install uv
uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7
uses: astral-sh/setup-uv@v7

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v6

AGENTS.md: 36 lines changed

@@ -30,10 +30,10 @@ Specify supports multiple AI agents by generating agent-specific command files a
| **Claude Code** | `.claude/commands/` | Markdown | `claude` | Anthropic's Claude Code CLI |
| **Gemini CLI** | `.gemini/commands/` | TOML | `gemini` | Google's Gemini CLI |
| **GitHub Copilot** | `.github/agents/` | Markdown | N/A (IDE-based) | GitHub Copilot in VS Code |
| **Cursor** | `.cursor/commands/` | Markdown | N/A (IDE-based) | Cursor IDE (`--ai cursor-agent`) |
| **Cursor** | `.cursor/commands/` | Markdown | `cursor-agent` | Cursor CLI |
| **Qwen Code** | `.qwen/commands/` | Markdown | `qwen` | Alibaba's Qwen Code CLI |
| **opencode** | `.opencode/command/` | Markdown | `opencode` | opencode CLI |
| **Codex CLI** | `.agents/skills/` | Markdown | `codex` | Codex CLI (`--ai codex --ai-skills`) |
| **Codex CLI** | `.agents/skills/` | Markdown | `codex` | Codex CLI (skills) |
| **Windsurf** | `.windsurf/workflows/` | Markdown | N/A (IDE-based) | Windsurf IDE workflows |
| **Junie** | `.junie/commands/` | Markdown | `junie` | Junie by JetBrains |
| **Kilo Code** | `.kilocode/workflows/` | Markdown | N/A (IDE-based) | Kilo Code IDE |

@@ -50,8 +50,6 @@ Specify supports multiple AI agents by generating agent-specific command files a
| **iFlow CLI** | `.iflow/commands/` | Markdown | `iflow` | iFlow CLI (iflow-ai) |
| **IBM Bob** | `.bob/commands/` | Markdown | N/A (IDE-based) | IBM Bob IDE |
| **Trae** | `.trae/rules/` | Markdown | N/A (IDE-based) | Trae IDE |
| **Antigravity** | `.agent/commands/` | Markdown | N/A (IDE-based) | Antigravity IDE (`--ai agy --ai-skills`) |
| **Mistral Vibe** | `.vibe/prompts/` | Markdown | `vibe` | Mistral Vibe CLI |
| **Generic** | User-specified via `--ai-commands-dir` | Markdown | N/A | Bring your own agent |

### Step-by-Step Integration Guide

@@ -318,40 +316,32 @@ Require a command-line tool to be installed:

- **Claude Code**: `claude` CLI
- **Gemini CLI**: `gemini` CLI
- **Cursor**: `cursor-agent` CLI
- **Qwen Code**: `qwen` CLI
- **opencode**: `opencode` CLI
- **Codex CLI**: `codex` CLI (requires `--ai-skills`)
- **Junie**: `junie` CLI
- **Auggie CLI**: `auggie` CLI
- **Kiro CLI**: `kiro-cli` CLI
- **CodeBuddy CLI**: `codebuddy` CLI
- **Qoder CLI**: `qodercli` CLI
- **Kiro CLI**: `kiro-cli` CLI
- **Amp**: `amp` CLI
- **SHAI**: `shai` CLI
- **Tabnine CLI**: `tabnine` CLI
- **Kimi Code**: `kimi` CLI
- **Mistral Vibe**: `vibe` CLI
- **Pi Coding Agent**: `pi` CLI
- **iFlow CLI**: `iflow` CLI

### IDE-Based Agents

Work within integrated development environments:

- **GitHub Copilot**: Built into VS Code/compatible editors
- **Cursor**: Built into Cursor IDE (`--ai cursor-agent`)
- **Windsurf**: Built into Windsurf IDE
- **Kilo Code**: Built into Kilo Code IDE
- **Roo Code**: Built into Roo Code IDE
- **IBM Bob**: Built into IBM Bob IDE
- **Trae**: Built into Trae IDE
- **Antigravity**: Built into Antigravity IDE (`--ai agy --ai-skills`)

## Command File Formats

### Markdown Format

Used by: Claude, Cursor, GitHub Copilot, opencode, Windsurf, Junie, Kiro CLI, Amp, SHAI, IBM Bob, Kimi Code, Qwen, Pi, Codex, Auggie, CodeBuddy, Qoder, Roo Code, Kilo Code, Trae, Antigravity, Mistral Vibe, iFlow
Used by: Claude, Cursor, opencode, Windsurf, Junie, Kiro CLI, Amp, SHAI, IBM Bob, Kimi Code, Qwen, Pi

**Standard format:**

@@ -389,29 +379,15 @@ Command content with {SCRIPT} and {{args}} placeholders.

## Directory Conventions

- **CLI agents**: Usually `.<agent-name>/commands/`
- **Singular command exception**:
- opencode: `.opencode/command/` (singular `command`, not `commands`)
- **Nested path exception**:
- Tabnine: `.tabnine/agent/commands/` (extra `agent/` segment)
- **Shared `.agents/` folder**:
- Amp: `.agents/commands/` (shared folder, not `.amp/`)
- Codex: `.agents/skills/` (shared folder; requires `--ai-skills`; invoked as `$speckit-<command>`)
- **Skills-based exceptions**:
- Kimi Code: `.kimi/skills/` (skills, invoked as `/skill:speckit-<command>`)
- Codex: `.agents/skills/` (skills, invoked as `$speckit-<command>`)
- **Prompt-based exceptions**:
- Kiro CLI: `.kiro/prompts/`
- Pi: `.pi/prompts/`
- Mistral Vibe: `.vibe/prompts/`
- **Rules-based exceptions**:
- Trae: `.trae/rules/`
- **IDE agents**: Follow IDE-specific patterns:
- Copilot: `.github/agents/`
- Cursor: `.cursor/commands/`
- Windsurf: `.windsurf/workflows/`
- Kilo Code: `.kilocode/workflows/`
- Roo Code: `.roo/commands/`
- IBM Bob: `.bob/commands/`
- Antigravity: `.agent/skills/` (`--ai-skills` required; `.agent/commands/` is deprecated)

## Argument Patterns

@@ -36,7 +36,7 @@ On [GitHub Codespaces](https://github.com/features/codespaces) it's even simpler

> If your pull request introduces a large change that materially impacts the work of the CLI or the rest of the repository (e.g., you're introducing new templates, arguments, or otherwise major changes), make sure that it was **discussed and agreed upon** by the project maintainers. Pull requests with large changes that did not have a prior conversation and agreement will be closed.

1. Fork and clone the repository
1. Configure and install the dependencies: `uv sync --extra test`
1. Configure and install the dependencies: `uv sync`
1. Make sure the CLI works on your machine: `uv run specify --help`
1. Create a new branch: `git checkout -b my-branch-name`
1. Make your change, add tests, and make sure everything still works

@@ -44,9 +44,6 @@ On [GitHub Codespaces](https://github.com/features/codespaces) it's even simpler
1. Push to your fork and submit a pull request
1. Wait for your pull request to be reviewed and merged.

For the detailed test workflow, command-selection prompt, and PR reporting template, see [`TESTING.md`](./TESTING.md).
Activate the project virtual environment (see the Setup block in [`TESTING.md`](./TESTING.md)), then install the CLI from your working tree (`uv pip install -e .` after `uv sync --extra test`) or otherwise ensure the shell uses the local `specify` binary before running the manual slash-command tests described below.

Here are a few things you can do that will increase the likelihood of your pull request being accepted:

- Follow the project's coding conventions.

@@ -65,14 +62,6 @@ When working on spec-kit:
3. Test script functionality in the `scripts/` directory
4. Ensure memory files (`memory/constitution.md`) are updated if major process changes are made

### Recommended validation flow

For the smoothest review experience, validate changes in this order:

1. **Run focused automated checks first** — use the quick verification commands in [`TESTING.md`](./TESTING.md) to catch packaging, scaffolding, and configuration regressions early.
2. **Run manual workflow tests second** — if your change affects slash commands or the developer workflow, follow [`TESTING.md`](./TESTING.md) to choose the right commands, run them in an agent, and capture results for your PR.
3. **Use local release packages when debugging packaged output** — if you need to inspect the exact files CI-style packaging produces, generate local release packages as described below.

### Testing template and command changes locally

Running `uv run specify init` pulls released packages, which won’t include your local changes.

@@ -96,8 +85,6 @@ To test your templates, commands, and other changes locally, follow these steps:

Navigate to your test project folder and open the agent to verify your implementation.

If you only need to validate generated file structure and content before doing manual agent testing, start with the focused automated checks in [`TESTING.md`](./TESTING.md). Keep this section for the cases where you need to inspect the exact packaged output locally.

## AI contributions in Spec Kit

> [!IMPORTANT]

README.md: 26 lines changed

@@ -9,7 +9,7 @@
</p>

<p align="center">
<a href="https://github.com/github/spec-kit/releases/latest"><img src="https://img.shields.io/github/v/release/github/spec-kit" alt="Latest Release"/></a>
<a href="https://github.com/github/spec-kit/actions/workflows/release.yml"><img src="https://github.com/github/spec-kit/actions/workflows/release.yml/badge.svg" alt="Release"/></a>
<a href="https://github.com/github/spec-kit/stargazers"><img src="https://img.shields.io/github/stars/github/spec-kit?style=social" alt="GitHub stars"/></a>
<a href="https://github.com/github/spec-kit/blob/main/LICENSE"><img src="https://img.shields.io/github/license/github/spec-kit" alt="License"/></a>
<a href="https://github.github.io/spec-kit/"><img src="https://img.shields.io/badge/docs-GitHub_Pages-blue" alt="Documentation"/></a>

@@ -162,18 +162,9 @@ Want to see Spec Kit in action? Watch our [video overview](https://www.youtube.c

The following community-contributed extensions are available in [`catalog.community.json`](extensions/catalog.community.json):

**Categories:**
**Categories:** `docs` — reads, validates, or generates spec artifacts · `code` — reviews, validates, or modifies source code · `process` — orchestrates workflow across phases · `integration` — syncs with external platforms · `visibility` — reports on project health or progress

- `docs` — reads, validates, or generates spec artifacts
- `code` — reviews, validates, or modifies source code
- `process` — orchestrates workflow across phases
- `integration` — syncs with external platforms
- `visibility` — reports on project health or progress

**Effect:**

- `Read-only` — produces reports without modifying files
- `Read+Write` — modifies files, creates artifacts, or updates specs
**Effect:** `Read-only` — produces reports without modifying files · `Read+Write` — modifies files, creates artifacts, or updates specs

| Extension | Purpose | Category | Effect | URL |
|-----------|---------|----------|--------|-----|

@@ -190,17 +181,7 @@ The following community-contributed extensions are available in [`catalog.commun
| Iterate | Iterate on spec documents with a two-phase define-and-apply workflow — refine specs mid-implementation and go straight back to building | `docs` | Read+Write | [spec-kit-iterate](https://github.com/imviancagrace/spec-kit-iterate) |
| Jira Integration | Create Jira Epics, Stories, and Issues from spec-kit specifications and task breakdowns with configurable hierarchy and custom field support | `integration` | Read+Write | [spec-kit-jira](https://github.com/mbachorik/spec-kit-jira) |
| Learning Extension | Generate educational guides from implementations and enhance clarifications with mentoring context | `docs` | Read+Write | [spec-kit-learn](https://github.com/imviancagrace/spec-kit-learn) |
| MAQA — Multi-Agent & Quality Assurance | Coordinator → feature → QA agent workflow with parallel worktree-based implementation. Language-agnostic. Auto-detects installed board plugins. Optional CI gate. | `process` | Read+Write | [spec-kit-maqa-ext](https://github.com/GenieRobot/spec-kit-maqa-ext) |
| MAQA Azure DevOps Integration | Azure DevOps Boards integration for MAQA — syncs User Stories and Task children as features progress | `integration` | Read+Write | [spec-kit-maqa-azure-devops](https://github.com/GenieRobot/spec-kit-maqa-azure-devops) |
| MAQA CI/CD Gate | Auto-detects GitHub Actions, CircleCI, GitLab CI, and Bitbucket Pipelines. Blocks QA handoff until pipeline is green. | `process` | Read+Write | [spec-kit-maqa-ci](https://github.com/GenieRobot/spec-kit-maqa-ci) |
| MAQA GitHub Projects Integration | GitHub Projects v2 integration for MAQA — syncs draft issues and Status columns as features progress | `integration` | Read+Write | [spec-kit-maqa-github-projects](https://github.com/GenieRobot/spec-kit-maqa-github-projects) |
| MAQA Jira Integration | Jira integration for MAQA — syncs Stories and Subtasks as features progress through the board | `integration` | Read+Write | [spec-kit-maqa-jira](https://github.com/GenieRobot/spec-kit-maqa-jira) |
| MAQA Linear Integration | Linear integration for MAQA — syncs issues and sub-issues across workflow states as features progress | `integration` | Read+Write | [spec-kit-maqa-linear](https://github.com/GenieRobot/spec-kit-maqa-linear) |
| MAQA Trello Integration | Trello board integration for MAQA — populates board from specs, moves cards, real-time checklist ticking | `integration` | Read+Write | [spec-kit-maqa-trello](https://github.com/GenieRobot/spec-kit-maqa-trello) |
| Onboard | Contextual onboarding and progressive growth for developers new to spec-kit projects. Explains specs, maps dependencies, validates understanding, and guides the next step | `process` | Read+Write | [spec-kit-onboard](https://github.com/dmux/spec-kit-onboard) |
| Plan Review Gate | Require spec.md and plan.md to be merged via MR/PR before allowing task generation | `process` | Read-only | [spec-kit-plan-review-gate](https://github.com/luno/spec-kit-plan-review-gate) |
| Presetify | Create and validate presets and preset catalogs | `process` | Read+Write | [presetify](https://github.com/mnriem/spec-kit-extensions/tree/main/presetify) |
| Product Forge | Full product lifecycle: research → product spec → SpecKit → implement → verify → test | `process` | Read+Write | [speckit-product-forge](https://github.com/VaiYav/speckit-product-forge) |
| Project Health Check | Diagnose a Spec Kit project and report health issues across structure, agents, features, scripts, extensions, and git | `visibility` | Read-only | [spec-kit-doctor](https://github.com/KhawarHabibKhan/spec-kit-doctor) |
| Project Status | Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary | `visibility` | Read-only | [spec-kit-status](https://github.com/KhawarHabibKhan/spec-kit-status) |
| Ralph Loop | Autonomous implementation loop using AI agent CLI | `code` | Read+Write | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) |

@@ -208,7 +189,6 @@ The following community-contributed extensions are available in [`catalog.commun
| Retrospective Extension | Post-implementation retrospective with spec adherence scoring, drift analysis, and human-gated spec updates | `docs` | Read+Write | [spec-kit-retrospective](https://github.com/emi-dm/spec-kit-retrospective) |
| Review Extension | Post-implementation comprehensive code review with specialized agents for code quality, comments, tests, error handling, type design, and simplification | `code` | Read-only | [spec-kit-review](https://github.com/ismaelJimenez/spec-kit-review) |
| SDD Utilities | Resume interrupted workflows, validate project health, and verify spec-to-task traceability | `process` | Read+Write | [speckit-utils](https://github.com/mvanhorn/speckit-utils) |
| Superpowers Bridge | Orchestrates obra/superpowers skills within the spec-kit SDD workflow across the full lifecycle (clarification, TDD, review, verification, critique, debugging, branch completion) | `process` | Read+Write | [superpowers-bridge](https://github.com/RbBtSn0w/spec-kit-extensions/tree/main/superpowers-bridge) |
| Spec Sync | Detect and resolve drift between specs and implementation. AI-assisted resolution with human approval | `docs` | Read+Write | [spec-kit-sync](https://github.com/bgervin/spec-kit-sync) |
| Understanding | Automated requirements quality analysis — 31 deterministic metrics against IEEE/ISO standards with experimental energy-based ambiguity detection | `docs` | Read-only | [understanding](https://github.com/Testimonial/understanding) |
| V-Model Extension Pack | Enforces V-Model paired generation of development specs and test specs with full traceability | `docs` | Read+Write | [spec-kit-v-model](https://github.com/leocamello/spec-kit-v-model) |

TESTING.md: 66 lines changed

@@ -1,59 +1,8 @@
# Testing Guide

This document is the detailed testing companion to [`CONTRIBUTING.md`](./CONTRIBUTING.md).

Use it for three things:

1. running quick automated checks before manual testing,
2. manually testing affected slash commands through an AI agent, and
3. capturing the results in a PR-friendly format.
# Manual Testing Guide

Any change that affects a slash command's behavior requires manually testing that command through an AI agent and submitting results with the PR.

## Recommended order

1. **Sync your environment** — install the project and test dependencies.
2. **Run focused automated checks** — especially for packaging, scaffolding, agent config, and generated-file changes.
3. **Run manual agent tests** — for any affected slash commands.
4. **Paste results into your PR** — include both command-selection reasoning and manual test results.

## Quick automated checks

Run these before manual testing when your change affects packaging, scaffolding, templates, release artifacts, or agent wiring.

### Environment setup

```bash
cd <spec-kit-repo>
uv sync --extra test
source .venv/bin/activate # On Windows (CMD): .venv\Scripts\activate | (PowerShell): .venv\Scripts\Activate.ps1
```

### Generated package structure and content

```bash
uv run python -m pytest tests/test_core_pack_scaffold.py -q
```

This validates the generated files that CI-style packaging depends on, including directory layout, file names, frontmatter/TOML validity, placeholder replacement, `.specify/` path rewrites, and parity with `create-release-packages.sh`.

### Agent configuration and release wiring consistency

```bash
uv run python -m pytest tests/test_agent_config_consistency.py -q
```

Run this when you change agent metadata, release scripts, context update scripts, or artifact naming.

### Optional single-agent packaging spot check

```bash
AGENTS=copilot SCRIPTS=sh ./.github/workflows/scripts/create-release-packages.sh v1.0.0
```

Inspect `.genreleases/sdd-copilot-package-sh/` and the matching ZIP in `.genreleases/` when you want to review the exact packaged output for one agent/script combination.

## Manual testing process
## Process

1. **Identify affected commands** — use the [prompt below](#determining-which-tests-to-run) to have your agent analyze your changed files and determine which commands need testing.
2. **Set up a test project** — scaffold from your local branch (see [Setup](#setup)).

@@ -64,22 +13,19 @@ Inspect `.genreleases/sdd-copilot-package-sh/` and the matching ZIP in `.genrele

## Setup

```bash
# Install the project and test dependencies from your local branch
# Install the CLI from your local branch
cd <spec-kit-repo>
uv sync --extra test
source .venv/bin/activate # On Windows (CMD): .venv\Scripts\activate | (PowerShell): .venv\Scripts\Activate.ps1
uv venv .venv
source .venv/bin/activate # On Windows: .venv\Scripts\activate
uv pip install -e .
# Ensure the `specify` binary in this environment points at your working tree so the agent runs the branch you're testing.

# Initialize a test project using your local changes
uv run specify init /tmp/speckit-test --ai <agent> --offline
specify init /tmp/speckit-test --ai <agent> --offline
cd /tmp/speckit-test

# Open in your agent
```

If you are testing the packaged output rather than the live source tree, create a local release package first as described in [`CONTRIBUTING.md`](./CONTRIBUTING.md).

## Reporting results

Paste this into your PR:

@@ -44,7 +44,7 @@ provides:
- name: string # Required, pattern: ^speckit\.[a-z0-9-]+\.[a-z0-9-]+$
file: string # Required, relative path to command file
description: string # Required
aliases: [string] # Optional, same pattern as name; namespace must match extension.id and must not shadow core or installed extension commands
aliases: [string] # Optional, array of alternate names

config: # Optional, array of config files
- name: string # Config file name

@@ -41,7 +41,7 @@ provides:
- name: "speckit.my-ext.hello" # Must follow pattern: speckit.{ext-id}.{cmd}
file: "commands/hello.md"
description: "Say hello"
aliases: ["speckit.my-ext.hi"] # Optional aliases, same pattern
aliases: ["speckit.hello"] # Optional aliases

config: # Optional: Config files
- name: "my-ext-config.yml"

@@ -186,7 +186,7 @@ What the extension provides.
- `name`: Command name (must match `speckit.{ext-id}.{command}`)
- `file`: Path to command file (relative to extension root)
- `description`: Command description (optional)
- `aliases`: Alternative command names (optional, array; each must match `speckit.{ext-id}.{command}`)
- `aliases`: Alternative command names (optional, array)

### Optional Fields

@@ -514,7 +514,7 @@ zip -r spec-kit-my-ext-1.0.0.zip extension.yml commands/ scripts/ docs/
Users install with:

```bash
specify extension add <extension-name> --from https://github.com/.../spec-kit-my-ext-1.0.0.zip
specify extension add --from https://github.com/.../spec-kit-my-ext-1.0.0.zip
```

### Option 3: Community Reference Catalog

@@ -122,7 +122,7 @@ Test that users can install from your release:
specify extension add --dev /path/to/your-extension

# Test from GitHub archive
specify extension add <extension-name> --from https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip
specify extension add --from https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip
```

---

@@ -160,7 +160,7 @@ This will:

```bash
# From GitHub release
specify extension add <extension-name> --from https://github.com/org/spec-kit-ext/archive/refs/tags/v1.0.0.zip
specify extension add --from https://github.com/org/spec-kit-ext/archive/refs/tags/v1.0.0.zip
```

### Install from Local Directory (Development)

@@ -214,8 +214,8 @@ Extensions add commands that appear in your AI agent (Claude Code):
# In Claude Code
> /speckit.jira.specstoissues

# Or use a namespaced alias (if provided)
> /speckit.jira.sync
# Or use short alias (if provided)
> /speckit.specstoissues
```

### Extension Configuration

@@ -737,7 +737,7 @@ You can still install extensions not in your catalog using `--from`:
specify extension add jira

# Direct URL (bypasses catalog)
specify extension add <extension-name> --from https://github.com/someone/spec-kit-ext/archive/v1.0.0.zip
specify extension add --from https://github.com/someone/spec-kit-ext/archive/v1.0.0.zip

# Local development
specify extension add --dev /path/to/extension

@@ -807,7 +807,7 @@ specify extension add --dev /path/to/extension
2. Install older version of extension:

```bash
specify extension add <extension-name> --from https://github.com/org/ext/archive/v1.0.0.zip
specify extension add --from https://github.com/org/ext/archive/v1.0.0.zip
```

### MCP Tool Not Available

@@ -59,7 +59,7 @@ Populate your `catalog.json` with approved extensions:
Skip catalog curation - team members install directly using URLs:

```bash
specify extension add <extension-name> --from https://github.com/org/spec-kit-ext/archive/refs/tags/v1.0.0.zip
specify extension add --from https://github.com/org/spec-kit-ext/archive/refs/tags/v1.0.0.zip
```

**Benefits**: Quick for one-off testing or private extensions

@@ -108,7 +108,7 @@ specify extension search # See what's in your catalog
specify extension add <extension-name> # Install by name

# Direct from URL (bypasses catalog)
specify extension add <extension-name> --from https://github.com/<org>/<repo>/archive/refs/tags/<version>.zip
specify extension add --from https://github.com/<org>/<repo>/archive/refs/tags/<version>.zip

# List installed extensions
specify extension list

@@ -223,7 +223,7 @@ provides:
- name: "speckit.jira.specstoissues"
file: "commands/specstoissues.md"
description: "Create Jira hierarchy from spec and tasks"
aliases: ["speckit.jira.sync"] # Alternate names
aliases: ["speckit.specstoissues"] # Alternate names

- name: "speckit.jira.discover-fields"
file: "commands/discover-fields.md"

@@ -1517,7 +1517,7 @@ specify extension add github-projects
/speckit.github.taskstoissues
```

**Migration alias** (if needed):
**Compatibility shim** (if needed):

```yaml
# extension.yml

@@ -1525,10 +1525,10 @@ provides:
commands:
- name: "speckit.github.taskstoissues"
file: "commands/taskstoissues.md"
aliases: ["speckit.github.sync-taskstoissues"] # Alternate namespaced entry point
aliases: ["speckit.taskstoissues"] # Backward compatibility
```

AI agents register both names, so callers can migrate to the alternate alias without relying on deprecated global shortcuts like `/speckit.taskstoissues`.
AI agent registers both names, so old scripts work.

---

@@ -1,6 +1,6 @@
{
"schema_version": "1.0",
"updated_at": "2026-03-30T00:00:00Z",
"updated_at": "2026-03-19T12:08:20Z",
"catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json",
"extensions": {
"aide": {

@@ -242,7 +242,7 @@
"updated_at": "2026-03-19T12:08:20Z"
},
"docguard": {
"name": "DocGuard — CDD Enforcement",
"name": "DocGuard \u2014 CDD Enforcement",
"id": "docguard",
"description": "Canonical-Driven Development enforcement. Validates, scores, and traces project documentation with automated checks, AI-driven workflows, and spec-kit hooks. Zero NPM runtime dependencies.",
"author": "raccioly",
@@ -437,327 +437,6 @@
|
||||
"created_at": "2026-03-05T00:00:00Z",
|
||||
"updated_at": "2026-03-05T00:00:00Z"
|
||||
},
|
||||
"learn": {
|
||||
"name": "Learning Extension",
|
||||
"id": "learn",
|
||||
"description": "Generate educational guides from implementations and enhance clarifications with mentoring context.",
|
||||
"author": "Vianca Martinez",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/imviancagrace/spec-kit-learn/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/imviancagrace/spec-kit-learn",
|
||||
"homepage": "https://github.com/imviancagrace/spec-kit-learn",
|
||||
"documentation": "https://github.com/imviancagrace/spec-kit-learn/blob/main/README.md",
|
||||
"changelog": "https://github.com/imviancagrace/spec-kit-learn/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": [
|
||||
"learning",
|
||||
"education",
|
||||
"mentoring",
|
||||
"knowledge-transfer"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-17T00:00:00Z",
|
||||
"updated_at": "2026-03-17T00:00:00Z"
|
||||
},
|
||||
"maqa": {
|
||||
"name": "MAQA — Multi-Agent & Quality Assurance",
|
||||
"id": "maqa",
|
||||
"description": "Coordinator → feature → QA agent workflow with parallel worktree-based implementation. Language-agnostic. Auto-detects installed board plugins (Trello, Linear, GitHub Projects, Jira, Azure DevOps). Optional CI gate.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.3",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-ext/releases/download/maqa-v0.1.3/maqa.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-ext",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-ext",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-ext/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-ext/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 4,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": [
|
||||
"multi-agent",
|
||||
"orchestration",
|
||||
"quality-assurance",
|
||||
"workflow",
|
||||
"parallel",
|
||||
"tdd"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-26T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-azure-devops": {
|
||||
"name": "MAQA Azure DevOps Integration",
|
||||
"id": "maqa-azure-devops",
|
||||
"description": "Azure DevOps Boards integration for the MAQA extension. Populates work items from specs, moves User Stories across columns as features progress, real-time Task child ticking.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.0",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-azure-devops/releases/download/maqa-azure-devops-v0.1.0/maqa-azure-devops.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-azure-devops",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-azure-devops",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-azure-devops/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-azure-devops/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"azure-devops",
|
||||
"project-management",
|
||||
"multi-agent",
|
||||
"maqa",
|
||||
"kanban"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-ci": {
|
||||
"name": "MAQA CI/CD Gate",
|
||||
"id": "maqa-ci",
|
||||
"description": "CI/CD pipeline gate for the MAQA extension. Auto-detects GitHub Actions, CircleCI, GitLab CI, and Bitbucket Pipelines. Blocks QA handoff until pipeline is green.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.0",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-ci/releases/download/maqa-ci-v0.1.0/maqa-ci.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-ci",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-ci",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-ci/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-ci/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"ci-cd",
|
||||
"github-actions",
|
||||
"circleci",
|
||||
"gitlab-ci",
|
||||
"quality-gate",
|
||||
"maqa"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-github-projects": {
|
||||
"name": "MAQA GitHub Projects Integration",
|
||||
"id": "maqa-github-projects",
|
||||
"description": "GitHub Projects v2 integration for the MAQA extension. Populates draft issues from specs, moves items across Status columns as features progress, real-time task list ticking.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.0",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-github-projects/releases/download/maqa-github-projects-v0.1.0/maqa-github-projects.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-github-projects",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-github-projects",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-github-projects/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-github-projects/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"github-projects",
|
||||
"project-management",
|
||||
"multi-agent",
|
||||
"maqa",
|
||||
"kanban"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-jira": {
|
||||
"name": "MAQA Jira Integration",
|
||||
"id": "maqa-jira",
|
||||
"description": "Jira integration for the MAQA extension. Populates Stories from specs, moves issues across board columns as features progress, real-time Subtask ticking.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.0",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-jira/releases/download/maqa-jira-v0.1.0/maqa-jira.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-jira",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-jira",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-jira/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-jira/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"jira",
|
||||
"project-management",
|
||||
"multi-agent",
|
||||
"maqa",
|
||||
"kanban"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-linear": {
|
||||
"name": "MAQA Linear Integration",
|
||||
"id": "maqa-linear",
|
||||
"description": "Linear integration for the MAQA extension. Populates issues from specs, moves items across workflow states as features progress, real-time sub-issue ticking.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.0",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-linear/releases/download/maqa-linear-v0.1.0/maqa-linear.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-linear",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-linear",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-linear/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-linear/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"linear",
|
||||
"project-management",
|
||||
"multi-agent",
|
||||
"maqa",
|
||||
"kanban"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T00:00:00Z",
|
||||
"updated_at": "2026-03-27T00:00:00Z"
|
||||
},
|
||||
"maqa-trello": {
|
||||
"name": "MAQA Trello Integration",
|
||||
"id": "maqa-trello",
|
||||
"description": "Trello board integration for the MAQA extension. Populates board from specs, moves cards between lists as features progress, real-time checklist ticking.",
|
||||
"author": "GenieRobot",
|
||||
"version": "0.1.1",
|
||||
"download_url": "https://github.com/GenieRobot/spec-kit-maqa-trello/releases/download/maqa-trello-v0.1.1/maqa-trello.zip",
|
||||
"repository": "https://github.com/GenieRobot/spec-kit-maqa-trello",
|
||||
"homepage": "https://github.com/GenieRobot/spec-kit-maqa-trello",
|
||||
"documentation": "https://github.com/GenieRobot/spec-kit-maqa-trello/blob/main/README.md",
|
||||
"changelog": "https://github.com/GenieRobot/spec-kit-maqa-trello/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.3.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"trello",
|
||||
"project-management",
|
||||
"multi-agent",
|
||||
"maqa",
|
||||
"kanban"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-26T00:00:00Z",
|
||||
"updated_at": "2026-03-26T00:00:00Z"
|
||||
},
|
||||
"onboard": {
|
||||
"name": "Onboard",
|
||||
"id": "onboard",
|
||||
"description": "Contextual onboarding and progressive growth for developers new to spec-kit projects. Explains specs, maps dependencies, validates understanding, and guides the next step.",
|
||||
"author": "Rafael Sales",
|
||||
"version": "2.1.0",
|
||||
"download_url": "https://github.com/dmux/spec-kit-onboard/archive/refs/tags/v2.1.0.zip",
|
||||
"repository": "https://github.com/dmux/spec-kit-onboard",
|
||||
"homepage": "https://github.com/dmux/spec-kit-onboard",
|
||||
"documentation": "https://github.com/dmux/spec-kit-onboard/blob/main/README.md",
|
||||
"changelog": "https://github.com/dmux/spec-kit-onboard/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 7,
|
||||
"hooks": 3
|
||||
},
|
||||
"tags": [
|
||||
"onboarding",
|
||||
"learning",
|
||||
"mentoring",
|
||||
"developer-experience",
|
||||
"gamification",
|
||||
"knowledge-transfer"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-26T00:00:00Z",
|
||||
"updated_at": "2026-03-26T00:00:00Z"
|
||||
},
|
||||
"plan-review-gate": {
|
||||
"name": "Plan Review Gate",
|
||||
"id": "plan-review-gate",
|
||||
"description": "Require spec.md and plan.md to be merged via MR/PR before allowing task generation",
|
||||
"author": "luno",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/luno/spec-kit-plan-review-gate/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/luno/spec-kit-plan-review-gate",
|
||||
"homepage": "https://github.com/luno/spec-kit-plan-review-gate",
|
||||
"documentation": "https://github.com/luno/spec-kit-plan-review-gate/blob/main/README.md",
|
||||
"changelog": "https://github.com/luno/spec-kit-plan-review-gate/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 1,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": [
|
||||
"review",
|
||||
"quality",
|
||||
"workflow",
|
||||
"gate"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-27T08:22:30Z",
|
||||
"updated_at": "2026-03-27T08:22:30Z"
|
||||
},
|
||||
"presetify": {
|
||||
"name": "Presetify",
|
||||
"id": "presetify",
|
||||
@@ -789,38 +468,6 @@
|
||||
"created_at": "2026-03-18T00:00:00Z",
|
||||
"updated_at": "2026-03-18T00:00:00Z"
|
||||
},
|
||||
"product-forge": {
|
||||
"name": "Product Forge",
|
||||
"id": "product-forge",
|
||||
"description": "Full product lifecycle: research \u2192 product spec \u2192 SpecKit \u2192 implement \u2192 verify \u2192 test",
|
||||
"author": "VaiYav",
|
||||
"version": "1.1.1",
|
||||
"download_url": "https://github.com/VaiYav/speckit-product-forge/archive/refs/tags/v1.1.1.zip",
|
||||
"repository": "https://github.com/VaiYav/speckit-product-forge",
|
||||
"homepage": "https://github.com/VaiYav/speckit-product-forge",
|
||||
"documentation": "https://github.com/VaiYav/speckit-product-forge/blob/main/README.md",
|
||||
"changelog": "https://github.com/VaiYav/speckit-product-forge/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 10,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"process",
|
||||
"research",
|
||||
"product-spec",
|
||||
"lifecycle",
|
||||
"testing"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-28T00:00:00Z",
|
||||
"updated_at": "2026-03-28T00:00:00Z"
|
||||
},
|
||||
"ralph": {
|
||||
"name": "Ralph Loop",
|
||||
"id": "ralph",
|
||||
@@ -991,81 +638,6 @@
|
||||
"created_at": "2026-03-18T00:00:00Z",
|
||||
"updated_at": "2026-03-18T00:00:00Z"
|
||||
},
|
||||
"status": {
|
||||
"name": "Project Status",
|
||||
"id": "status",
|
||||
"description": "Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary.",
|
||||
"author": "KhawarHabibKhan",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/KhawarHabibKhan/spec-kit-status/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/KhawarHabibKhan/spec-kit-status",
|
||||
"homepage": "https://github.com/KhawarHabibKhan/spec-kit-status",
|
||||
"documentation": "https://github.com/KhawarHabibKhan/spec-kit-status/blob/main/README.md",
|
||||
"changelog": "https://github.com/KhawarHabibKhan/spec-kit-status/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 1,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"status",
|
||||
"workflow",
|
||||
"progress",
|
||||
"feature-tracking",
|
||||
"task-progress"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-16T00:00:00Z",
|
||||
"updated_at": "2026-03-16T00:00:00Z"
|
||||
},
|
||||
"superb": {
|
||||
"name": "Superpowers Bridge",
|
||||
"id": "superb",
|
||||
"description": "Orchestrates obra/superpowers skills within the spec-kit SDD workflow. Thin bridge commands delegate to superpowers' authoritative SKILL.md files at runtime (with graceful fallback), while bridge-original commands provide spec-kit-native value. Eight commands cover the full lifecycle: intent clarification, TDD enforcement, task review, verification, critique, systematic debugging, branch completion, and review response. Hook-bound commands fire automatically; standalone commands are invoked when needed.",
|
||||
"author": "rbbtsn0w",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/RbBtSn0w/spec-kit-extensions/releases/download/superpowers-bridge-v1.0.0/superpowers-bridge.zip",
|
||||
"repository": "https://github.com/RbBtSn0w/spec-kit-extensions",
|
||||
"homepage": "https://github.com/RbBtSn0w/spec-kit-extensions",
|
||||
"documentation": "https://github.com/RbBtSn0w/spec-kit-extensions/blob/main/superpowers-bridge/README.md",
|
||||
"changelog": "https://github.com/RbBtSn0w/spec-kit-extensions/blob/main/superpowers-bridge/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.4.3",
|
||||
"tools": [
|
||||
{
|
||||
"name": "superpowers",
|
||||
"version": ">=5.0.0",
|
||||
"required": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"provides": {
|
||||
"commands": 8,
|
||||
"hooks": 4
|
||||
},
|
||||
"tags": [
|
||||
"methodology",
|
||||
"tdd",
|
||||
"code-review",
|
||||
"workflow",
|
||||
"superpowers",
|
||||
"brainstorming",
|
||||
"verification",
|
||||
"debugging",
|
||||
"branch-management"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-30T00:00:00Z",
|
||||
"updated_at": "2026-03-30T00:00:00Z"
|
||||
},
|
||||
"sync": {
|
||||
"name": "Spec Sync",
|
||||
"id": "sync",
|
||||
@@ -1101,7 +673,7 @@
|
||||
"understanding": {
|
||||
"name": "Understanding",
|
||||
"id": "understanding",
|
||||
"description": "Automated requirements quality analysis — validates specs against IEEE/ISO standards using 31 deterministic metrics. Catches ambiguity, missing testability, and structural issues before they reach implementation. Includes experimental energy-based ambiguity detection using local LM token perplexity.",
|
||||
"description": "Automated requirements quality analysis \u2014 validates specs against IEEE/ISO standards using 31 deterministic metrics. Catches ambiguity, missing testability, and structural issues before they reach implementation. Includes experimental energy-based ambiguity detection using local LM token perplexity.",
|
||||
"author": "Ladislav Bihari",
|
||||
"version": "3.4.0",
|
||||
"download_url": "https://github.com/Testimonial/understanding/archive/refs/tags/v3.4.0.zip",
|
||||
@@ -1139,6 +711,38 @@
|
||||
"created_at": "2026-03-07T00:00:00Z",
|
||||
"updated_at": "2026-03-07T00:00:00Z"
|
||||
},
|
||||
"status": {
|
||||
"name": "Project Status",
|
||||
"id": "status",
|
||||
"description": "Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary.",
|
||||
"author": "KhawarHabibKhan",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/KhawarHabibKhan/spec-kit-status/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/KhawarHabibKhan/spec-kit-status",
|
||||
"homepage": "https://github.com/KhawarHabibKhan/spec-kit-status",
|
||||
"documentation": "https://github.com/KhawarHabibKhan/spec-kit-status/blob/main/README.md",
|
||||
"changelog": "https://github.com/KhawarHabibKhan/spec-kit-status/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 1,
|
||||
"hooks": 0
|
||||
},
|
||||
"tags": [
|
||||
"status",
|
||||
"workflow",
|
||||
"progress",
|
||||
"feature-tracking",
|
||||
"task-progress"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-16T00:00:00Z",
|
||||
"updated_at": "2026-03-16T00:00:00Z"
|
||||
},
|
||||
"v-model": {
|
||||
"name": "V-Model Extension Pack",
|
||||
"id": "v-model",
|
||||
@@ -1171,6 +775,37 @@
|
||||
"created_at": "2026-02-20T00:00:00Z",
|
||||
"updated_at": "2026-02-22T00:00:00Z"
|
||||
},
|
||||
"learn": {
|
||||
"name": "Learning Extension",
|
||||
"id": "learn",
|
||||
"description": "Generate educational guides from implementations and enhance clarifications with mentoring context.",
|
||||
"author": "Vianca Martinez",
|
||||
"version": "1.0.0",
|
||||
"download_url": "https://github.com/imviancagrace/spec-kit-learn/archive/refs/tags/v1.0.0.zip",
|
||||
"repository": "https://github.com/imviancagrace/spec-kit-learn",
|
||||
"homepage": "https://github.com/imviancagrace/spec-kit-learn",
|
||||
"documentation": "https://github.com/imviancagrace/spec-kit-learn/blob/main/README.md",
|
||||
"changelog": "https://github.com/imviancagrace/spec-kit-learn/blob/main/CHANGELOG.md",
|
||||
"license": "MIT",
|
||||
"requires": {
|
||||
"speckit_version": ">=0.1.0"
|
||||
},
|
||||
"provides": {
|
||||
"commands": 2,
|
||||
"hooks": 1
|
||||
},
|
||||
"tags": [
|
||||
"learning",
|
||||
"education",
|
||||
"mentoring",
|
||||
"knowledge-transfer"
|
||||
],
|
||||
"verified": false,
|
||||
"downloads": 0,
|
||||
"stars": 0,
|
||||
"created_at": "2026-03-17T00:00:00Z",
|
||||
"updated_at": "2026-03-17T00:00:00Z"
|
||||
},
|
||||
"verify": {
|
||||
"name": "Verify Extension",
|
||||
"id": "verify",
|
||||
|
||||
@@ -47,8 +47,8 @@ provides:
- name: "speckit.my-extension.example"
file: "commands/example.md"
description: "Example command that demonstrates functionality"
# Optional: Add aliases in the same namespaced format
aliases: ["speckit.my-extension.example-short"]
# Optional: Add aliases for shorter command names
aliases: ["speckit.example"]

# ADD MORE COMMANDS: Copy this block for each command
# - name: "speckit.my-extension.another-command"

@@ -1,6 +1,6 @@
[project]
name = "specify-cli"
version = "0.4.4.dev0"
version = "0.4.3"
description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)."
requires-python = ">=3.11"
dependencies = [

@@ -3,7 +3,6 @@
set -e

JSON_MODE=false
ALLOW_EXISTING=false
SHORT_NAME=""
BRANCH_NUMBER=""
USE_TIMESTAMP=false
@@ -15,9 +14,6 @@ while [ $i -le $# ]; do
--json)
JSON_MODE=true
;;
--allow-existing-branch)
ALLOW_EXISTING=true
;;
--short-name)
if [ $((i + 1)) -gt $# ]; then
echo 'Error: --short-name requires a value' >&2
@@ -49,11 +45,10 @@ while [ $i -le $# ]; do
USE_TIMESTAMP=true
;;
--help|-h)
echo "Usage: $0 [--json] [--allow-existing-branch] [--short-name <name>] [--number N] [--timestamp] <feature_description>"
echo "Usage: $0 [--json] [--short-name <name>] [--number N] [--timestamp] <feature_description>"
echo ""
echo "Options:"
echo "  --json                    Output in JSON format"
echo "  --allow-existing-branch   Switch to branch if it already exists instead of failing"
echo "  --short-name <name>       Provide a custom short name (2-4 words) for the branch"
echo "  --number N                Specify branch number manually (overrides auto-detection)"
echo "  --timestamp               Use timestamp prefix (YYYYMMDD-HHMMSS) instead of sequential numbering"
@@ -74,7 +69,7 @@ done

FEATURE_DESCRIPTION="${ARGS[*]}"
if [ -z "$FEATURE_DESCRIPTION" ]; then
echo "Usage: $0 [--json] [--allow-existing-branch] [--short-name <name>] [--number N] [--timestamp] <feature_description>" >&2
echo "Usage: $0 [--json] [--short-name <name>] [--number N] [--timestamp] <feature_description>" >&2
exit 1
fi

@@ -292,19 +287,12 @@ if [ "$HAS_GIT" = true ]; then
if ! git checkout -b "$BRANCH_NAME" 2>/dev/null; then
# Check if branch already exists
if git branch --list "$BRANCH_NAME" | grep -q .; then
if [ "$ALLOW_EXISTING" = true ]; then
# Switch to the existing branch instead of failing
if ! git checkout "$BRANCH_NAME" 2>/dev/null; then
>&2 echo "Error: Failed to switch to existing branch '$BRANCH_NAME'. Please resolve any local changes or conflicts and try again."
exit 1
fi
elif [ "$USE_TIMESTAMP" = true ]; then
if [ "$USE_TIMESTAMP" = true ]; then
>&2 echo "Error: Branch '$BRANCH_NAME' already exists. Rerun to get a new timestamp or use a different --short-name."
exit 1
else
>&2 echo "Error: Branch '$BRANCH_NAME' already exists. Please use a different feature name or specify a different number with --number."
exit 1
fi
exit 1
else
>&2 echo "Error: Failed to create git branch '$BRANCH_NAME'. Please check your git configuration and try again."
exit 1
@@ -317,15 +305,13 @@ fi
FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
mkdir -p "$FEATURE_DIR"

TEMPLATE=$(resolve_template "spec-template" "$REPO_ROOT") || true
SPEC_FILE="$FEATURE_DIR/spec.md"
if [ ! -f "$SPEC_FILE" ]; then
TEMPLATE=$(resolve_template "spec-template" "$REPO_ROOT") || true
if [ -n "$TEMPLATE" ] && [ -f "$TEMPLATE" ]; then
cp "$TEMPLATE" "$SPEC_FILE"
else
echo "Warning: Spec template not found; created empty spec file" >&2
touch "$SPEC_FILE"
fi
if [ -n "$TEMPLATE" ] && [ -f "$TEMPLATE" ]; then
cp "$TEMPLATE" "$SPEC_FILE"
else
echo "Warning: Spec template not found; created empty spec file" >&2
touch "$SPEC_FILE"
fi

# Inform the user how to persist the feature variable in their own shell
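The two numbering schemes described in the help text above (sequential `--number` vs `--timestamp`) differ only in how the branch prefix is derived. A rough Python sketch of the two prefix styles; the zero-padded sequence and the YYYYMMDD-HHMMSS form come from the help text and the `--branch-numbering` option later in this diff, while the `<prefix>-<short-name>` composition is an assumption for illustration only:

```python
from datetime import datetime

def branch_prefix(number: int | None = None, use_timestamp: bool = False) -> str:
    """Return a branch prefix in one of the two styles described in --help."""
    if use_timestamp:
        # --timestamp: YYYYMMDD-HHMMSS prefix, different on every run
        return datetime.now().strftime("%Y%m%d-%H%M%S")
    # sequential: zero-padded counter, e.g. 001, 002, ...
    return f"{number or 1:03d}"

# Assumed composition with the --short-name value (illustrative only).
print(f"{branch_prefix(number=7)}-add-user-auth")             # 007-add-user-auth
print(f"{branch_prefix(use_timestamp=True)}-add-user-auth")   # e.g. 20260317-101530-add-user-auth
```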
@@ -63,7 +63,7 @@ AGENT_TYPE="${1:-}"
# Agent-specific file paths
CLAUDE_FILE="$REPO_ROOT/CLAUDE.md"
GEMINI_FILE="$REPO_ROOT/GEMINI.md"
COPILOT_FILE="$REPO_ROOT/.github/copilot-instructions.md"
COPILOT_FILE="$REPO_ROOT/.github/agents/copilot-instructions.md"
CURSOR_FILE="$REPO_ROOT/.cursor/rules/specify-rules.mdc"
QWEN_FILE="$REPO_ROOT/QWEN.md"
AGENTS_FILE="$REPO_ROOT/AGENTS.md"
@@ -3,7 +3,6 @@
[CmdletBinding()]
param(
[switch]$Json,
[switch]$AllowExistingBranch,
[string]$ShortName,
[Parameter()]
[long]$Number = 0,
@@ -16,11 +15,10 @@ $ErrorActionPreference = 'Stop'

# Show help if requested
if ($Help) {
Write-Host "Usage: ./create-new-feature.ps1 [-Json] [-AllowExistingBranch] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
Write-Host "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
Write-Host ""
Write-Host "Options:"
Write-Host "  -Json                  Output in JSON format"
Write-Host "  -AllowExistingBranch   Switch to branch if it already exists instead of failing"
Write-Host "  -ShortName <name>      Provide a custom short name (2-4 words) for the branch"
Write-Host "  -Number N              Specify branch number manually (overrides auto-detection)"
Write-Host "  -Timestamp             Use timestamp prefix (YYYYMMDD-HHMMSS) instead of sequential numbering"
@@ -35,7 +33,7 @@ if ($Help) {

# Check if feature description provided
if (-not $FeatureDescription -or $FeatureDescription.Count -eq 0) {
Write-Error "Usage: ./create-new-feature.ps1 [-Json] [-AllowExistingBranch] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
Write-Error "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] [-Number N] [-Timestamp] <feature description>"
exit 1
}
@@ -253,20 +251,12 @@ if ($hasGit) {
# Check if branch already exists
$existingBranch = git branch --list $branchName 2>$null
if ($existingBranch) {
if ($AllowExistingBranch) {
# Switch to the existing branch instead of failing
git checkout -q $branchName 2>$null | Out-Null
if ($LASTEXITCODE -ne 0) {
Write-Error "Error: Branch '$branchName' exists but could not be checked out. Resolve any uncommitted changes or conflicts and try again."
exit 1
}
} elseif ($Timestamp) {
if ($Timestamp) {
Write-Error "Error: Branch '$branchName' already exists. Rerun to get a new timestamp or use a different -ShortName."
exit 1
} else {
Write-Error "Error: Branch '$branchName' already exists. Please use a different feature name or specify a different number with -Number."
exit 1
}
exit 1
} else {
Write-Error "Error: Failed to create git branch '$branchName'. Please check your git configuration and try again."
exit 1
@@ -279,14 +269,12 @@ if ($hasGit) {
$featureDir = Join-Path $specsDir $branchName
New-Item -ItemType Directory -Path $featureDir -Force | Out-Null

$template = Resolve-Template -TemplateName 'spec-template' -RepoRoot $repoRoot
$specFile = Join-Path $featureDir 'spec.md'
if (-not (Test-Path -PathType Leaf $specFile)) {
$template = Resolve-Template -TemplateName 'spec-template' -RepoRoot $repoRoot
if ($template -and (Test-Path $template)) {
Copy-Item $template $specFile -Force
} else {
New-Item -ItemType File -Path $specFile | Out-Null
}
if ($template -and (Test-Path $template)) {
Copy-Item $template $specFile -Force
} else {
New-Item -ItemType File -Path $specFile | Out-Null
}

# Set the SPECIFY_FEATURE environment variable for the current session
@@ -46,7 +46,7 @@ $NEW_PLAN = $IMPL_PLAN
# Agent file paths
$CLAUDE_FILE = Join-Path $REPO_ROOT 'CLAUDE.md'
$GEMINI_FILE = Join-Path $REPO_ROOT 'GEMINI.md'
$COPILOT_FILE = Join-Path $REPO_ROOT '.github/copilot-instructions.md'
$COPILOT_FILE = Join-Path $REPO_ROOT '.github/agents/copilot-instructions.md'
$CURSOR_FILE = Join-Path $REPO_ROOT '.cursor/rules/specify-rules.mdc'
$QWEN_FILE = Join-Path $REPO_ROOT 'QWEN.md'
$AGENTS_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
@@ -345,7 +345,6 @@ AI_ASSISTANT_HELP = _build_ai_assistant_help()
|
||||
SCRIPT_TYPE_CHOICES = {"sh": "POSIX Shell (bash/zsh)", "ps": "PowerShell"}
|
||||
|
||||
CLAUDE_LOCAL_PATH = Path.home() / ".claude" / "local" / "claude"
|
||||
CLAUDE_NPM_LOCAL_PATH = Path.home() / ".claude" / "local" / "node_modules" / ".bin" / "claude"
|
||||
|
||||
BANNER = """
|
||||
███████╗██████╗ ███████╗ ██████╗██╗███████╗██╗ ██╗
|
||||
@@ -606,15 +605,13 @@ def check_tool(tool: str, tracker: StepTracker = None) -> bool:
|
||||
Returns:
|
||||
True if tool is found, False otherwise
|
||||
"""
|
||||
# Special handling for Claude CLI local installs
|
||||
# Special handling for Claude CLI after `claude migrate-installer`
|
||||
# See: https://github.com/github/spec-kit/issues/123
|
||||
# See: https://github.com/github/spec-kit/issues/550
|
||||
# Claude Code can be installed in two local paths:
|
||||
# 1. ~/.claude/local/claude (after `claude migrate-installer`)
|
||||
# 2. ~/.claude/local/node_modules/.bin/claude (npm-local install, e.g. via nvm)
|
||||
# Neither path may be on the system PATH, so we check them explicitly.
|
||||
# The migrate-installer command REMOVES the original executable from PATH
|
||||
# and creates an alias at ~/.claude/local/claude instead
|
||||
# This path should be prioritized over other claude executables in PATH
|
||||
if tool == "claude":
|
||||
if CLAUDE_LOCAL_PATH.is_file() or CLAUDE_NPM_LOCAL_PATH.is_file():
|
||||
if CLAUDE_LOCAL_PATH.exists() and CLAUDE_LOCAL_PATH.is_file():
|
||||
if tracker:
|
||||
tracker.complete(tool, "available")
|
||||
return True
|
||||
@@ -1197,84 +1194,6 @@ def _locate_release_script() -> tuple[Path, str]:
|
||||
raise FileNotFoundError(f"Release script '{name}' not found in core_pack or source checkout")
|
||||
|
||||
|
||||
def _install_shared_infra(
|
||||
project_path: Path,
|
||||
script_type: str,
|
||||
tracker: StepTracker | None = None,
|
||||
) -> bool:
|
||||
"""Install shared infrastructure files into *project_path*.
|
||||
|
||||
Copies ``.specify/scripts/`` and ``.specify/templates/`` from the
|
||||
bundled core_pack or source checkout. Tracks all installed files
|
||||
in ``speckit.manifest.json``.
|
||||
Returns ``True`` on success.
|
||||
"""
|
||||
from .integrations.manifest import IntegrationManifest
|
||||
|
||||
core = _locate_core_pack()
|
||||
manifest = IntegrationManifest("speckit", project_path, version=get_speckit_version())
|
||||
|
||||
# Scripts
|
||||
if core and (core / "scripts").is_dir():
|
||||
scripts_src = core / "scripts"
|
||||
else:
|
||||
repo_root = Path(__file__).parent.parent.parent
|
||||
scripts_src = repo_root / "scripts"
|
||||
|
||||
skipped_files: list[str] = []
|
||||
|
||||
if scripts_src.is_dir():
|
||||
dest_scripts = project_path / ".specify" / "scripts"
|
||||
dest_scripts.mkdir(parents=True, exist_ok=True)
|
||||
variant_dir = "bash" if script_type == "sh" else "powershell"
|
||||
variant_src = scripts_src / variant_dir
|
||||
if variant_src.is_dir():
|
||||
dest_variant = dest_scripts / variant_dir
|
||||
dest_variant.mkdir(parents=True, exist_ok=True)
|
||||
# Merge without overwriting — only add files that don't exist yet
|
||||
for src_path in variant_src.rglob("*"):
|
||||
if src_path.is_file():
|
||||
rel_path = src_path.relative_to(variant_src)
|
||||
dst_path = dest_variant / rel_path
|
||||
if dst_path.exists():
|
||||
skipped_files.append(str(dst_path.relative_to(project_path)))
|
||||
else:
|
||||
dst_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
shutil.copy2(src_path, dst_path)
|
||||
rel = dst_path.relative_to(project_path).as_posix()
|
||||
manifest.record_existing(rel)
|
||||
|
||||
# Page templates (not command templates, not vscode-settings.json)
|
||||
if core and (core / "templates").is_dir():
|
||||
templates_src = core / "templates"
|
||||
else:
|
||||
repo_root = Path(__file__).parent.parent.parent
|
||||
templates_src = repo_root / "templates"
|
||||
|
||||
if templates_src.is_dir():
|
||||
dest_templates = project_path / ".specify" / "templates"
|
||||
dest_templates.mkdir(parents=True, exist_ok=True)
|
||||
for f in templates_src.iterdir():
|
||||
if f.is_file() and f.name != "vscode-settings.json" and not f.name.startswith("."):
|
||||
dst = dest_templates / f.name
|
||||
if dst.exists():
|
||||
skipped_files.append(str(dst.relative_to(project_path)))
|
||||
else:
|
||||
shutil.copy2(f, dst)
|
||||
rel = dst.relative_to(project_path).as_posix()
|
||||
manifest.record_existing(rel)
|
||||
|
||||
if skipped_files:
|
||||
import logging
|
||||
logging.getLogger(__name__).warning(
|
||||
"The following shared files already exist and were not overwritten:\n%s",
|
||||
"\n".join(f" {f}" for f in skipped_files),
|
||||
)
|
||||
|
||||
manifest.save()
|
||||
return True
|
||||
|
||||
|
||||
def scaffold_from_core_pack(
|
||||
project_path: Path,
|
||||
ai_assistant: str,
|
||||
@@ -1906,7 +1825,6 @@ def init(
|
||||
offline: bool = typer.Option(False, "--offline", help="Use assets bundled in the specify-cli package instead of downloading from GitHub (no network access required). Bundled assets will become the default in v0.6.0 and this flag will be removed."),
|
||||
preset: str = typer.Option(None, "--preset", help="Install a preset during initialization (by preset ID)"),
|
||||
branch_numbering: str = typer.Option(None, "--branch-numbering", help="Branch numbering strategy: 'sequential' (001, 002, ...) or 'timestamp' (YYYYMMDD-HHMMSS)"),
|
||||
integration: str = typer.Option(None, "--integration", help="Use the new integration system (e.g. --integration copilot). Mutually exclusive with --ai."),
|
||||
):
|
||||
"""
|
||||
Initialize a new Specify project.
|
||||
@@ -1968,35 +1886,6 @@ def init(
|
||||
if ai_assistant:
|
||||
ai_assistant = AI_ASSISTANT_ALIASES.get(ai_assistant, ai_assistant)
|
||||
|
||||
# --integration and --ai are mutually exclusive
|
||||
if integration and ai_assistant:
|
||||
console.print("[red]Error:[/red] --integration and --ai are mutually exclusive")
|
||||
console.print("[yellow]Use:[/yellow] --integration for the new integration system, or --ai for the legacy path")
|
||||
raise typer.Exit(1)
|
||||
|
||||
# Auto-promote: --ai copilot → integration path with a nudge
|
||||
use_integration = False
|
||||
if integration:
|
||||
from .integrations import INTEGRATION_REGISTRY, get_integration
|
||||
resolved_integration = get_integration(integration)
|
||||
if not resolved_integration:
|
||||
console.print(f"[red]Error:[/red] Unknown integration: '{integration}'")
|
||||
available = ", ".join(sorted(INTEGRATION_REGISTRY))
|
||||
console.print(f"[yellow]Available integrations:[/yellow] {available}")
|
||||
raise typer.Exit(1)
|
||||
use_integration = True
|
||||
# Map integration key to the ai_assistant variable for downstream compatibility
|
||||
ai_assistant = integration
|
||||
elif ai_assistant == "copilot":
|
||||
from .integrations import get_integration
|
||||
resolved_integration = get_integration("copilot")
|
||||
if resolved_integration:
|
||||
use_integration = True
|
||||
console.print(
|
||||
"[dim]Tip: Use [bold]--integration copilot[/bold] instead of "
|
||||
"--ai copilot. The --ai flag will be deprecated in a future release.[/dim]"
|
||||
)
|
||||
|
||||
if project_name == ".":
|
||||
here = True
|
||||
project_name = None # Clear project_name to use existing validation logic
|
||||
@@ -2165,10 +2054,7 @@ def init(
|
||||
"This will become the default in v0.6.0."
|
||||
)
|
||||
|
||||
if use_integration:
|
||||
tracker.add("integration", "Install integration")
|
||||
tracker.add("shared-infra", "Install shared infrastructure")
|
||||
elif use_github:
|
||||
if use_github:
|
||||
for key, label in [
|
||||
("fetch", "Fetch latest release"),
|
||||
("download", "Download template"),
|
||||
@@ -2203,39 +2089,7 @@ def init(
|
||||
verify = not skip_tls
|
||||
local_ssl_context = ssl_context if verify else False
|
||||
|
||||
if use_integration:
|
||||
# Integration-based scaffolding (new path)
|
||||
from .integrations.manifest import IntegrationManifest
|
||||
tracker.start("integration")
|
||||
manifest = IntegrationManifest(
|
||||
resolved_integration.key, project_path, version=get_speckit_version()
|
||||
)
|
||||
resolved_integration.setup(
|
||||
project_path, manifest,
|
||||
script_type=selected_script,
|
||||
)
|
||||
manifest.save()
|
||||
|
||||
# Write .specify/integration.json
|
||||
script_ext = "sh" if selected_script == "sh" else "ps1"
|
||||
integration_json = project_path / ".specify" / "integration.json"
|
||||
integration_json.parent.mkdir(parents=True, exist_ok=True)
|
||||
integration_json.write_text(json.dumps({
|
||||
"integration": resolved_integration.key,
|
||||
"version": get_speckit_version(),
|
||||
"scripts": {
|
||||
"update-context": f".specify/integrations/{resolved_integration.key}/scripts/update-context.{script_ext}",
|
||||
},
|
||||
}, indent=2) + "\n", encoding="utf-8")
|
||||
|
||||
tracker.complete("integration", resolved_integration.config.get("name", resolved_integration.key))
|
||||
|
||||
# Install shared infrastructure (scripts, templates)
|
||||
tracker.start("shared-infra")
|
||||
_install_shared_infra(project_path, selected_script, tracker=tracker)
|
||||
tracker.complete("shared-infra", f"scripts ({selected_script}) + templates")
|
||||
|
||||
elif use_github:
|
||||
if use_github:
|
||||
with httpx.Client(verify=local_ssl_context) as local_client:
|
||||
download_and_extract_template(
|
||||
project_path,
|
||||
@@ -2370,7 +2224,7 @@ def init(
|
||||
# Persist the CLI options so later operations (e.g. preset add)
|
||||
# can adapt their behaviour without re-scanning the filesystem.
|
||||
# Must be saved BEFORE preset install so _get_skills_dir() works.
|
||||
init_opts = {
|
||||
save_init_options(project_path, {
|
||||
"ai": selected_ai,
|
||||
"ai_skills": ai_skills,
|
||||
"ai_commands_dir": ai_commands_dir,
|
||||
@@ -2380,10 +2234,7 @@ def init(
|
||||
"offline": offline,
|
||||
"script": selected_script,
|
||||
"speckit_version": get_speckit_version(),
|
||||
}
|
||||
if use_integration:
|
||||
init_opts["integration"] = resolved_integration.key
|
||||
save_init_options(project_path, init_opts)
|
||||
})
|
||||
|
||||
# Install preset if specified
|
||||
if preset:
|
||||
|
||||
@@ -25,49 +25,6 @@ import yaml
|
||||
from packaging import version as pkg_version
|
||||
from packaging.specifiers import SpecifierSet, InvalidSpecifier
|
||||
|
||||
_FALLBACK_CORE_COMMAND_NAMES = frozenset({
|
||||
"analyze",
|
||||
"checklist",
|
||||
"clarify",
|
||||
"constitution",
|
||||
"implement",
|
||||
"plan",
|
||||
"specify",
|
||||
"tasks",
|
||||
"taskstoissues",
|
||||
})
|
||||
EXTENSION_COMMAND_NAME_PATTERN = re.compile(r"^speckit\.([a-z0-9-]+)\.([a-z0-9-]+)$")
|
||||
|
||||
|
||||
def _load_core_command_names() -> frozenset[str]:
|
||||
"""Discover bundled core command names from the packaged templates.
|
||||
|
||||
Prefer the wheel-time ``core_pack`` bundle when present, and fall back to
|
||||
the source checkout when running from the repository. If neither is
|
||||
available, use the baked-in fallback set so validation still works.
|
||||
"""
|
||||
candidate_dirs = [
|
||||
Path(__file__).parent / "core_pack" / "commands",
|
||||
Path(__file__).resolve().parent.parent.parent / "templates" / "commands",
|
||||
]
|
||||
|
||||
for commands_dir in candidate_dirs:
|
||||
if not commands_dir.is_dir():
|
||||
continue
|
||||
|
||||
command_names = {
|
||||
command_file.stem
|
||||
for command_file in commands_dir.iterdir()
|
||||
if command_file.is_file() and command_file.suffix == ".md"
|
||||
}
|
||||
if command_names:
|
||||
return frozenset(command_names)
|
||||
|
||||
return _FALLBACK_CORE_COMMAND_NAMES
|
||||
|
||||
|
||||
CORE_COMMAND_NAMES = _load_core_command_names()
|
||||
|
||||
|
||||
class ExtensionError(Exception):
|
||||
"""Base exception for extension-related errors."""
|
||||
@@ -192,7 +149,7 @@ class ExtensionManifest:
|
||||
raise ValidationError("Command missing 'name' or 'file'")
|
||||
|
||||
# Validate command name format
|
||||
if EXTENSION_COMMAND_NAME_PATTERN.match(cmd["name"]) is None:
|
||||
if not re.match(r'^speckit\.[a-z0-9-]+\.[a-z0-9-]+$', cmd["name"]):
|
||||
raise ValidationError(
|
||||
f"Invalid command name '{cmd['name']}': "
|
||||
"must follow pattern 'speckit.{extension}.{command}'"
|
||||
@@ -489,126 +446,6 @@ class ExtensionManager:
|
||||
self.extensions_dir = project_root / ".specify" / "extensions"
|
||||
self.registry = ExtensionRegistry(self.extensions_dir)
|
||||
|
||||
@staticmethod
|
||||
def _collect_manifest_command_names(manifest: ExtensionManifest) -> Dict[str, str]:
|
||||
"""Collect command and alias names declared by a manifest.
|
||||
|
||||
Performs install-time validation for extension-specific constraints:
|
||||
- commands and aliases must use the canonical `speckit.{extension}.{command}` shape
|
||||
- commands and aliases must use this extension's namespace
|
||||
- command namespaces must not shadow core commands
|
||||
- duplicate command/alias names inside one manifest are rejected
|
||||
|
||||
Args:
|
||||
manifest: Parsed extension manifest
|
||||
|
||||
Returns:
|
||||
Mapping of declared command/alias name -> kind ("command"/"alias")
|
||||
|
||||
Raises:
|
||||
ValidationError: If any declared name is invalid
|
||||
"""
|
||||
if manifest.id in CORE_COMMAND_NAMES:
|
||||
raise ValidationError(
|
||||
f"Extension ID '{manifest.id}' conflicts with core command namespace '{manifest.id}'"
|
||||
)
|
||||
|
||||
declared_names: Dict[str, str] = {}
|
||||
|
||||
for cmd in manifest.commands:
|
||||
primary_name = cmd["name"]
|
||||
aliases = cmd.get("aliases", [])
|
||||
|
||||
if aliases is None:
|
||||
aliases = []
|
||||
if not isinstance(aliases, list):
|
||||
raise ValidationError(
|
||||
f"Aliases for command '{primary_name}' must be a list"
|
||||
)
|
||||
|
||||
for kind, name in [("command", primary_name)] + [
|
||||
("alias", alias) for alias in aliases
|
||||
]:
|
||||
if not isinstance(name, str):
|
||||
raise ValidationError(
|
||||
f"{kind.capitalize()} for command '{primary_name}' must be a string"
|
||||
)
|
||||
|
||||
match = EXTENSION_COMMAND_NAME_PATTERN.match(name)
|
||||
if match is None:
|
||||
raise ValidationError(
|
||||
f"Invalid {kind} '{name}': "
|
||||
"must follow pattern 'speckit.{extension}.{command}'"
|
||||
)
|
||||
|
||||
namespace = match.group(1)
|
||||
if namespace != manifest.id:
|
||||
raise ValidationError(
|
||||
f"{kind.capitalize()} '{name}' must use extension namespace '{manifest.id}'"
|
||||
)
|
||||
|
||||
if namespace in CORE_COMMAND_NAMES:
|
||||
raise ValidationError(
|
||||
f"{kind.capitalize()} '{name}' conflicts with core command namespace '{namespace}'"
|
||||
)
|
||||
|
||||
if name in declared_names:
|
||||
raise ValidationError(
|
||||
f"Duplicate command or alias '{name}' in extension manifest"
|
||||
)
|
||||
|
||||
declared_names[name] = kind
|
||||
|
||||
return declared_names
|
||||
|
||||
def _get_installed_command_name_map(
|
||||
self,
|
||||
exclude_extension_id: Optional[str] = None,
|
||||
) -> Dict[str, str]:
|
||||
"""Return registered command and alias names for installed extensions."""
|
||||
installed_names: Dict[str, str] = {}
|
||||
|
||||
for ext_id in self.registry.keys():
|
||||
if ext_id == exclude_extension_id:
|
||||
continue
|
||||
|
||||
manifest = self.get_extension(ext_id)
|
||||
if manifest is None:
|
||||
continue
|
||||
|
||||
for cmd in manifest.commands:
|
||||
cmd_name = cmd.get("name")
|
||||
if isinstance(cmd_name, str):
|
||||
installed_names.setdefault(cmd_name, ext_id)
|
||||
|
||||
aliases = cmd.get("aliases", [])
|
||||
if not isinstance(aliases, list):
|
||||
continue
|
||||
|
||||
for alias in aliases:
|
||||
if isinstance(alias, str):
|
||||
installed_names.setdefault(alias, ext_id)
|
||||
|
||||
return installed_names
|
||||
|
||||
def _validate_install_conflicts(self, manifest: ExtensionManifest) -> None:
|
||||
"""Reject installs that would shadow core or installed extension commands."""
|
||||
declared_names = self._collect_manifest_command_names(manifest)
|
||||
installed_names = self._get_installed_command_name_map(
|
||||
exclude_extension_id=manifest.id
|
||||
)
|
||||
|
||||
collisions = [
|
||||
f"{name} (already provided by extension '{installed_names[name]}')"
|
||||
for name in sorted(declared_names)
|
||||
if name in installed_names
|
||||
]
|
||||
if collisions:
|
||||
raise ValidationError(
|
||||
"Extension commands conflict with installed extensions:\n- "
|
||||
+ "\n- ".join(collisions)
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _load_extensionignore(source_dir: Path) -> Optional[Callable[[str, List[str]], Set[str]]]:
|
||||
"""Load .extensionignore and return an ignore function for shutil.copytree.
|
||||
@@ -1024,9 +861,6 @@ class ExtensionManager:
|
||||
f"Use 'specify extension remove {manifest.id}' first."
|
||||
)
|
||||
|
||||
# Reject manifests that would shadow core commands or installed extensions.
|
||||
self._validate_install_conflicts(manifest)
|
||||
|
||||
# Install extension
|
||||
dest_dir = self.extensions_dir / manifest.id
|
||||
if dest_dir.exists():
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
"""Integration registry for AI coding assistants.
|
||||
|
||||
Each integration is a self-contained subpackage that handles setup/teardown
|
||||
for a specific AI assistant (Copilot, Claude, Gemini, etc.).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .base import IntegrationBase
|
||||
|
||||
# Maps integration key → IntegrationBase instance.
|
||||
# Populated by later stages as integrations are migrated.
|
||||
INTEGRATION_REGISTRY: dict[str, IntegrationBase] = {}
|
||||
|
||||
|
||||
def _register(integration: IntegrationBase) -> None:
|
||||
"""Register an integration instance in the global registry.
|
||||
|
||||
Raises ``ValueError`` for falsy keys and ``KeyError`` for duplicates.
|
||||
"""
|
||||
key = integration.key
|
||||
if not key:
|
||||
raise ValueError("Cannot register integration with an empty key.")
|
||||
if key in INTEGRATION_REGISTRY:
|
||||
raise KeyError(f"Integration with key {key!r} is already registered.")
|
||||
INTEGRATION_REGISTRY[key] = integration
|
||||
|
||||
|
||||
def get_integration(key: str) -> IntegrationBase | None:
|
||||
"""Return the integration for *key*, or ``None`` if not registered."""
|
||||
return INTEGRATION_REGISTRY.get(key)
|
||||
|
||||
|
||||
# -- Register built-in integrations --------------------------------------
|
||||
|
||||
def _register_builtins() -> None:
|
||||
"""Register all built-in integrations."""
|
||||
from .copilot import CopilotIntegration
|
||||
|
||||
_register(CopilotIntegration())
|
||||
|
||||
|
||||
_register_builtins()
|
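A short usage sketch for the registry above: lookups of unknown keys return None, and duplicate registration raises. The `specify_cli` package path is an assumption inferred from the project name in pyproject.toml:

```python
# Package path assumed from the "specify-cli" project name.
from specify_cli.integrations import get_integration, _register
from specify_cli.integrations.copilot import CopilotIntegration

# The built-in Copilot integration is registered when the package is imported.
copilot = get_integration("copilot")
assert copilot is not None and copilot.key == "copilot"

# Unknown keys return None instead of raising.
assert get_integration("does-not-exist") is None

# Registering the same key twice raises KeyError.
try:
    _register(CopilotIntegration())
except KeyError as exc:
    print(f"duplicate registration rejected: {exc}")
```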
||||
@@ -1,415 +0,0 @@
|
||||
"""Base classes for AI-assistant integrations.
|
||||
|
||||
Provides:
|
||||
- ``IntegrationOption`` — declares a CLI option an integration accepts.
|
||||
- ``IntegrationBase`` — abstract base every integration must implement.
|
||||
- ``MarkdownIntegration`` — concrete base for standard Markdown-format
|
||||
integrations (the common case — subclass, set three class attrs, done).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import shutil
|
||||
from abc import ABC
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .manifest import IntegrationManifest
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# IntegrationOption
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class IntegrationOption:
|
||||
"""Declares an option that an integration accepts via ``--integration-options``.
|
||||
|
||||
Attributes:
|
||||
name: The flag name (e.g. ``"--commands-dir"``).
|
||||
is_flag: ``True`` for boolean flags (``--skills``).
|
||||
required: ``True`` if the option must be supplied.
|
||||
default: Default value when not supplied (``None`` → no default).
|
||||
help: One-line description shown in ``specify integrate info``.
|
||||
"""
|
||||
|
||||
name: str
|
||||
is_flag: bool = False
|
||||
required: bool = False
|
||||
default: Any = None
|
||||
help: str = ""
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# IntegrationBase — abstract base class
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class IntegrationBase(ABC):
|
||||
"""Abstract base class every integration must implement.
|
||||
|
||||
Subclasses must set the following class-level attributes:
|
||||
|
||||
* ``key`` — unique identifier, matches actual CLI tool name
|
||||
* ``config`` — dict compatible with ``AGENT_CONFIG`` entries
|
||||
* ``registrar_config`` — dict compatible with ``CommandRegistrar.AGENT_CONFIGS``
|
||||
|
||||
And may optionally set:
|
||||
|
||||
* ``context_file`` — path (relative to project root) of the agent
|
||||
context/instructions file (e.g. ``"CLAUDE.md"``)
|
||||
"""
|
||||
|
||||
# -- Must be set by every subclass ------------------------------------
|
||||
|
||||
key: str = ""
|
||||
"""Unique integration key — should match the actual CLI tool name."""
|
||||
|
||||
config: dict[str, Any] | None = None
|
||||
"""Metadata dict matching the ``AGENT_CONFIG`` shape."""
|
||||
|
||||
registrar_config: dict[str, Any] | None = None
|
||||
"""Registration dict matching ``CommandRegistrar.AGENT_CONFIGS`` shape."""
|
||||
|
||||
# -- Optional ---------------------------------------------------------
|
||||
|
||||
context_file: str | None = None
|
||||
"""Relative path to the agent context file (e.g. ``CLAUDE.md``)."""
|
||||
|
||||
# -- Public API -------------------------------------------------------
|
||||
|
||||
@classmethod
|
||||
def options(cls) -> list[IntegrationOption]:
|
||||
"""Return options this integration accepts. Default: none."""
|
||||
return []
|
||||
|
||||
# -- Primitives — building blocks for setup() -------------------------
|
||||
|
||||
def shared_commands_dir(self) -> Path | None:
|
||||
"""Return path to the shared command templates directory.
|
||||
|
||||
Checks ``core_pack/commands/`` (wheel install) first, then
|
||||
``templates/commands/`` (source checkout). Returns ``None``
|
||||
if neither exists.
|
||||
"""
|
||||
import inspect
|
||||
|
||||
pkg_dir = Path(inspect.getfile(IntegrationBase)).resolve().parent.parent
|
||||
for candidate in [
|
||||
pkg_dir / "core_pack" / "commands",
|
||||
pkg_dir.parent.parent / "templates" / "commands",
|
||||
]:
|
||||
if candidate.is_dir():
|
||||
return candidate
|
||||
return None
|
||||
|
||||
def shared_templates_dir(self) -> Path | None:
|
||||
"""Return path to the shared page templates directory.
|
||||
|
||||
Contains ``vscode-settings.json``, ``spec-template.md``, etc.
|
||||
Checks ``core_pack/templates/`` then ``templates/``.
|
||||
"""
|
||||
import inspect
|
||||
|
||||
pkg_dir = Path(inspect.getfile(IntegrationBase)).resolve().parent.parent
|
||||
for candidate in [
|
||||
pkg_dir / "core_pack" / "templates",
|
||||
pkg_dir.parent.parent / "templates",
|
||||
]:
|
||||
if candidate.is_dir():
|
||||
return candidate
|
||||
return None
|
||||
|
||||
def list_command_templates(self) -> list[Path]:
|
||||
"""Return sorted list of command template files from the shared directory."""
|
||||
cmd_dir = self.shared_commands_dir()
|
||||
if not cmd_dir or not cmd_dir.is_dir():
|
||||
return []
|
||||
return sorted(f for f in cmd_dir.iterdir() if f.is_file() and f.suffix == ".md")
|
||||
|
||||
def command_filename(self, template_name: str) -> str:
|
||||
"""Return the destination filename for a command template.
|
||||
|
||||
*template_name* is the stem of the source file (e.g. ``"plan"``).
|
||||
Default: ``speckit.{template_name}.md``. Subclasses override
|
||||
to change the extension or naming convention.
|
||||
"""
|
||||
return f"speckit.{template_name}.md"
|
||||
|
||||
def commands_dest(self, project_root: Path) -> Path:
|
||||
"""Return the absolute path to the commands output directory.
|
||||
|
||||
Derived from ``config["folder"]`` and ``config["commands_subdir"]``.
|
||||
Raises ``ValueError`` if ``config`` or ``folder`` is missing.
|
||||
"""
|
||||
if not self.config:
|
||||
raise ValueError(
|
||||
f"{type(self).__name__}.config is not set; integration "
|
||||
"subclasses must define a non-empty 'config' mapping."
|
||||
)
|
||||
folder = self.config.get("folder")
|
||||
if not folder:
|
||||
raise ValueError(
|
||||
f"{type(self).__name__}.config is missing required 'folder' entry."
|
||||
)
|
||||
subdir = self.config.get("commands_subdir", "commands")
|
||||
return project_root / folder / subdir
|
||||
|
||||
# -- File operations — granular primitives for setup() ----------------
|
||||
|
||||
@staticmethod
|
||||
def copy_command_to_directory(
|
||||
src: Path,
|
||||
dest_dir: Path,
|
||||
filename: str,
|
||||
) -> Path:
|
||||
"""Copy a command template to *dest_dir* with the given *filename*.
|
||||
|
||||
Creates *dest_dir* if needed. Returns the absolute path of the
|
||||
written file. The caller can post-process the file before
|
||||
recording it in the manifest.
|
||||
"""
|
||||
dest_dir.mkdir(parents=True, exist_ok=True)
|
||||
dst = dest_dir / filename
|
||||
shutil.copy2(src, dst)
|
||||
return dst
|
||||
|
||||
@staticmethod
|
||||
def record_file_in_manifest(
|
||||
file_path: Path,
|
||||
project_root: Path,
|
||||
manifest: IntegrationManifest,
|
||||
) -> None:
|
||||
"""Hash *file_path* and record it in *manifest*.
|
||||
|
||||
*file_path* must be inside *project_root*.
|
||||
"""
|
||||
rel = file_path.resolve().relative_to(project_root.resolve())
|
||||
manifest.record_existing(rel)
|
||||
|
||||
@staticmethod
|
||||
def write_file_and_record(
|
||||
content: str,
|
||||
dest: Path,
|
||||
project_root: Path,
|
||||
manifest: IntegrationManifest,
|
||||
) -> Path:
|
||||
"""Write *content* to *dest*, hash it, and record in *manifest*.
|
||||
|
||||
Creates parent directories as needed. Returns *dest*.
|
||||
"""
|
||||
dest.parent.mkdir(parents=True, exist_ok=True)
|
||||
dest.write_text(content, encoding="utf-8")
|
||||
rel = dest.resolve().relative_to(project_root.resolve())
|
||||
manifest.record_existing(rel)
|
||||
return dest
|
||||
|
||||
@staticmethod
|
||||
def process_template(
|
||||
content: str,
|
||||
agent_name: str,
|
||||
script_type: str,
|
||||
arg_placeholder: str = "$ARGUMENTS",
|
||||
) -> str:
|
||||
"""Process a raw command template into agent-ready content.
|
||||
|
||||
Performs the same transformations as the release script:
|
||||
1. Extract ``scripts.<script_type>`` value from YAML frontmatter
|
||||
2. Replace ``{SCRIPT}`` with the extracted script command
|
||||
3. Extract ``agent_scripts.<script_type>`` and replace ``{AGENT_SCRIPT}``
|
||||
4. Strip ``scripts:`` and ``agent_scripts:`` sections from frontmatter
|
||||
5. Replace ``{ARGS}`` with *arg_placeholder*
|
||||
6. Replace ``__AGENT__`` with *agent_name*
|
||||
7. Rewrite paths: ``scripts/`` → ``.specify/scripts/`` etc.
|
||||
"""
|
||||
# 1. Extract script command from frontmatter
|
||||
script_command = ""
|
||||
script_pattern = re.compile(
|
||||
rf"^\s*{re.escape(script_type)}:\s*(.+)$", re.MULTILINE
|
||||
)
|
||||
# Find the scripts: block
|
||||
in_scripts = False
|
||||
for line in content.splitlines():
|
||||
if line.strip() == "scripts:":
|
||||
in_scripts = True
|
||||
continue
|
||||
if in_scripts and line and not line[0].isspace():
|
||||
in_scripts = False
|
||||
if in_scripts:
|
||||
m = script_pattern.match(line)
|
||||
if m:
|
||||
script_command = m.group(1).strip()
|
||||
break
|
||||
|
||||
# 2. Replace {SCRIPT}
|
||||
if script_command:
|
||||
content = content.replace("{SCRIPT}", script_command)
|
||||
|
||||
# 3. Extract agent_script command
|
||||
agent_script_command = ""
|
||||
in_agent_scripts = False
|
||||
for line in content.splitlines():
|
||||
if line.strip() == "agent_scripts:":
|
||||
in_agent_scripts = True
|
||||
continue
|
||||
if in_agent_scripts and line and not line[0].isspace():
|
||||
in_agent_scripts = False
|
||||
if in_agent_scripts:
|
||||
m = script_pattern.match(line)
|
||||
if m:
|
||||
agent_script_command = m.group(1).strip()
|
||||
break
|
||||
|
||||
if agent_script_command:
|
||||
content = content.replace("{AGENT_SCRIPT}", agent_script_command)
|
||||
|
||||
# 4. Strip scripts: and agent_scripts: sections from frontmatter
|
||||
lines = content.splitlines(keepends=True)
|
||||
output_lines: list[str] = []
|
||||
in_frontmatter = False
|
||||
skip_section = False
|
||||
dash_count = 0
|
||||
for line in lines:
|
||||
stripped = line.rstrip("\n\r")
|
||||
if stripped == "---":
|
||||
dash_count += 1
|
||||
if dash_count == 1:
|
||||
in_frontmatter = True
|
||||
else:
|
||||
in_frontmatter = False
|
||||
skip_section = False
|
||||
output_lines.append(line)
|
||||
continue
|
||||
if in_frontmatter:
|
||||
if stripped in ("scripts:", "agent_scripts:"):
|
||||
skip_section = True
|
||||
continue
|
||||
if skip_section:
|
||||
if line[0:1].isspace():
|
||||
continue # skip indented content under scripts/agent_scripts
|
||||
skip_section = False
|
||||
output_lines.append(line)
|
||||
content = "".join(output_lines)
|
||||
|
||||
# 5. Replace {ARGS}
|
||||
content = content.replace("{ARGS}", arg_placeholder)
|
||||
|
||||
# 6. Replace __AGENT__
|
||||
content = content.replace("__AGENT__", agent_name)
|
||||
|
||||
# 7. Rewrite paths (matches release script's rewrite_paths())
|
||||
content = re.sub(r"(/?)memory/", r".specify/memory/", content)
|
||||
content = re.sub(r"(/?)scripts/", r".specify/scripts/", content)
|
||||
content = re.sub(r"(/?)templates/", r".specify/templates/", content)
|
||||
# Fix double-prefix (same as release script's .specify.specify/ fix)
|
||||
content = content.replace(".specify.specify/", ".specify/")
|
||||
content = content.replace(".specify/.specify/", ".specify/")
|
||||
|
||||
return content
|
||||
|
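A worked example of the seven steps above, run against a tiny hypothetical template (the frontmatter keys follow the documented format; the body text and import path are made up for illustration). Note that the path rewrite in step 7 also applies to the script command substituted in step 2:

```python
# Module path assumed from the "specify-cli" project name.
from specify_cli.integrations.base import IntegrationBase

raw = """---
description: Example command
scripts:
  sh: scripts/bash/do-thing.sh --json
---
Run {SCRIPT} with {ARGS} for __AGENT__.
"""

processed = IntegrationBase.process_template(raw, agent_name="copilot", script_type="sh")
print(processed)
# ---
# description: Example command
# ---
# Run .specify/scripts/bash/do-thing.sh --json with $ARGUMENTS for copilot.
```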
||||
def setup(
|
||||
self,
|
||||
project_root: Path,
|
||||
manifest: IntegrationManifest,
|
||||
parsed_options: dict[str, Any] | None = None,
|
||||
**opts: Any,
|
||||
) -> list[Path]:
|
||||
"""Install integration command files into *project_root*.
|
||||
|
||||
Returns the list of files created. Copies raw templates without
|
||||
processing. Integrations that need placeholder replacement
|
||||
(e.g. ``{SCRIPT}``, ``__AGENT__``) should override ``setup()``
|
||||
and call ``process_template()`` in their own loop — see
|
||||
``CopilotIntegration`` for an example.
|
||||
"""
|
||||
templates = self.list_command_templates()
|
||||
if not templates:
|
||||
return []
|
||||
|
||||
project_root_resolved = project_root.resolve()
|
||||
if manifest.project_root != project_root_resolved:
|
||||
raise ValueError(
|
||||
f"manifest.project_root ({manifest.project_root}) does not match "
|
||||
f"project_root ({project_root_resolved})"
|
||||
)
|
||||
|
||||
dest = self.commands_dest(project_root).resolve()
|
||||
try:
|
||||
dest.relative_to(project_root_resolved)
|
||||
except ValueError as exc:
|
||||
raise ValueError(
|
||||
f"Integration destination {dest} escapes "
|
||||
f"project root {project_root_resolved}"
|
||||
) from exc
|
||||
|
||||
created: list[Path] = []
|
||||
|
||||
for src_file in templates:
|
||||
dst_name = self.command_filename(src_file.stem)
|
||||
dst_file = self.copy_command_to_directory(src_file, dest, dst_name)
|
||||
self.record_file_in_manifest(dst_file, project_root, manifest)
|
||||
created.append(dst_file)
|
||||
|
||||
return created
|
||||
|
||||
def teardown(
|
||||
self,
|
||||
project_root: Path,
|
||||
manifest: IntegrationManifest,
|
||||
*,
|
||||
force: bool = False,
|
||||
) -> tuple[list[Path], list[Path]]:
|
||||
"""Uninstall integration files from *project_root*.
|
||||
|
||||
Delegates to ``manifest.uninstall()`` which only removes files
|
||||
whose hash still matches the recorded value (unless *force*).
|
||||
|
||||
Returns ``(removed, skipped)`` file lists.
|
||||
"""
|
||||
return manifest.uninstall(project_root, force=force)
|
||||
|
||||
# -- Convenience helpers for subclasses -------------------------------
|
||||
|
||||
def install(
|
||||
self,
|
||||
project_root: Path,
|
||||
manifest: IntegrationManifest,
|
||||
parsed_options: dict[str, Any] | None = None,
|
||||
**opts: Any,
|
||||
) -> list[Path]:
|
||||
"""High-level install — calls ``setup()`` and returns created files."""
|
||||
return self.setup(
|
||||
project_root, manifest, parsed_options=parsed_options, **opts
|
||||
)
|
||||
|
||||
def uninstall(
|
||||
self,
|
||||
project_root: Path,
|
||||
manifest: IntegrationManifest,
|
||||
*,
|
||||
force: bool = False,
|
||||
) -> tuple[list[Path], list[Path]]:
|
||||
"""High-level uninstall — calls ``teardown()``."""
|
||||
return self.teardown(project_root, manifest, force=force)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# MarkdownIntegration — covers ~20 standard agents
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class MarkdownIntegration(IntegrationBase):
|
||||
"""Concrete base for integrations that use standard Markdown commands.
|
||||
|
||||
Subclasses only need to set ``key``, ``config``, ``registrar_config``
|
||||
(and optionally ``context_file``). Everything else is inherited.
|
||||
|
||||
The default ``setup()`` from ``IntegrationBase`` copies templates
|
||||
into the agent's commands directory — which is correct for the
|
||||
standard Markdown case.
|
||||
"""
|
||||
|
||||
# MarkdownIntegration inherits IntegrationBase.setup() as-is.
|
||||
# Future stages may add markdown-specific path rewriting here.
|
||||
pass
|
||||
@@ -1,197 +0,0 @@
|
||||
"""Copilot integration — GitHub Copilot in VS Code.
|
||||
|
||||
Copilot has several unique behaviors compared to standard markdown agents:
|
||||
- Commands use ``.agent.md`` extension (not ``.md``)
|
||||
- Each command gets a companion ``.prompt.md`` file in ``.github/prompts/``
|
||||
- Installs ``.vscode/settings.json`` with prompt file recommendations
|
||||
- Context file lives at ``.github/copilot-instructions.md``
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from ..base import IntegrationBase
|
||||
from ..manifest import IntegrationManifest
|
||||
|
||||
|
||||
class CopilotIntegration(IntegrationBase):
|
||||
"""Integration for GitHub Copilot in VS Code."""
|
||||
|
||||
key = "copilot"
|
||||
config = {
|
||||
"name": "GitHub Copilot",
|
||||
"folder": ".github/",
|
||||
"commands_subdir": "agents",
|
||||
"install_url": None,
|
||||
"requires_cli": False,
|
||||
}
|
||||
registrar_config = {
|
||||
"dir": ".github/agents",
|
||||
"format": "markdown",
|
||||
"args": "$ARGUMENTS",
|
||||
"extension": ".agent.md",
|
||||
}
|
||||
context_file = ".github/copilot-instructions.md"
|
||||
|
||||
def command_filename(self, template_name: str) -> str:
|
||||
"""Copilot commands use ``.agent.md`` extension."""
|
||||
return f"speckit.{template_name}.agent.md"
|
||||
|
||||
def setup(
|
||||
self,
|
||||
project_root: Path,
|
||||
manifest: IntegrationManifest,
|
||||
parsed_options: dict[str, Any] | None = None,
|
||||
**opts: Any,
|
||||
) -> list[Path]:
|
||||
"""Install copilot commands, companion prompts, and VS Code settings.
|
||||
|
||||
Uses base class primitives to: read templates, process them
|
||||
(replace placeholders, strip script blocks, rewrite paths),
|
||||
write as ``.agent.md``, then add companion prompts and VS Code settings.
|
||||
"""
|
||||
project_root_resolved = project_root.resolve()
|
||||
if manifest.project_root != project_root_resolved:
|
||||
raise ValueError(
|
||||
f"manifest.project_root ({manifest.project_root}) does not match "
|
||||
f"project_root ({project_root_resolved})"
|
||||
)
|
||||
|
||||
templates = self.list_command_templates()
|
||||
if not templates:
|
||||
return []
|
||||
|
||||
dest = self.commands_dest(project_root)
|
||||
dest_resolved = dest.resolve()
|
||||
try:
|
||||
dest_resolved.relative_to(project_root_resolved)
|
||||
except ValueError as exc:
|
||||
raise ValueError(
|
||||
f"Integration destination {dest_resolved} escapes "
|
||||
f"project root {project_root_resolved}"
|
||||
) from exc
|
||||
dest.mkdir(parents=True, exist_ok=True)
|
||||
created: list[Path] = []
|
||||
|
||||
script_type = opts.get("script_type", "sh")
|
||||
arg_placeholder = self.registrar_config.get("args", "$ARGUMENTS")
|
||||
|
||||
# 1. Process and write command files as .agent.md
|
||||
for src_file in templates:
|
||||
raw = src_file.read_text(encoding="utf-8")
|
||||
processed = self.process_template(raw, self.key, script_type, arg_placeholder)
|
||||
dst_name = self.command_filename(src_file.stem)
|
||||
dst_file = self.write_file_and_record(
|
||||
processed, dest / dst_name, project_root, manifest
|
||||
)
|
||||
created.append(dst_file)
|
||||
|
||||
# 2. Generate companion .prompt.md files from the templates we just wrote
|
||||
prompts_dir = project_root / ".github" / "prompts"
|
||||
for src_file in templates:
|
||||
cmd_name = f"speckit.{src_file.stem}"
|
||||
prompt_content = f"---\nagent: {cmd_name}\n---\n"
|
||||
prompt_file = self.write_file_and_record(
|
||||
prompt_content,
|
||||
prompts_dir / f"{cmd_name}.prompt.md",
|
||||
project_root,
|
||||
manifest,
|
||||
)
|
||||
created.append(prompt_file)
|
||||
|
||||
# Write .vscode/settings.json
|
||||
settings_src = self._vscode_settings_path()
|
||||
if settings_src and settings_src.is_file():
|
||||
dst_settings = project_root / ".vscode" / "settings.json"
|
||||
dst_settings.parent.mkdir(parents=True, exist_ok=True)
|
||||
if dst_settings.exists():
|
||||
# Merge into existing — don't track since we can't safely
|
||||
# remove the user's settings file on uninstall.
|
||||
self._merge_vscode_settings(settings_src, dst_settings)
|
||||
else:
|
||||
shutil.copy2(settings_src, dst_settings)
|
||||
self.record_file_in_manifest(dst_settings, project_root, manifest)
|
||||
created.append(dst_settings)
|
||||
|
||||
# 4. Install integration-specific update-context scripts
|
||||
scripts_src = Path(__file__).resolve().parent / "scripts"
|
||||
if scripts_src.is_dir():
|
||||
scripts_dest = project_root / ".specify" / "integrations" / "copilot" / "scripts"
|
||||
scripts_dest.mkdir(parents=True, exist_ok=True)
|
||||
for src_script in sorted(scripts_src.iterdir()):
|
||||
if src_script.is_file():
|
||||
dst_script = scripts_dest / src_script.name
|
||||
shutil.copy2(src_script, dst_script)
|
||||
# Make shell scripts executable
|
||||
if dst_script.suffix == ".sh":
|
||||
dst_script.chmod(dst_script.stat().st_mode | 0o111)
|
||||
self.record_file_in_manifest(dst_script, project_root, manifest)
|
||||
created.append(dst_script)
|
||||
|
||||
return created
|
||||
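For a single template such as `plan`, the setup above therefore produces two files whose names follow directly from `command_filename()` and the prompt loop. A small sketch, with paths relative to the project root and the import path assumed as elsewhere:

```python
from specify_cli.integrations.copilot import CopilotIntegration  # path assumed

integration = CopilotIntegration()

stem = "plan"  # one of the shared command templates
agent_file = f".github/agents/{integration.command_filename(stem)}"
prompt_file = f".github/prompts/speckit.{stem}.prompt.md"

print(agent_file)   # .github/agents/speckit.plan.agent.md
print(prompt_file)  # .github/prompts/speckit.plan.prompt.md

# The companion prompt file only points at the agent command:
print(f"---\nagent: speckit.{stem}\n---")
```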
|
||||
def _vscode_settings_path(self) -> Path | None:
|
||||
"""Return path to the bundled vscode-settings.json template."""
|
||||
tpl_dir = self.shared_templates_dir()
|
||||
if tpl_dir:
|
||||
candidate = tpl_dir / "vscode-settings.json"
|
||||
if candidate.is_file():
|
||||
return candidate
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def _merge_vscode_settings(src: Path, dst: Path) -> None:
|
||||
"""Merge settings from *src* into existing *dst* JSON file.
|
||||
|
||||
Top-level keys from *src* are added only if missing in *dst*.
|
||||
For dict-valued keys, sub-keys are merged the same way.
|
||||
|
||||
If *dst* cannot be parsed (e.g. JSONC with comments), the merge
|
||||
is skipped to avoid overwriting user settings.
|
||||
"""
|
||||
try:
|
||||
existing = json.loads(dst.read_text(encoding="utf-8"))
|
||||
except (json.JSONDecodeError, OSError):
|
||||
# Cannot parse existing file (likely JSONC with comments).
|
||||
# Skip merge to preserve the user's settings, but show
|
||||
# what they should add manually.
|
||||
import logging
|
||||
template_content = src.read_text(encoding="utf-8")
|
||||
logging.getLogger(__name__).warning(
|
||||
"Could not parse %s (may contain JSONC comments). "
|
||||
"Skipping settings merge to preserve existing file.\n"
|
||||
"Please add the following settings manually:\n%s",
|
||||
dst, template_content,
|
||||
)
|
||||
return
|
||||
|
||||
new_settings = json.loads(src.read_text(encoding="utf-8"))
|
||||
|
||||
if not isinstance(existing, dict) or not isinstance(new_settings, dict):
|
||||
import logging
|
||||
logging.getLogger(__name__).warning(
|
||||
"Skipping settings merge: %s or template is not a JSON object.", dst
|
||||
)
|
||||
return
|
||||
|
||||
changed = False
|
||||
for key, value in new_settings.items():
|
||||
if key not in existing:
|
||||
existing[key] = value
|
||||
changed = True
|
||||
elif isinstance(existing[key], dict) and isinstance(value, dict):
|
||||
for sub_key, sub_value in value.items():
|
||||
if sub_key not in existing[key]:
|
||||
existing[key][sub_key] = sub_value
|
||||
changed = True
|
||||
|
||||
if not changed:
|
||||
return
|
||||
|
||||
dst.write_text(
|
||||
json.dumps(existing, indent=4) + "\n", encoding="utf-8"
|
||||
)
|
||||
@@ -1,22 +0,0 @@
|
||||
# update-context.ps1 — Copilot integration: create/update .github/copilot-instructions.md
|
||||
#
|
||||
# This is the copilot-specific implementation that produces the GitHub
|
||||
# Copilot instructions file. The shared dispatcher reads
|
||||
# .specify/integration.json and calls this script.
|
||||
#
|
||||
# NOTE: This script is not yet active. It will be activated in Stage 7
|
||||
# when the shared update-agent-context.ps1 replaces its switch statement
|
||||
# with integration.json-based dispatch. The shared script must also be
|
||||
# refactored to support SPECKIT_SOURCE_ONLY (guard the Main call) before
|
||||
# dot-sourcing will work.
|
||||
#
|
||||
# Until then, this delegates to the shared script as a subprocess.
|
||||
|
||||
$ErrorActionPreference = 'Stop'
|
||||
|
||||
$repoRoot = git rev-parse --show-toplevel 2>$null
|
||||
if (-not $repoRoot) { $repoRoot = $PWD.Path }
|
||||
|
||||
# Invoke shared update-agent-context script as a separate process.
|
||||
# Dot-sourcing is unsafe until that script guards its Main call.
|
||||
& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType copilot
|
||||
@@ -1,22 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# update-context.sh — Copilot integration: create/update .github/copilot-instructions.md
|
||||
#
|
||||
# This is the copilot-specific implementation that produces the GitHub
|
||||
# Copilot instructions file. The shared dispatcher reads
|
||||
# .specify/integration.json and calls this script.
|
||||
#
|
||||
# NOTE: This script is not yet active. It will be activated in Stage 7
|
||||
# when the shared update-agent-context.sh replaces its case statement
|
||||
# with integration.json-based dispatch. The shared script must also be
|
||||
# refactored to support SPECKIT_SOURCE_ONLY (guard the main logic)
|
||||
# before sourcing will work.
|
||||
#
|
||||
# Until then, this delegates to the shared script as a subprocess.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
REPO_ROOT="${REPO_ROOT:-$(git rev-parse --show-toplevel 2>/dev/null || pwd)}"
|
||||
|
||||
# Invoke shared update-agent-context script as a separate process.
|
||||
# Sourcing is unsafe until that script guards its main logic.
|
||||
exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" copilot
|
||||
@@ -1,265 +0,0 @@
|
||||
"""Hash-tracked installation manifest for integrations.
|
||||
|
||||
Each installed integration records the files it created together with
|
||||
their SHA-256 hashes. On uninstall only files whose hash still matches
|
||||
the recorded value are removed — modified files are left in place and
|
||||
reported to the caller.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
|
||||
def _sha256(path: Path) -> str:
|
||||
"""Return the hex SHA-256 digest of *path*."""
|
||||
h = hashlib.sha256()
|
||||
with open(path, "rb") as fh:
|
||||
for chunk in iter(lambda: fh.read(8192), b""):
|
||||
h.update(chunk)
|
||||
return h.hexdigest()
|
||||
|
||||
|
||||
def _validate_rel_path(rel: Path, root: Path) -> Path:
|
||||
"""Resolve *rel* against *root* and verify it stays within *root*.
|
||||
|
||||
Raises ``ValueError`` if *rel* is absolute, contains ``..`` segments
|
||||
that escape *root*, or otherwise resolves outside the project root.
|
||||
"""
|
||||
if rel.is_absolute():
|
||||
raise ValueError(
|
||||
f"Absolute paths are not allowed in manifests: {rel}"
|
||||
)
|
||||
resolved = (root / rel).resolve()
|
||||
root_resolved = root.resolve()
|
||||
try:
|
||||
resolved.relative_to(root_resolved)
|
||||
except ValueError:
|
||||
raise ValueError(
|
||||
f"Path {rel} resolves to {resolved} which is outside "
|
||||
f"the project root {root_resolved}"
|
||||
) from None
|
||||
return resolved
|
||||
|
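A quick sketch of the containment guarantee `_validate_rel_path` gives the manifest; the project path is made up and the import path is assumed as elsewhere in these examples:

```python
from pathlib import Path

from specify_cli.integrations.manifest import _validate_rel_path  # path assumed

root = Path("/home/dev/my-project")

# A normal relative path resolves inside the project and is accepted.
print(_validate_rel_path(Path(".github/agents/speckit.plan.agent.md"), root))
# /home/dev/my-project/.github/agents/speckit.plan.agent.md

# Absolute paths and traversal out of the root are rejected.
for bad in [Path("/etc/passwd"), Path("../outside.txt")]:
    try:
        _validate_rel_path(bad, root)
    except ValueError as exc:
        print(f"rejected: {exc}")
```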
||||
|
||||
class IntegrationManifest:
|
||||
"""Tracks files installed by a single integration.
|
||||
|
||||
Parameters:
|
||||
key: Integration identifier (e.g. ``"copilot"``).
|
||||
project_root: Absolute path to the project directory.
|
||||
version: CLI version string recorded in the manifest.
|
||||
"""
|
||||
|
||||
def __init__(self, key: str, project_root: Path, version: str = "") -> None:
|
||||
self.key = key
|
||||
self.project_root = project_root.resolve()
|
||||
self.version = version
|
||||
self._files: dict[str, str] = {} # rel_path → sha256 hex
|
||||
self._installed_at: str = ""
|
||||
|
||||
    # -- Manifest file location -------------------------------------------

    @property
    def manifest_path(self) -> Path:
        """Path to the on-disk manifest JSON."""
        return self.project_root / ".specify" / "integrations" / f"{self.key}.manifest.json"

    # -- Recording files --------------------------------------------------

    def record_file(self, rel_path: str | Path, content: bytes | str) -> Path:
        """Write *content* to *rel_path* (relative to project root) and record its hash.

        Creates parent directories as needed. Returns the absolute path
        of the written file.

        Raises ``ValueError`` if *rel_path* resolves outside the project root.
        """
        rel = Path(rel_path)
        abs_path = _validate_rel_path(rel, self.project_root)
        abs_path.parent.mkdir(parents=True, exist_ok=True)

        if isinstance(content, str):
            content = content.encode("utf-8")
        abs_path.write_bytes(content)

        normalized = abs_path.relative_to(self.project_root).as_posix()
        self._files[normalized] = hashlib.sha256(content).hexdigest()
        return abs_path

    def record_existing(self, rel_path: str | Path) -> None:
        """Record the hash of an already-existing file at *rel_path*.

        Raises ``ValueError`` if *rel_path* resolves outside the project root.
        """
        rel = Path(rel_path)
        abs_path = _validate_rel_path(rel, self.project_root)
        normalized = abs_path.relative_to(self.project_root).as_posix()
        self._files[normalized] = _sha256(abs_path)

    # -- Querying ---------------------------------------------------------

    @property
    def files(self) -> dict[str, str]:
        """Return a copy of the ``{rel_path: sha256}`` mapping."""
        return dict(self._files)

    def check_modified(self) -> list[str]:
        """Return relative paths of tracked files whose content changed on disk."""
        modified: list[str] = []
        for rel, expected_hash in self._files.items():
            rel_path = Path(rel)
            # Skip paths that are absolute or attempt to escape the project root
            if rel_path.is_absolute() or ".." in rel_path.parts:
                continue
            abs_path = self.project_root / rel_path
            if not abs_path.exists() and not abs_path.is_symlink():
                continue
            # Treat symlinks and non-regular-files as modified
            if abs_path.is_symlink() or not abs_path.is_file():
                modified.append(rel)
                continue
            if _sha256(abs_path) != expected_hash:
                modified.append(rel)
        return modified

    # -- Uninstall --------------------------------------------------------

    def uninstall(
        self,
        project_root: Path | None = None,
        *,
        force: bool = False,
    ) -> tuple[list[Path], list[Path]]:
        """Remove tracked files whose hash still matches.

        Parameters:
            project_root: Override for the project root.
            force: If ``True``, remove files even if modified.

        Returns:
            ``(removed, skipped)`` — absolute paths.
        """
        root = (project_root or self.project_root).resolve()
        removed: list[Path] = []
        skipped: list[Path] = []

        for rel, expected_hash in self._files.items():
            # Use non-resolved path for deletion so symlinks themselves
            # are removed, not their targets.
            path = root / rel
            # Validate containment lexically (without following symlinks)
            # by collapsing .. segments with os.path.normpath before the check.
            try:
                normed = Path(os.path.normpath(path))
                normed.relative_to(root)
            except (ValueError, OSError):
                continue
            if not path.exists() and not path.is_symlink():
                continue
            # Skip directories — manifest only tracks files
            if not path.is_file() and not path.is_symlink():
                skipped.append(path)
                continue
            # Never follow symlinks when comparing hashes. Only remove
            # symlinks when forced, to avoid acting on tampered entries.
            if path.is_symlink():
                if not force:
                    skipped.append(path)
                    continue
            else:
                if not force and _sha256(path) != expected_hash:
                    skipped.append(path)
                    continue
            try:
                path.unlink()
            except OSError:
                skipped.append(path)
                continue
            removed.append(path)
            # Clean up empty parent directories up to project root
            parent = path.parent
            while parent != root:
                try:
                    parent.rmdir()  # only succeeds if empty
                except OSError:
                    break
                parent = parent.parent

        # Remove the manifest file itself
        manifest = root / ".specify" / "integrations" / f"{self.key}.manifest.json"
        if manifest.exists():
            manifest.unlink()
            parent = manifest.parent
            while parent != root:
                try:
                    parent.rmdir()
                except OSError:
                    break
                parent = parent.parent

        return removed, skipped

    # -- Persistence ------------------------------------------------------

    def save(self) -> Path:
        """Write the manifest to disk. Returns the manifest path."""
        self._installed_at = self._installed_at or datetime.now(timezone.utc).isoformat()
        data: dict[str, Any] = {
            "integration": self.key,
            "version": self.version,
            "installed_at": self._installed_at,
            "files": self._files,
        }
        path = self.manifest_path
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(json.dumps(data, indent=2) + "\n", encoding="utf-8")
        return path

    @classmethod
    def load(cls, key: str, project_root: Path) -> IntegrationManifest:
        """Load an existing manifest from disk.

        Raises ``FileNotFoundError`` if the manifest does not exist.
        """
        inst = cls(key, project_root)
        path = inst.manifest_path
        try:
            data = json.loads(path.read_text(encoding="utf-8"))
        except json.JSONDecodeError as exc:
            raise ValueError(
                f"Integration manifest at {path} contains invalid JSON"
            ) from exc

        if not isinstance(data, dict):
            raise ValueError(
                f"Integration manifest at {path} must be a JSON object, "
                f"got {type(data).__name__}"
            )

        files = data.get("files", {})
        if not isinstance(files, dict) or not all(
            isinstance(k, str) and isinstance(v, str) for k, v in files.items()
        ):
            raise ValueError(
                f"Integration manifest 'files' at {path} must be a "
                "mapping of string paths to string hashes"
            )

        inst.version = data.get("version", "")
        inst._installed_at = data.get("installed_at", "")
        inst._files = files

        stored_key = data.get("integration", "")
        if stored_key and stored_key != key:
            raise ValueError(
                f"Manifest at {path} belongs to integration {stored_key!r}, "
                f"not {key!r}"
            )

        return inst
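For orientation, a minimal usage sketch of the manifest API above. This is illustrative only; the project root and file name are made up, and error handling is omitted.

```python
from pathlib import Path

from specify_cli.integrations.manifest import IntegrationManifest

project_root = Path("/tmp/demo-project")  # hypothetical project root

# Write a file, record its hash, and persist the manifest under .specify/integrations/
m = IntegrationManifest("copilot", project_root, version="1.0")
m.record_file(".github/agents/speckit.plan.agent.md", "# plan agent")
m.save()

# Later: reload, detect on-disk edits, and remove only files whose hash still matches
loaded = IntegrationManifest.load("copilot", project_root)
print(loaded.check_modified())           # [] if nothing was edited on disk
removed, skipped = loaded.uninstall()    # pass force=True to remove edited files too
```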
@@ -1,23 +0,0 @@
"""Shared test helpers for integration tests."""

from specify_cli.integrations.base import MarkdownIntegration


class StubIntegration(MarkdownIntegration):
    """Minimal concrete integration for testing."""

    key = "stub"
    config = {
        "name": "Stub Agent",
        "folder": ".stub/",
        "commands_subdir": "commands",
        "install_url": None,
        "requires_cli": False,
    }
    registrar_config = {
        "dir": ".stub/commands",
        "format": "markdown",
        "args": "$ARGUMENTS",
        "extension": ".md",
    }
    context_file = "STUB.md"
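As a quick illustration of what this stub's configuration drives (the expected values are taken from the assertions in the tests that follow, so this is a restatement rather than new behavior):

```python
from pathlib import Path

stub = StubIntegration()

# "folder" and "commands_subdir" determine where command files are written...
assert stub.commands_dest(Path("proj")) == Path("proj") / ".stub" / "commands"

# ...and the registrar "extension" shows up in generated command file names.
assert stub.command_filename("plan") == "speckit.plan.md"
```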
@@ -1,169 +0,0 @@
"""Tests for IntegrationOption, IntegrationBase, MarkdownIntegration, and primitives."""

import pytest

from specify_cli.integrations.base import (
    IntegrationBase,
    IntegrationOption,
    MarkdownIntegration,
)
from specify_cli.integrations.manifest import IntegrationManifest
from .conftest import StubIntegration


class TestIntegrationOption:
    def test_defaults(self):
        opt = IntegrationOption(name="--flag")
        assert opt.name == "--flag"
        assert opt.is_flag is False
        assert opt.required is False
        assert opt.default is None
        assert opt.help == ""

    def test_flag_option(self):
        opt = IntegrationOption(name="--skills", is_flag=True, default=True, help="Enable skills")
        assert opt.is_flag is True
        assert opt.default is True
        assert opt.help == "Enable skills"

    def test_required_option(self):
        opt = IntegrationOption(name="--commands-dir", required=True, help="Dir path")
        assert opt.required is True

    def test_frozen(self):
        opt = IntegrationOption(name="--x")
        with pytest.raises(AttributeError):
            opt.name = "--y"  # type: ignore[misc]


class TestIntegrationBase:
    def test_key_and_config(self):
        i = StubIntegration()
        assert i.key == "stub"
        assert i.config["name"] == "Stub Agent"
        assert i.registrar_config["format"] == "markdown"
        assert i.context_file == "STUB.md"

    def test_options_default_empty(self):
        assert StubIntegration.options() == []

    def test_shared_commands_dir(self):
        i = StubIntegration()
        cmd_dir = i.shared_commands_dir()
        assert cmd_dir is not None
        assert cmd_dir.is_dir()

    def test_setup_uses_shared_templates(self, tmp_path):
        i = StubIntegration()
        manifest = IntegrationManifest("stub", tmp_path)
        created = i.setup(tmp_path, manifest)
        assert len(created) > 0
        for f in created:
            assert f.parent == tmp_path / ".stub" / "commands"
            assert f.name.startswith("speckit.")
            assert f.name.endswith(".md")

    def test_setup_copies_templates(self, tmp_path, monkeypatch):
        tpl = tmp_path / "_templates"
        tpl.mkdir()
        (tpl / "plan.md").write_text("plan content", encoding="utf-8")
        (tpl / "specify.md").write_text("spec content", encoding="utf-8")

        i = StubIntegration()
        monkeypatch.setattr(type(i), "list_command_templates", lambda self: sorted(tpl.glob("*.md")))

        project = tmp_path / "project"
        project.mkdir()
        created = i.setup(project, IntegrationManifest("stub", project))
        assert len(created) == 2
        assert (project / ".stub" / "commands" / "speckit.plan.md").exists()
        assert (project / ".stub" / "commands" / "speckit.specify.md").exists()

    def test_install_delegates_to_setup(self, tmp_path):
        i = StubIntegration()
        manifest = IntegrationManifest("stub", tmp_path)
        result = i.install(tmp_path, manifest)
        assert len(result) > 0

    def test_uninstall_delegates_to_teardown(self, tmp_path):
        i = StubIntegration()
        manifest = IntegrationManifest("stub", tmp_path)
        removed, skipped = i.uninstall(tmp_path, manifest)
        assert removed == []
        assert skipped == []


class TestMarkdownIntegration:
    def test_is_subclass_of_base(self):
        assert issubclass(MarkdownIntegration, IntegrationBase)

    def test_stub_is_markdown(self):
        assert isinstance(StubIntegration(), MarkdownIntegration)


class TestBasePrimitives:
    def test_shared_commands_dir_returns_path(self):
        i = StubIntegration()
        cmd_dir = i.shared_commands_dir()
        assert cmd_dir is not None
        assert cmd_dir.is_dir()

    def test_shared_templates_dir_returns_path(self):
        i = StubIntegration()
        tpl_dir = i.shared_templates_dir()
        assert tpl_dir is not None
        assert tpl_dir.is_dir()

    def test_list_command_templates_returns_md_files(self):
        i = StubIntegration()
        templates = i.list_command_templates()
        assert len(templates) > 0
        assert all(t.suffix == ".md" for t in templates)

    def test_command_filename_default(self):
        i = StubIntegration()
        assert i.command_filename("plan") == "speckit.plan.md"

    def test_commands_dest(self, tmp_path):
        i = StubIntegration()
        dest = i.commands_dest(tmp_path)
        assert dest == tmp_path / ".stub" / "commands"

    def test_commands_dest_no_config_raises(self, tmp_path):
        class NoConfig(MarkdownIntegration):
            key = "noconfig"
        with pytest.raises(ValueError, match="config is not set"):
            NoConfig().commands_dest(tmp_path)

    def test_copy_command_to_directory(self, tmp_path):
        src = tmp_path / "source.md"
        src.write_text("content", encoding="utf-8")
        dest_dir = tmp_path / "output"
        result = IntegrationBase.copy_command_to_directory(src, dest_dir, "speckit.plan.md")
        assert result == dest_dir / "speckit.plan.md"
        assert result.read_text(encoding="utf-8") == "content"

    def test_record_file_in_manifest(self, tmp_path):
        f = tmp_path / "f.txt"
        f.write_text("hello", encoding="utf-8")
        m = IntegrationManifest("test", tmp_path)
        IntegrationBase.record_file_in_manifest(f, tmp_path, m)
        assert "f.txt" in m.files

    def test_write_file_and_record(self, tmp_path):
        m = IntegrationManifest("test", tmp_path)
        dest = tmp_path / "sub" / "f.txt"
        result = IntegrationBase.write_file_and_record("content", dest, tmp_path, m)
        assert result == dest
        assert dest.read_text(encoding="utf-8") == "content"
        assert "sub/f.txt" in m.files

    def test_setup_copies_shared_templates(self, tmp_path):
        i = StubIntegration()
        m = IntegrationManifest("stub", tmp_path)
        created = i.setup(tmp_path, m)
        assert len(created) > 0
        for f in created:
            assert f.parent.name == "commands"
            assert f.name.startswith("speckit.")
            assert f.name.endswith(".md")
@@ -1,122 +0,0 @@
|
||||
"""Tests for --integration flag on specify init (CLI-level)."""
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
class TestInitIntegrationFlag:
|
||||
def test_integration_and_ai_mutually_exclusive(self):
|
||||
from typer.testing import CliRunner
|
||||
from specify_cli import app
|
||||
runner = CliRunner()
|
||||
result = runner.invoke(app, [
|
||||
"init", "test-project", "--ai", "claude", "--integration", "copilot",
|
||||
])
|
||||
assert result.exit_code != 0
|
||||
assert "mutually exclusive" in result.output
|
||||
|
||||
def test_unknown_integration_rejected(self):
|
||||
from typer.testing import CliRunner
|
||||
from specify_cli import app
|
||||
runner = CliRunner()
|
||||
result = runner.invoke(app, [
|
||||
"init", "test-project", "--integration", "nonexistent",
|
||||
])
|
||||
assert result.exit_code != 0
|
||||
assert "Unknown integration" in result.output
|
||||
|
||||
def test_integration_copilot_creates_files(self, tmp_path):
|
||||
from typer.testing import CliRunner
|
||||
from specify_cli import app
|
||||
runner = CliRunner()
|
||||
project = tmp_path / "int-test"
|
||||
project.mkdir()
|
||||
old_cwd = os.getcwd()
|
||||
try:
|
||||
os.chdir(project)
|
||||
result = runner.invoke(app, [
|
||||
"init", "--here", "--integration", "copilot", "--script", "sh", "--no-git",
|
||||
], catch_exceptions=False)
|
||||
finally:
|
||||
os.chdir(old_cwd)
|
||||
assert result.exit_code == 0, f"init failed: {result.output}"
|
||||
assert (project / ".github" / "agents" / "speckit.plan.agent.md").exists()
|
||||
assert (project / ".github" / "prompts" / "speckit.plan.prompt.md").exists()
|
||||
assert (project / ".specify" / "scripts" / "bash" / "common.sh").exists()
|
||||
|
||||
data = json.loads((project / ".specify" / "integration.json").read_text(encoding="utf-8"))
|
||||
assert data["integration"] == "copilot"
|
||||
assert "scripts" in data
|
||||
assert "update-context" in data["scripts"]
|
||||
|
||||
opts = json.loads((project / ".specify" / "init-options.json").read_text(encoding="utf-8"))
|
||||
assert opts["integration"] == "copilot"
|
||||
|
||||
assert (project / ".specify" / "integrations" / "copilot.manifest.json").exists()
|
||||
assert (project / ".specify" / "integrations" / "copilot" / "scripts" / "update-context.sh").exists()
|
||||
|
||||
shared_manifest = project / ".specify" / "integrations" / "speckit.manifest.json"
|
||||
assert shared_manifest.exists()
|
||||
|
||||
def test_ai_copilot_auto_promotes(self, tmp_path):
|
||||
from typer.testing import CliRunner
|
||||
from specify_cli import app
|
||||
project = tmp_path / "promote-test"
|
||||
project.mkdir()
|
||||
old_cwd = os.getcwd()
|
||||
try:
|
||||
os.chdir(project)
|
||||
runner = CliRunner()
|
||||
result = runner.invoke(app, [
|
||||
"init", "--here", "--ai", "copilot", "--script", "sh", "--no-git",
|
||||
], catch_exceptions=False)
|
||||
finally:
|
||||
os.chdir(old_cwd)
|
||||
assert result.exit_code == 0
|
||||
assert "--integration copilot" in result.output
|
||||
assert (project / ".github" / "agents" / "speckit.plan.agent.md").exists()
|
||||
|
||||
def test_shared_infra_skips_existing_files(self, tmp_path):
|
||||
"""Pre-existing shared files are not overwritten by _install_shared_infra."""
|
||||
from typer.testing import CliRunner
|
||||
from specify_cli import app
|
||||
|
||||
project = tmp_path / "skip-test"
|
||||
project.mkdir()
|
||||
|
||||
# Pre-create a shared script with custom content
|
||||
scripts_dir = project / ".specify" / "scripts" / "bash"
|
||||
scripts_dir.mkdir(parents=True)
|
||||
custom_content = "# user-modified common.sh\n"
|
||||
(scripts_dir / "common.sh").write_text(custom_content, encoding="utf-8")
|
||||
|
||||
# Pre-create a shared template with custom content
|
||||
templates_dir = project / ".specify" / "templates"
|
||||
templates_dir.mkdir(parents=True)
|
||||
custom_template = "# user-modified spec-template\n"
|
||||
(templates_dir / "spec-template.md").write_text(custom_template, encoding="utf-8")
|
||||
|
||||
old_cwd = os.getcwd()
|
||||
try:
|
||||
os.chdir(project)
|
||||
runner = CliRunner()
|
||||
result = runner.invoke(app, [
|
||||
"init", "--here", "--force",
|
||||
"--integration", "copilot",
|
||||
"--script", "sh",
|
||||
"--no-git",
|
||||
], catch_exceptions=False)
|
||||
finally:
|
||||
os.chdir(old_cwd)
|
||||
|
||||
assert result.exit_code == 0
|
||||
|
||||
# User's files should be preserved
|
||||
assert (scripts_dir / "common.sh").read_text(encoding="utf-8") == custom_content
|
||||
assert (templates_dir / "spec-template.md").read_text(encoding="utf-8") == custom_template
|
||||
|
||||
# Other shared files should still be installed
|
||||
assert (scripts_dir / "setup-plan.sh").exists()
|
||||
assert (templates_dir / "plan-template.md").exists()
|
||||
@@ -1,266 +0,0 @@
|
||||
"""Tests for CopilotIntegration."""
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
from specify_cli.integrations import get_integration
|
||||
from specify_cli.integrations.manifest import IntegrationManifest
|
||||
|
||||
|
||||
class TestCopilotIntegration:
|
||||
def test_copilot_key_and_config(self):
|
||||
copilot = get_integration("copilot")
|
||||
assert copilot is not None
|
||||
assert copilot.key == "copilot"
|
||||
assert copilot.config["folder"] == ".github/"
|
||||
assert copilot.config["commands_subdir"] == "agents"
|
||||
assert copilot.registrar_config["extension"] == ".agent.md"
|
||||
assert copilot.context_file == ".github/copilot-instructions.md"
|
||||
|
||||
def test_command_filename_agent_md(self):
|
||||
copilot = get_integration("copilot")
|
||||
assert copilot.command_filename("plan") == "speckit.plan.agent.md"
|
||||
|
||||
def test_setup_creates_agent_md_files(self, tmp_path):
|
||||
from specify_cli.integrations.copilot import CopilotIntegration
|
||||
copilot = CopilotIntegration()
|
||||
m = IntegrationManifest("copilot", tmp_path)
|
||||
created = copilot.setup(tmp_path, m)
|
||||
assert len(created) > 0
|
||||
agent_files = [f for f in created if ".agent." in f.name]
|
||||
assert len(agent_files) > 0
|
||||
for f in agent_files:
|
||||
assert f.parent == tmp_path / ".github" / "agents"
|
||||
assert f.name.endswith(".agent.md")
|
||||
|
||||
def test_setup_creates_companion_prompts(self, tmp_path):
|
||||
from specify_cli.integrations.copilot import CopilotIntegration
|
||||
copilot = CopilotIntegration()
|
||||
m = IntegrationManifest("copilot", tmp_path)
|
||||
created = copilot.setup(tmp_path, m)
|
||||
prompt_files = [f for f in created if f.parent.name == "prompts"]
|
||||
assert len(prompt_files) > 0
|
||||
for f in prompt_files:
|
||||
assert f.name.endswith(".prompt.md")
|
||||
content = f.read_text(encoding="utf-8")
|
||||
assert content.startswith("---\nagent: speckit.")
|
||||
|
||||
def test_agent_and_prompt_counts_match(self, tmp_path):
|
||||
from specify_cli.integrations.copilot import CopilotIntegration
|
||||
copilot = CopilotIntegration()
|
||||
m = IntegrationManifest("copilot", tmp_path)
|
||||
created = copilot.setup(tmp_path, m)
|
||||
agents = [f for f in created if ".agent.md" in f.name]
|
||||
prompts = [f for f in created if ".prompt.md" in f.name]
|
||||
assert len(agents) == len(prompts)
|
||||
|
||||
def test_setup_creates_vscode_settings_new(self, tmp_path):
|
||||
from specify_cli.integrations.copilot import CopilotIntegration
|
||||
copilot = CopilotIntegration()
|
||||
assert copilot._vscode_settings_path() is not None
|
||||
m = IntegrationManifest("copilot", tmp_path)
|
||||
created = copilot.setup(tmp_path, m)
|
||||
settings = tmp_path / ".vscode" / "settings.json"
|
||||
assert settings.exists()
|
||||
assert settings in created
|
||||
assert any("settings.json" in k for k in m.files)
|
||||
|
||||
def test_setup_merges_existing_vscode_settings(self, tmp_path):
|
||||
from specify_cli.integrations.copilot import CopilotIntegration
|
||||
copilot = CopilotIntegration()
|
||||
vscode_dir = tmp_path / ".vscode"
|
||||
vscode_dir.mkdir(parents=True)
|
||||
existing = {"editor.fontSize": 14, "custom.setting": True}
|
||||
(vscode_dir / "settings.json").write_text(json.dumps(existing, indent=4), encoding="utf-8")
|
||||
m = IntegrationManifest("copilot", tmp_path)
|
||||
created = copilot.setup(tmp_path, m)
|
||||
settings = tmp_path / ".vscode" / "settings.json"
|
||||
data = json.loads(settings.read_text(encoding="utf-8"))
|
||||
assert data["editor.fontSize"] == 14
|
||||
assert data["custom.setting"] is True
|
||||
assert settings not in created
|
||||
assert not any("settings.json" in k for k in m.files)
|
||||
|
||||
def test_all_created_files_tracked_in_manifest(self, tmp_path):
|
||||
from specify_cli.integrations.copilot import CopilotIntegration
|
||||
copilot = CopilotIntegration()
|
||||
m = IntegrationManifest("copilot", tmp_path)
|
||||
created = copilot.setup(tmp_path, m)
|
||||
for f in created:
|
||||
rel = f.resolve().relative_to(tmp_path.resolve()).as_posix()
|
||||
assert rel in m.files, f"Created file {rel} not tracked in manifest"
|
||||
|
||||
def test_install_uninstall_roundtrip(self, tmp_path):
|
||||
from specify_cli.integrations.copilot import CopilotIntegration
|
||||
copilot = CopilotIntegration()
|
||||
m = IntegrationManifest("copilot", tmp_path)
|
||||
created = copilot.install(tmp_path, m)
|
||||
assert len(created) > 0
|
||||
m.save()
|
||||
for f in created:
|
||||
assert f.exists()
|
||||
removed, skipped = copilot.uninstall(tmp_path, m)
|
||||
assert len(removed) == len(created)
|
||||
assert skipped == []
|
||||
|
||||
def test_modified_file_survives_uninstall(self, tmp_path):
|
||||
from specify_cli.integrations.copilot import CopilotIntegration
|
||||
copilot = CopilotIntegration()
|
||||
m = IntegrationManifest("copilot", tmp_path)
|
||||
created = copilot.install(tmp_path, m)
|
||||
m.save()
|
||||
modified_file = created[0]
|
||||
modified_file.write_text("user modified this", encoding="utf-8")
|
||||
removed, skipped = copilot.uninstall(tmp_path, m)
|
||||
assert modified_file.exists()
|
||||
assert modified_file in skipped
|
||||
|
||||
def test_directory_structure(self, tmp_path):
|
||||
from specify_cli.integrations.copilot import CopilotIntegration
|
||||
copilot = CopilotIntegration()
|
||||
m = IntegrationManifest("copilot", tmp_path)
|
||||
copilot.setup(tmp_path, m)
|
||||
agents_dir = tmp_path / ".github" / "agents"
|
||||
assert agents_dir.is_dir()
|
||||
agent_files = sorted(agents_dir.glob("speckit.*.agent.md"))
|
||||
assert len(agent_files) == 9
|
||||
expected_commands = {
|
||||
"analyze", "checklist", "clarify", "constitution",
|
||||
"implement", "plan", "specify", "tasks", "taskstoissues",
|
||||
}
|
||||
actual_commands = {f.name.removeprefix("speckit.").removesuffix(".agent.md") for f in agent_files}
|
||||
assert actual_commands == expected_commands
|
||||
|
||||
def test_templates_are_processed(self, tmp_path):
|
||||
from specify_cli.integrations.copilot import CopilotIntegration
|
||||
copilot = CopilotIntegration()
|
||||
m = IntegrationManifest("copilot", tmp_path)
|
||||
copilot.setup(tmp_path, m)
|
||||
agents_dir = tmp_path / ".github" / "agents"
|
||||
for agent_file in agents_dir.glob("speckit.*.agent.md"):
|
||||
content = agent_file.read_text(encoding="utf-8")
|
||||
assert "{SCRIPT}" not in content, f"{agent_file.name} has unprocessed {{SCRIPT}}"
|
||||
assert "__AGENT__" not in content, f"{agent_file.name} has unprocessed __AGENT__"
|
||||
assert "{ARGS}" not in content, f"{agent_file.name} has unprocessed {{ARGS}}"
|
||||
assert "\nscripts:\n" not in content
|
||||
assert "\nagent_scripts:\n" not in content
|
||||
|
||||
def test_complete_file_inventory_sh(self, tmp_path):
|
||||
"""Every file produced by specify init --integration copilot --script sh."""
|
||||
from typer.testing import CliRunner
|
||||
from specify_cli import app
|
||||
project = tmp_path / "inventory-sh"
|
||||
project.mkdir()
|
||||
old_cwd = os.getcwd()
|
||||
try:
|
||||
os.chdir(project)
|
||||
result = CliRunner().invoke(app, [
|
||||
"init", "--here", "--integration", "copilot", "--script", "sh", "--no-git",
|
||||
], catch_exceptions=False)
|
||||
finally:
|
||||
os.chdir(old_cwd)
|
||||
assert result.exit_code == 0
|
||||
actual = sorted(p.relative_to(project).as_posix() for p in project.rglob("*") if p.is_file())
|
||||
expected = sorted([
|
||||
".github/agents/speckit.analyze.agent.md",
|
||||
".github/agents/speckit.checklist.agent.md",
|
||||
".github/agents/speckit.clarify.agent.md",
|
||||
".github/agents/speckit.constitution.agent.md",
|
||||
".github/agents/speckit.implement.agent.md",
|
||||
".github/agents/speckit.plan.agent.md",
|
||||
".github/agents/speckit.specify.agent.md",
|
||||
".github/agents/speckit.tasks.agent.md",
|
||||
".github/agents/speckit.taskstoissues.agent.md",
|
||||
".github/prompts/speckit.analyze.prompt.md",
|
||||
".github/prompts/speckit.checklist.prompt.md",
|
||||
".github/prompts/speckit.clarify.prompt.md",
|
||||
".github/prompts/speckit.constitution.prompt.md",
|
||||
".github/prompts/speckit.implement.prompt.md",
|
||||
".github/prompts/speckit.plan.prompt.md",
|
||||
".github/prompts/speckit.specify.prompt.md",
|
||||
".github/prompts/speckit.tasks.prompt.md",
|
||||
".github/prompts/speckit.taskstoissues.prompt.md",
|
||||
".vscode/settings.json",
|
||||
".specify/integration.json",
|
||||
".specify/init-options.json",
|
||||
".specify/integrations/copilot.manifest.json",
|
||||
".specify/integrations/speckit.manifest.json",
|
||||
".specify/integrations/copilot/scripts/update-context.ps1",
|
||||
".specify/integrations/copilot/scripts/update-context.sh",
|
||||
".specify/scripts/bash/check-prerequisites.sh",
|
||||
".specify/scripts/bash/common.sh",
|
||||
".specify/scripts/bash/create-new-feature.sh",
|
||||
".specify/scripts/bash/setup-plan.sh",
|
||||
".specify/scripts/bash/update-agent-context.sh",
|
||||
".specify/templates/agent-file-template.md",
|
||||
".specify/templates/checklist-template.md",
|
||||
".specify/templates/constitution-template.md",
|
||||
".specify/templates/plan-template.md",
|
||||
".specify/templates/spec-template.md",
|
||||
".specify/templates/tasks-template.md",
|
||||
".specify/memory/constitution.md",
|
||||
])
|
||||
assert actual == expected, (
|
||||
f"Missing: {sorted(set(expected) - set(actual))}\n"
|
||||
f"Extra: {sorted(set(actual) - set(expected))}"
|
||||
)
|
||||
|
||||
def test_complete_file_inventory_ps(self, tmp_path):
|
||||
"""Every file produced by specify init --integration copilot --script ps."""
|
||||
from typer.testing import CliRunner
|
||||
from specify_cli import app
|
||||
project = tmp_path / "inventory-ps"
|
||||
project.mkdir()
|
||||
old_cwd = os.getcwd()
|
||||
try:
|
||||
os.chdir(project)
|
||||
result = CliRunner().invoke(app, [
|
||||
"init", "--here", "--integration", "copilot", "--script", "ps", "--no-git",
|
||||
], catch_exceptions=False)
|
||||
finally:
|
||||
os.chdir(old_cwd)
|
||||
assert result.exit_code == 0
|
||||
actual = sorted(p.relative_to(project).as_posix() for p in project.rglob("*") if p.is_file())
|
||||
expected = sorted([
|
||||
".github/agents/speckit.analyze.agent.md",
|
||||
".github/agents/speckit.checklist.agent.md",
|
||||
".github/agents/speckit.clarify.agent.md",
|
||||
".github/agents/speckit.constitution.agent.md",
|
||||
".github/agents/speckit.implement.agent.md",
|
||||
".github/agents/speckit.plan.agent.md",
|
||||
".github/agents/speckit.specify.agent.md",
|
||||
".github/agents/speckit.tasks.agent.md",
|
||||
".github/agents/speckit.taskstoissues.agent.md",
|
||||
".github/prompts/speckit.analyze.prompt.md",
|
||||
".github/prompts/speckit.checklist.prompt.md",
|
||||
".github/prompts/speckit.clarify.prompt.md",
|
||||
".github/prompts/speckit.constitution.prompt.md",
|
||||
".github/prompts/speckit.implement.prompt.md",
|
||||
".github/prompts/speckit.plan.prompt.md",
|
||||
".github/prompts/speckit.specify.prompt.md",
|
||||
".github/prompts/speckit.tasks.prompt.md",
|
||||
".github/prompts/speckit.taskstoissues.prompt.md",
|
||||
".vscode/settings.json",
|
||||
".specify/integration.json",
|
||||
".specify/init-options.json",
|
||||
".specify/integrations/copilot.manifest.json",
|
||||
".specify/integrations/speckit.manifest.json",
|
||||
".specify/integrations/copilot/scripts/update-context.ps1",
|
||||
".specify/integrations/copilot/scripts/update-context.sh",
|
||||
".specify/scripts/powershell/check-prerequisites.ps1",
|
||||
".specify/scripts/powershell/common.ps1",
|
||||
".specify/scripts/powershell/create-new-feature.ps1",
|
||||
".specify/scripts/powershell/setup-plan.ps1",
|
||||
".specify/scripts/powershell/update-agent-context.ps1",
|
||||
".specify/templates/agent-file-template.md",
|
||||
".specify/templates/checklist-template.md",
|
||||
".specify/templates/constitution-template.md",
|
||||
".specify/templates/plan-template.md",
|
||||
".specify/templates/spec-template.md",
|
||||
".specify/templates/tasks-template.md",
|
||||
".specify/memory/constitution.md",
|
||||
])
|
||||
assert actual == expected, (
|
||||
f"Missing: {sorted(set(expected) - set(actual))}\n"
|
||||
f"Extra: {sorted(set(actual) - set(expected))}"
|
||||
)
|
||||
@@ -1,245 +0,0 @@
|
||||
"""Tests for IntegrationManifest — record, hash, save, load, uninstall, modified detection."""
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
|
||||
import pytest
|
||||
|
||||
from specify_cli.integrations.manifest import IntegrationManifest, _sha256
|
||||
|
||||
|
||||
class TestManifestRecordFile:
|
||||
def test_record_file_writes_and_hashes(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
content = "hello world"
|
||||
abs_path = m.record_file("a/b.txt", content)
|
||||
assert abs_path == tmp_path / "a" / "b.txt"
|
||||
assert abs_path.read_text(encoding="utf-8") == content
|
||||
expected_hash = hashlib.sha256(content.encode()).hexdigest()
|
||||
assert m.files["a/b.txt"] == expected_hash
|
||||
|
||||
def test_record_file_bytes(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
data = b"\x00\x01\x02"
|
||||
abs_path = m.record_file("bin.dat", data)
|
||||
assert abs_path.read_bytes() == data
|
||||
assert m.files["bin.dat"] == hashlib.sha256(data).hexdigest()
|
||||
|
||||
def test_record_existing(self, tmp_path):
|
||||
f = tmp_path / "existing.txt"
|
||||
f.write_text("content", encoding="utf-8")
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_existing("existing.txt")
|
||||
assert m.files["existing.txt"] == _sha256(f)
|
||||
|
||||
|
||||
class TestManifestPathTraversal:
|
||||
def test_record_file_rejects_parent_traversal(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
with pytest.raises(ValueError, match="outside"):
|
||||
m.record_file("../escape.txt", "bad")
|
||||
|
||||
def test_record_file_rejects_absolute_path(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
with pytest.raises(ValueError, match="Absolute paths"):
|
||||
m.record_file("/tmp/escape.txt", "bad")
|
||||
|
||||
def test_record_existing_rejects_parent_traversal(self, tmp_path):
|
||||
escape = tmp_path.parent / "escape.txt"
|
||||
escape.write_text("evil", encoding="utf-8")
|
||||
try:
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
with pytest.raises(ValueError, match="outside"):
|
||||
m.record_existing("../escape.txt")
|
||||
finally:
|
||||
escape.unlink(missing_ok=True)
|
||||
|
||||
def test_uninstall_skips_traversal_paths(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("safe.txt", "good")
|
||||
m._files["../outside.txt"] = "fakehash"
|
||||
m.save()
|
||||
removed, skipped = m.uninstall()
|
||||
assert len(removed) == 1
|
||||
assert removed[0].name == "safe.txt"
|
||||
|
||||
|
||||
class TestManifestCheckModified:
|
||||
def test_unmodified_file(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "original")
|
||||
assert m.check_modified() == []
|
||||
|
||||
def test_modified_file(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "original")
|
||||
(tmp_path / "f.txt").write_text("changed", encoding="utf-8")
|
||||
assert m.check_modified() == ["f.txt"]
|
||||
|
||||
def test_deleted_file_not_reported(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "original")
|
||||
(tmp_path / "f.txt").unlink()
|
||||
assert m.check_modified() == []
|
||||
|
||||
def test_symlink_treated_as_modified(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "original")
|
||||
target = tmp_path / "target.txt"
|
||||
target.write_text("target", encoding="utf-8")
|
||||
(tmp_path / "f.txt").unlink()
|
||||
(tmp_path / "f.txt").symlink_to(target)
|
||||
assert m.check_modified() == ["f.txt"]
|
||||
|
||||
|
||||
class TestManifestUninstall:
|
||||
def test_removes_unmodified(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("d/f.txt", "content")
|
||||
m.save()
|
||||
removed, skipped = m.uninstall()
|
||||
assert len(removed) == 1
|
||||
assert not (tmp_path / "d" / "f.txt").exists()
|
||||
assert not (tmp_path / "d").exists()
|
||||
assert skipped == []
|
||||
|
||||
def test_skips_modified(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "original")
|
||||
m.save()
|
||||
(tmp_path / "f.txt").write_text("modified", encoding="utf-8")
|
||||
removed, skipped = m.uninstall()
|
||||
assert removed == []
|
||||
assert len(skipped) == 1
|
||||
assert (tmp_path / "f.txt").exists()
|
||||
|
||||
def test_force_removes_modified(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "original")
|
||||
m.save()
|
||||
(tmp_path / "f.txt").write_text("modified", encoding="utf-8")
|
||||
removed, skipped = m.uninstall(force=True)
|
||||
assert len(removed) == 1
|
||||
assert skipped == []
|
||||
|
||||
def test_already_deleted_file(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "content")
|
||||
m.save()
|
||||
(tmp_path / "f.txt").unlink()
|
||||
removed, skipped = m.uninstall()
|
||||
assert removed == []
|
||||
assert skipped == []
|
||||
|
||||
def test_removes_manifest_file(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path, version="1.0")
|
||||
m.record_file("f.txt", "content")
|
||||
m.save()
|
||||
assert m.manifest_path.exists()
|
||||
m.uninstall()
|
||||
assert not m.manifest_path.exists()
|
||||
|
||||
def test_cleans_empty_parent_dirs(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("a/b/c/f.txt", "content")
|
||||
m.save()
|
||||
m.uninstall()
|
||||
assert not (tmp_path / "a").exists()
|
||||
|
||||
def test_preserves_nonempty_parent_dirs(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("a/b/tracked.txt", "content")
|
||||
(tmp_path / "a" / "b" / "other.txt").write_text("keep", encoding="utf-8")
|
||||
m.save()
|
||||
m.uninstall()
|
||||
assert not (tmp_path / "a" / "b" / "tracked.txt").exists()
|
||||
assert (tmp_path / "a" / "b" / "other.txt").exists()
|
||||
|
||||
def test_symlink_skipped_without_force(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "original")
|
||||
m.save()
|
||||
target = tmp_path / "target.txt"
|
||||
target.write_text("target", encoding="utf-8")
|
||||
(tmp_path / "f.txt").unlink()
|
||||
(tmp_path / "f.txt").symlink_to(target)
|
||||
removed, skipped = m.uninstall()
|
||||
assert removed == []
|
||||
assert len(skipped) == 1
|
||||
|
||||
def test_symlink_removed_with_force(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "original")
|
||||
m.save()
|
||||
target = tmp_path / "target.txt"
|
||||
target.write_text("target", encoding="utf-8")
|
||||
(tmp_path / "f.txt").unlink()
|
||||
(tmp_path / "f.txt").symlink_to(target)
|
||||
removed, skipped = m.uninstall(force=True)
|
||||
assert len(removed) == 1
|
||||
assert target.exists()
|
||||
|
||||
|
||||
class TestManifestPersistence:
|
||||
def test_save_and_load_roundtrip(self, tmp_path):
|
||||
m = IntegrationManifest("myagent", tmp_path, version="2.0.1")
|
||||
m.record_file("dir/file.md", "# Hello")
|
||||
m.save()
|
||||
loaded = IntegrationManifest.load("myagent", tmp_path)
|
||||
assert loaded.key == "myagent"
|
||||
assert loaded.version == "2.0.1"
|
||||
assert loaded.files == m.files
|
||||
|
||||
def test_manifest_path(self, tmp_path):
|
||||
m = IntegrationManifest("copilot", tmp_path)
|
||||
assert m.manifest_path == tmp_path / ".specify" / "integrations" / "copilot.manifest.json"
|
||||
|
||||
def test_load_missing_raises(self, tmp_path):
|
||||
with pytest.raises(FileNotFoundError):
|
||||
IntegrationManifest.load("nonexistent", tmp_path)
|
||||
|
||||
def test_save_creates_directories(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "content")
|
||||
path = m.save()
|
||||
assert path.exists()
|
||||
data = json.loads(path.read_text(encoding="utf-8"))
|
||||
assert data["integration"] == "test"
|
||||
|
||||
def test_save_preserves_installed_at(self, tmp_path):
|
||||
m = IntegrationManifest("test", tmp_path)
|
||||
m.record_file("f.txt", "content")
|
||||
m.save()
|
||||
first_ts = m._installed_at
|
||||
m.save()
|
||||
assert m._installed_at == first_ts
|
||||
|
||||
|
||||
class TestManifestLoadValidation:
|
||||
def test_load_non_dict_raises(self, tmp_path):
|
||||
path = tmp_path / ".specify" / "integrations" / "bad.manifest.json"
|
||||
path.parent.mkdir(parents=True)
|
||||
path.write_text('"just a string"', encoding="utf-8")
|
||||
with pytest.raises(ValueError, match="JSON object"):
|
||||
IntegrationManifest.load("bad", tmp_path)
|
||||
|
||||
def test_load_bad_files_type_raises(self, tmp_path):
|
||||
path = tmp_path / ".specify" / "integrations" / "bad.manifest.json"
|
||||
path.parent.mkdir(parents=True)
|
||||
path.write_text(json.dumps({"files": ["not", "a", "dict"]}), encoding="utf-8")
|
||||
with pytest.raises(ValueError, match="mapping"):
|
||||
IntegrationManifest.load("bad", tmp_path)
|
||||
|
||||
def test_load_bad_files_values_raises(self, tmp_path):
|
||||
path = tmp_path / ".specify" / "integrations" / "bad.manifest.json"
|
||||
path.parent.mkdir(parents=True)
|
||||
path.write_text(json.dumps({"files": {"a.txt": 123}}), encoding="utf-8")
|
||||
with pytest.raises(ValueError, match="mapping"):
|
||||
IntegrationManifest.load("bad", tmp_path)
|
||||
|
||||
def test_load_invalid_json_raises(self, tmp_path):
|
||||
path = tmp_path / ".specify" / "integrations" / "bad.manifest.json"
|
||||
path.parent.mkdir(parents=True)
|
||||
path.write_text("{not valid json", encoding="utf-8")
|
||||
with pytest.raises(ValueError, match="invalid JSON"):
|
||||
IntegrationManifest.load("bad", tmp_path)
|
||||
@@ -1,45 +0,0 @@
|
||||
"""Tests for INTEGRATION_REGISTRY."""
|
||||
|
||||
import pytest
|
||||
|
||||
from specify_cli.integrations import (
|
||||
INTEGRATION_REGISTRY,
|
||||
_register,
|
||||
get_integration,
|
||||
)
|
||||
from specify_cli.integrations.base import MarkdownIntegration
|
||||
from .conftest import StubIntegration
|
||||
|
||||
|
||||
class TestRegistry:
|
||||
def test_registry_is_dict(self):
|
||||
assert isinstance(INTEGRATION_REGISTRY, dict)
|
||||
|
||||
def test_register_and_get(self):
|
||||
stub = StubIntegration()
|
||||
_register(stub)
|
||||
try:
|
||||
assert get_integration("stub") is stub
|
||||
finally:
|
||||
INTEGRATION_REGISTRY.pop("stub", None)
|
||||
|
||||
def test_get_missing_returns_none(self):
|
||||
assert get_integration("nonexistent-xyz") is None
|
||||
|
||||
def test_register_empty_key_raises(self):
|
||||
class EmptyKey(MarkdownIntegration):
|
||||
key = ""
|
||||
with pytest.raises(ValueError, match="empty key"):
|
||||
_register(EmptyKey())
|
||||
|
||||
def test_register_duplicate_raises(self):
|
||||
stub = StubIntegration()
|
||||
_register(stub)
|
||||
try:
|
||||
with pytest.raises(KeyError, match="already registered"):
|
||||
_register(StubIntegration())
|
||||
finally:
|
||||
INTEGRATION_REGISTRY.pop("stub", None)
|
||||
|
||||
def test_copilot_registered(self):
|
||||
assert "copilot" in INTEGRATION_REGISTRY
|
||||
@@ -1,96 +0,0 @@
|
||||
"""Tests for check_tool() — Claude Code CLI detection across install methods.
|
||||
|
||||
Covers issue https://github.com/github/spec-kit/issues/550:
|
||||
`specify check` reports "Claude Code CLI (not found)" even when claude is
|
||||
installed via npm-local (the default `claude` installer path).
|
||||
"""
|
||||
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
from specify_cli import check_tool
|
||||
|
||||
|
||||
class TestCheckToolClaude:
|
||||
"""Claude CLI detection must work for all install methods."""
|
||||
|
||||
def test_detected_via_migrate_installer_path(self, tmp_path):
|
||||
"""claude migrate-installer puts binary at ~/.claude/local/claude."""
|
||||
fake_claude = tmp_path / "claude"
|
||||
fake_claude.touch()
|
||||
|
||||
# Ensure npm-local path is missing so we only exercise migrate-installer path
|
||||
fake_missing = tmp_path / "nonexistent" / "claude"
|
||||
|
||||
with patch("specify_cli.CLAUDE_LOCAL_PATH", fake_claude), \
|
||||
patch("specify_cli.CLAUDE_NPM_LOCAL_PATH", fake_missing), \
|
||||
patch("shutil.which", return_value=None):
|
||||
assert check_tool("claude") is True
|
||||
|
||||
def test_detected_via_npm_local_path(self, tmp_path):
|
||||
"""npm-local install puts binary at ~/.claude/local/node_modules/.bin/claude."""
|
||||
fake_npm_claude = tmp_path / "node_modules" / ".bin" / "claude"
|
||||
fake_npm_claude.parent.mkdir(parents=True)
|
||||
fake_npm_claude.touch()
|
||||
|
||||
# Neither the migrate-installer path nor PATH has claude
|
||||
fake_migrate = tmp_path / "nonexistent" / "claude"
|
||||
|
||||
with patch("specify_cli.CLAUDE_LOCAL_PATH", fake_migrate), \
|
||||
patch("specify_cli.CLAUDE_NPM_LOCAL_PATH", fake_npm_claude), \
|
||||
patch("shutil.which", return_value=None):
|
||||
assert check_tool("claude") is True
|
||||
|
||||
def test_detected_via_path(self, tmp_path):
|
||||
"""claude on PATH (global npm install) should still work."""
|
||||
fake_missing = tmp_path / "nonexistent" / "claude"
|
||||
|
||||
with patch("specify_cli.CLAUDE_LOCAL_PATH", fake_missing), \
|
||||
patch("specify_cli.CLAUDE_NPM_LOCAL_PATH", fake_missing), \
|
||||
patch("shutil.which", return_value="/usr/local/bin/claude"):
|
||||
assert check_tool("claude") is True
|
||||
|
||||
def test_not_found_when_nowhere(self, tmp_path):
|
||||
"""Should return False when claude is genuinely not installed."""
|
||||
fake_missing = tmp_path / "nonexistent" / "claude"
|
||||
|
||||
with patch("specify_cli.CLAUDE_LOCAL_PATH", fake_missing), \
|
||||
patch("specify_cli.CLAUDE_NPM_LOCAL_PATH", fake_missing), \
|
||||
patch("shutil.which", return_value=None):
|
||||
assert check_tool("claude") is False
|
||||
|
||||
def test_tracker_updated_on_npm_local_detection(self, tmp_path):
|
||||
"""StepTracker should be marked 'available' for npm-local installs."""
|
||||
fake_npm_claude = tmp_path / "node_modules" / ".bin" / "claude"
|
||||
fake_npm_claude.parent.mkdir(parents=True)
|
||||
fake_npm_claude.touch()
|
||||
|
||||
fake_missing = tmp_path / "nonexistent" / "claude"
|
||||
tracker = MagicMock()
|
||||
|
||||
with patch("specify_cli.CLAUDE_LOCAL_PATH", fake_missing), \
|
||||
patch("specify_cli.CLAUDE_NPM_LOCAL_PATH", fake_npm_claude), \
|
||||
patch("shutil.which", return_value=None):
|
||||
result = check_tool("claude", tracker=tracker)
|
||||
|
||||
assert result is True
|
||||
tracker.complete.assert_called_once_with("claude", "available")
|
||||
|
||||
|
||||
class TestCheckToolOther:
|
||||
"""Non-Claude tools should be unaffected by the fix."""
|
||||
|
||||
def test_git_detected_via_path(self):
|
||||
with patch("shutil.which", return_value="/usr/bin/git"):
|
||||
assert check_tool("git") is True
|
||||
|
||||
def test_missing_tool(self):
|
||||
with patch("shutil.which", return_value=None):
|
||||
assert check_tool("nonexistent-tool") is False
|
||||
|
||||
def test_kiro_fallback(self):
|
||||
"""kiro-cli detection should try both kiro-cli and kiro."""
|
||||
def fake_which(name):
|
||||
return "/usr/bin/kiro" if name == "kiro" else None
|
||||
|
||||
with patch("shutil.which", side_effect=fake_which):
|
||||
assert check_tool("kiro-cli") is True
|
||||
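A sketch of the lookup order these tests pin down. It is not the actual check_tool implementation; CLAUDE_LOCAL_PATH and CLAUDE_NPM_LOCAL_PATH stand in for the module-level constants patched above, using the locations named in the test docstrings.

```python
import shutil
from pathlib import Path

# Assumed locations, per the test docstrings above.
CLAUDE_LOCAL_PATH = Path.home() / ".claude" / "local" / "claude"
CLAUDE_NPM_LOCAL_PATH = Path.home() / ".claude" / "local" / "node_modules" / ".bin" / "claude"


def claude_is_available() -> bool:
    """True if claude is on PATH or at either known local install location."""
    return (
        shutil.which("claude") is not None
        or CLAUDE_LOCAL_PATH.exists()
        or CLAUDE_NPM_LOCAL_PATH.exists()
    )
```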
@@ -18,7 +18,6 @@ from datetime import datetime, timezone
|
||||
|
||||
from specify_cli.extensions import (
|
||||
CatalogEntry,
|
||||
CORE_COMMAND_NAMES,
|
||||
ExtensionManifest,
|
||||
ExtensionRegistry,
|
||||
ExtensionManager,
|
||||
@@ -64,7 +63,7 @@ def valid_manifest_data():
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.test-ext.hello",
|
||||
"name": "speckit.test.hello",
|
||||
"file": "commands/hello.md",
|
||||
"description": "Test command",
|
||||
}
|
||||
@@ -72,7 +71,7 @@ def valid_manifest_data():
|
||||
},
|
||||
"hooks": {
|
||||
"after_tasks": {
|
||||
"command": "speckit.test-ext.hello",
|
||||
"command": "speckit.test.hello",
|
||||
"optional": True,
|
||||
"prompt": "Run test?",
|
||||
}
|
||||
@@ -190,18 +189,7 @@ class TestExtensionManifest:
|
||||
assert manifest.version == "1.0.0"
|
||||
assert manifest.description == "A test extension"
|
||||
assert len(manifest.commands) == 1
|
||||
assert manifest.commands[0]["name"] == "speckit.test-ext.hello"
|
||||
|
||||
def test_core_command_names_match_bundled_templates(self):
|
||||
"""Core command reservations should stay aligned with bundled templates."""
|
||||
commands_dir = Path(__file__).resolve().parent.parent / "templates" / "commands"
|
||||
expected = {
|
||||
command_file.stem
|
||||
for command_file in commands_dir.iterdir()
|
||||
if command_file.is_file() and command_file.suffix == ".md"
|
||||
}
|
||||
|
||||
assert CORE_COMMAND_NAMES == expected
|
||||
assert manifest.commands[0]["name"] == "speckit.test.hello"
|
||||
|
||||
def test_missing_required_field(self, temp_dir):
|
||||
"""Test manifest missing required field."""
|
||||
@@ -601,172 +589,6 @@ class TestExtensionManager:
|
||||
with pytest.raises(ExtensionError, match="already installed"):
|
||||
manager.install_from_directory(extension_dir, "0.1.0", register_commands=False)
|
||||
|
||||
def test_install_rejects_extension_id_in_core_namespace(self, temp_dir, project_dir):
|
||||
"""Install should reject extension IDs that shadow core commands."""
|
||||
import yaml
|
||||
|
||||
ext_dir = temp_dir / "analyze-ext"
|
||||
ext_dir.mkdir()
|
||||
(ext_dir / "commands").mkdir()
|
||||
|
||||
manifest_data = {
|
||||
"schema_version": "1.0",
|
||||
"extension": {
|
||||
"id": "analyze",
|
||||
"name": "Analyze Extension",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.analyze.extra",
|
||||
"file": "commands/cmd.md",
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
|
||||
(ext_dir / "extension.yml").write_text(yaml.dump(manifest_data))
|
||||
(ext_dir / "commands" / "cmd.md").write_text("---\ndescription: Test\n---\n\nBody")
|
||||
|
||||
manager = ExtensionManager(project_dir)
|
||||
with pytest.raises(ValidationError, match="conflicts with core command namespace"):
|
||||
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
|
||||
|
||||
def test_install_rejects_alias_without_extension_namespace(self, temp_dir, project_dir):
|
||||
"""Install should reject legacy short aliases that can shadow core commands."""
|
||||
import yaml
|
||||
|
||||
ext_dir = temp_dir / "alias-shortcut"
|
||||
ext_dir.mkdir()
|
||||
(ext_dir / "commands").mkdir()
|
||||
|
||||
manifest_data = {
|
||||
"schema_version": "1.0",
|
||||
"extension": {
|
||||
"id": "alias-shortcut",
|
||||
"name": "Alias Shortcut",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.alias-shortcut.cmd",
|
||||
"file": "commands/cmd.md",
|
||||
"aliases": ["speckit.shortcut"],
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
|
||||
(ext_dir / "extension.yml").write_text(yaml.dump(manifest_data))
|
||||
(ext_dir / "commands" / "cmd.md").write_text("---\ndescription: Test\n---\n\nBody")
|
||||
|
||||
manager = ExtensionManager(project_dir)
|
||||
with pytest.raises(ValidationError, match="Invalid alias 'speckit.shortcut'"):
|
||||
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
|
||||
|
||||
def test_install_rejects_namespace_squatting(self, temp_dir, project_dir):
|
||||
"""Install should reject commands and aliases outside the extension namespace."""
|
||||
import yaml
|
||||
|
||||
ext_dir = temp_dir / "squat-ext"
|
||||
ext_dir.mkdir()
|
||||
(ext_dir / "commands").mkdir()
|
||||
|
||||
manifest_data = {
|
||||
"schema_version": "1.0",
|
||||
"extension": {
|
||||
"id": "squat-ext",
|
||||
"name": "Squat Extension",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.other-ext.cmd",
|
||||
"file": "commands/cmd.md",
|
||||
"aliases": ["speckit.squat-ext.ok"],
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
|
||||
(ext_dir / "extension.yml").write_text(yaml.dump(manifest_data))
|
||||
(ext_dir / "commands" / "cmd.md").write_text("---\ndescription: Test\n---\n\nBody")
|
||||
|
||||
manager = ExtensionManager(project_dir)
|
||||
with pytest.raises(ValidationError, match="must use extension namespace 'squat-ext'"):
|
||||
manager.install_from_directory(ext_dir, "0.1.0", register_commands=False)
|
||||
|
||||
def test_install_rejects_command_collision_with_installed_extension(self, temp_dir, project_dir):
|
||||
"""Install should reject names already claimed by an installed legacy extension."""
|
||||
import yaml
|
||||
|
||||
first_dir = temp_dir / "ext-one"
|
||||
first_dir.mkdir()
|
||||
(first_dir / "commands").mkdir()
|
||||
first_manifest = {
|
||||
"schema_version": "1.0",
|
||||
"extension": {
|
||||
"id": "ext-one",
|
||||
"name": "Extension One",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.ext-one.sync",
|
||||
"file": "commands/cmd.md",
|
||||
"aliases": ["speckit.shared.sync"],
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
(first_dir / "extension.yml").write_text(yaml.dump(first_manifest))
|
||||
(first_dir / "commands" / "cmd.md").write_text("---\ndescription: Test\n---\n\nBody")
|
||||
installed_ext_dir = project_dir / ".specify" / "extensions" / "ext-one"
|
||||
installed_ext_dir.parent.mkdir(parents=True, exist_ok=True)
|
||||
shutil.copytree(first_dir, installed_ext_dir)
|
||||
|
||||
second_dir = temp_dir / "ext-two"
|
||||
second_dir.mkdir()
|
||||
(second_dir / "commands").mkdir()
|
||||
second_manifest = {
|
||||
"schema_version": "1.0",
|
||||
"extension": {
|
||||
"id": "shared",
|
||||
"name": "Shared Extension",
|
||||
"version": "1.0.0",
|
||||
"description": "Test",
|
||||
},
|
||||
"requires": {"speckit_version": ">=0.1.0"},
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.shared.sync",
|
||||
"file": "commands/cmd.md",
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
(second_dir / "extension.yml").write_text(yaml.dump(second_manifest))
|
||||
(second_dir / "commands" / "cmd.md").write_text("---\ndescription: Test\n---\n\nBody")
|
||||
|
||||
manager = ExtensionManager(project_dir)
|
||||
manager.registry.add("ext-one", {"version": "1.0.0", "source": "local"})
|
||||
|
||||
with pytest.raises(ValidationError, match="already provided by extension 'ext-one'"):
|
||||
manager.install_from_directory(second_dir, "0.1.0", register_commands=False)
|
||||
|
||||
def test_remove_extension(self, extension_dir, project_dir):
|
||||
"""Test removing an installed extension."""
|
||||
manager = ExtensionManager(project_dir)
|
||||
@@ -1030,10 +852,10 @@ $ARGUMENTS
|
||||
)
|
||||
|
||||
assert len(registered) == 1
|
||||
assert "speckit.test-ext.hello" in registered
|
||||
assert "speckit.test.hello" in registered
|
||||
|
||||
# Check command file was created
|
||||
cmd_file = claude_dir / "speckit.test-ext.hello.md"
|
||||
cmd_file = claude_dir / "speckit.test.hello.md"
|
||||
assert cmd_file.exists()
|
||||
|
||||
content = cmd_file.read_text()
|
||||
@@ -1063,9 +885,9 @@ $ARGUMENTS
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.ext-alias.cmd",
|
||||
"name": "speckit.alias.cmd",
|
||||
"file": "commands/cmd.md",
|
||||
"aliases": ["speckit.ext-alias.shortcut"],
|
||||
"aliases": ["speckit.shortcut"],
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1085,10 +907,10 @@ $ARGUMENTS
|
||||
registered = registrar.register_commands_for_claude(manifest, ext_dir, project_dir)
|
||||
|
||||
assert len(registered) == 2
|
||||
assert "speckit.ext-alias.cmd" in registered
|
||||
assert "speckit.ext-alias.shortcut" in registered
|
||||
assert (claude_dir / "speckit.ext-alias.cmd.md").exists()
|
||||
assert (claude_dir / "speckit.ext-alias.shortcut.md").exists()
|
||||
assert "speckit.alias.cmd" in registered
|
||||
assert "speckit.shortcut" in registered
|
||||
assert (claude_dir / "speckit.alias.cmd.md").exists()
|
||||
assert (claude_dir / "speckit.shortcut.md").exists()
|
||||
|
||||
def test_unregister_commands_for_codex_skills_uses_mapped_names(self, project_dir):
|
||||
"""Codex skill cleanup should use the same mapped names as registration."""
|
||||
@@ -1129,11 +951,11 @@ $ARGUMENTS
|
||||
registrar = CommandRegistrar()
|
||||
registrar.register_commands_for_agent("codex", manifest, extension_dir, project_dir)
|
||||
|
||||
skill_file = skills_dir / "speckit-test-ext-hello" / "SKILL.md"
|
||||
skill_file = skills_dir / "speckit-test-hello" / "SKILL.md"
|
||||
assert skill_file.exists()
|
||||
|
||||
content = skill_file.read_text()
|
||||
assert "name: speckit-test-ext-hello" in content
|
||||
assert "name: speckit-test-hello" in content
|
||||
assert "description: Test hello command" in content
|
||||
assert "compatibility:" in content
|
||||
assert "metadata:" in content
|
||||
@@ -1160,7 +982,7 @@ $ARGUMENTS
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.ext-scripted.plan",
|
||||
"name": "speckit.test.plan",
|
||||
"file": "commands/plan.md",
|
||||
"description": "Scripted command",
|
||||
}
|
||||
@@ -1198,7 +1020,7 @@ Agent __AGENT__
|
||||
registrar = CommandRegistrar()
|
||||
registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)
|
||||
|
||||
skill_file = skills_dir / "speckit-ext-scripted-plan" / "SKILL.md"
|
||||
skill_file = skills_dir / "speckit-test-plan" / "SKILL.md"
|
||||
assert skill_file.exists()
|
||||
|
||||
content = skill_file.read_text()
|
||||
@@ -1229,9 +1051,9 @@ Agent __AGENT__
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.ext-alias-skill.cmd",
|
||||
"name": "speckit.alias.cmd",
|
||||
"file": "commands/cmd.md",
|
||||
"aliases": ["speckit.ext-alias-skill.shortcut"],
|
||||
"aliases": ["speckit.shortcut"],
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1248,13 +1070,13 @@ Agent __AGENT__
|
||||
registrar = CommandRegistrar()
|
||||
registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)
|
||||
|
||||
primary = skills_dir / "speckit-ext-alias-skill-cmd" / "SKILL.md"
|
||||
alias = skills_dir / "speckit-ext-alias-skill-shortcut" / "SKILL.md"
|
||||
primary = skills_dir / "speckit-alias-cmd" / "SKILL.md"
|
||||
alias = skills_dir / "speckit-shortcut" / "SKILL.md"
|
||||
|
||||
assert primary.exists()
|
||||
assert alias.exists()
|
||||
assert "name: speckit-ext-alias-skill-cmd" in primary.read_text()
|
||||
assert "name: speckit-ext-alias-skill-shortcut" in alias.read_text()
|
||||
assert "name: speckit-alias-cmd" in primary.read_text()
|
||||
assert "name: speckit-shortcut" in alias.read_text()
|
||||
|
||||
def test_codex_skill_registration_uses_fallback_script_variant_without_init_options(
|
||||
self, project_dir, temp_dir
|
||||
@@ -1278,7 +1100,7 @@ Agent __AGENT__
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.ext-script-fallback.plan",
|
||||
"name": "speckit.fallback.plan",
|
||||
"file": "commands/plan.md",
|
||||
}
|
||||
]
|
||||
@@ -1310,7 +1132,7 @@ Then {AGENT_SCRIPT}
|
||||
registrar = CommandRegistrar()
|
||||
registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)
|
||||
|
||||
skill_file = skills_dir / "speckit-ext-script-fallback-plan" / "SKILL.md"
|
||||
skill_file = skills_dir / "speckit-fallback-plan" / "SKILL.md"
|
||||
assert skill_file.exists()
|
||||
|
||||
content = skill_file.read_text()
|
||||
@@ -1341,7 +1163,7 @@ Then {AGENT_SCRIPT}
|
||||
"provides": {
|
||||
"commands": [
|
||||
{
|
||||
"name": "speckit.ext-script-list-init.plan",
|
||||
"name": "speckit.list.plan",
|
||||
"file": "commands/plan.md",
|
||||
}
|
||||
]
|
||||
@@ -1372,7 +1194,7 @@ Run {SCRIPT}
|
||||
registrar = CommandRegistrar()
|
||||
registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)
|
||||
|
||||
content = (skills_dir / "speckit-ext-script-list-init-plan" / "SKILL.md").read_text()
|
||||
content = (skills_dir / "speckit-list-plan" / "SKILL.md").read_text()
|
||||
assert '.specify/scripts/bash/setup-plan.sh --json "$ARGUMENTS"' in content
|
||||
|
||||
def test_codex_skill_registration_fallback_prefers_powershell_on_windows(
|
||||
@@ -1399,7 +1221,7 @@ Run {SCRIPT}
"provides": {
"commands": [
{
"name": "speckit.ext-script-windows-fallback.plan",
"name": "speckit.windows.plan",
"file": "commands/plan.md",
}
]
@@ -1431,7 +1253,7 @@ Then {AGENT_SCRIPT}
registrar = CommandRegistrar()
registrar.register_commands_for_agent("codex", manifest, ext_dir, project_dir)

skill_file = skills_dir / "speckit-ext-script-windows-fallback-plan" / "SKILL.md"
skill_file = skills_dir / "speckit-windows-plan" / "SKILL.md"
assert skill_file.exists()

content = skill_file.read_text()
@@ -1453,14 +1275,14 @@ Then {AGENT_SCRIPT}
)

assert len(registered) == 1
assert "speckit.test-ext.hello" in registered
assert "speckit.test.hello" in registered

# Verify command file uses .agent.md extension
cmd_file = agents_dir / "speckit.test-ext.hello.agent.md"
cmd_file = agents_dir / "speckit.test.hello.agent.md"
assert cmd_file.exists()

# Verify NO plain .md file was created
plain_md_file = agents_dir / "speckit.test-ext.hello.md"
plain_md_file = agents_dir / "speckit.test.hello.md"
assert not plain_md_file.exists()

content = cmd_file.read_text()
@@ -1480,12 +1302,12 @@ Then {AGENT_SCRIPT}
)

# Verify companion .prompt.md file exists
prompt_file = project_dir / ".github" / "prompts" / "speckit.test-ext.hello.prompt.md"
prompt_file = project_dir / ".github" / "prompts" / "speckit.test.hello.prompt.md"
assert prompt_file.exists()

# Verify content has correct agent frontmatter
content = prompt_file.read_text()
assert content == "---\nagent: speckit.test-ext.hello\n---\n"
assert content == "---\nagent: speckit.test.hello\n---\n"

def test_copilot_aliases_get_companion_prompts(self, project_dir, temp_dir):
"""Test that aliases also get companion .prompt.md files for Copilot."""
@@ -1506,9 +1328,9 @@ Then {AGENT_SCRIPT}
"provides": {
"commands": [
{
"name": "speckit.ext-alias-copilot.cmd",
"name": "speckit.alias-copilot.cmd",
"file": "commands/cmd.md",
"aliases": ["speckit.ext-alias-copilot.shortcut"],
"aliases": ["speckit.shortcut-copilot"],
}
]
},
@@ -1535,8 +1357,8 @@ Then {AGENT_SCRIPT}

# Both primary and alias get companion .prompt.md
prompts_dir = project_dir / ".github" / "prompts"
assert (prompts_dir / "speckit.ext-alias-copilot.cmd.prompt.md").exists()
assert (prompts_dir / "speckit.ext-alias-copilot.shortcut.prompt.md").exists()
assert (prompts_dir / "speckit.alias-copilot.cmd.prompt.md").exists()
assert (prompts_dir / "speckit.shortcut-copilot.prompt.md").exists()

def test_non_copilot_agent_no_companion_file(self, extension_dir, project_dir):
"""Test that non-copilot agents do NOT create .prompt.md files."""
@@ -1609,7 +1431,7 @@ class TestIntegration:
assert installed[0]["id"] == "test-ext"

# Verify command registered
cmd_file = project_dir / ".claude" / "commands" / "speckit.test-ext.hello.md"
cmd_file = project_dir / ".claude" / "commands" / "speckit.test.hello.md"
assert cmd_file.exists()

# Verify registry has registered commands (now a dict keyed by agent)
@@ -1617,7 +1439,7 @@ class TestIntegration:
registered_commands = metadata["registered_commands"]
# Check that the command is registered for at least one agent
assert any(
"speckit.test-ext.hello" in cmds
"speckit.test.hello" in cmds
for cmds in registered_commands.values()
)

@@ -1643,8 +1465,8 @@ class TestIntegration:
assert "copilot" in metadata["registered_commands"]

# Verify files exist before cleanup
agent_file = agents_dir / "speckit.test-ext.hello.agent.md"
prompt_file = project_dir / ".github" / "prompts" / "speckit.test-ext.hello.prompt.md"
agent_file = agents_dir / "speckit.test.hello.agent.md"
prompt_file = project_dir / ".github" / "prompts" / "speckit.test.hello.prompt.md"
assert agent_file.exists()
assert prompt_file.exists()
@@ -2954,7 +2776,7 @@ class TestExtensionUpdateCLI:
"provides": {
"commands": [
{
"name": "speckit.test-ext.hello",
"name": "speckit.test.hello",
"file": "commands/hello.md",
"description": "Test command",
}
@@ -2962,7 +2784,7 @@ class TestExtensionUpdateCLI:
},
"hooks": {
"after_tasks": {
"command": "speckit.test-ext.hello",
"command": "speckit.test.hello",
"optional": True,
}
},
@@ -2991,7 +2813,7 @@ class TestExtensionUpdateCLI:
"description": "A test extension",
},
"requires": {"speckit_version": ">=0.1.0"},
"provides": {"commands": [{"name": "speckit.test-ext.hello", "file": "commands/hello.md"}]},
"provides": {"commands": [{"name": "speckit.test.hello", "file": "commands/hello.md"}]},
}

with zipfile.ZipFile(zip_path, "w") as zf:
@@ -3620,15 +3442,15 @@ class TestHookInvocationRendering:
[
{
"extension": "test-ext",
"command": "speckit.test-ext.hello",
"command": "speckit.test.hello",
"optional": False,
}
],
)

assert "Executing: `/skill:speckit-test-ext-hello`" in message
assert "EXECUTE_COMMAND: speckit.test-ext.hello" in message
assert "EXECUTE_COMMAND_INVOCATION: /skill:speckit-test-ext-hello" in message
assert "Executing: `/skill:speckit-test-hello`" in message
assert "EXECUTE_COMMAND: speckit.test.hello" in message
assert "EXECUTE_COMMAND_INVOCATION: /skill:speckit-test-hello" in message

def test_hook_executor_caches_init_options_lookup(self, project_dir, monkeypatch):
"""Init options should be loaded once per executor instance."""
@@ -269,146 +269,3 @@ class TestE2EFlow:
assert (git_repo / "specs" / branch).is_dir()
val = source_and_call(f'check_feature_branch "{branch}" "true"')
assert val.returncode == 0


# ── Allow Existing Branch Tests ──────────────────────────────────────────────


class TestAllowExistingBranch:
def test_allow_existing_switches_to_branch(self, git_repo: Path):
"""T006: Pre-create branch, verify script switches to it."""
subprocess.run(
["git", "checkout", "-b", "004-pre-exist"],
cwd=git_repo, check=True, capture_output=True,
)
subprocess.run(
["git", "checkout", "-"],
cwd=git_repo, check=True, capture_output=True,
)
result = run_script(
git_repo, "--allow-existing-branch", "--short-name", "pre-exist",
"--number", "4", "Pre-existing feature",
)
assert result.returncode == 0, result.stderr
current = subprocess.run(
["git", "rev-parse", "--abbrev-ref", "HEAD"],
cwd=git_repo, capture_output=True, text=True,
).stdout.strip()
assert current == "004-pre-exist", f"expected 004-pre-exist, got {current}"
def test_allow_existing_already_on_branch(self, git_repo: Path):
"""T007: Verify success when already on the target branch."""
subprocess.run(
["git", "checkout", "-b", "005-already-on"],
cwd=git_repo, check=True, capture_output=True,
)
result = run_script(
git_repo, "--allow-existing-branch", "--short-name", "already-on",
"--number", "5", "Already on branch",
)
assert result.returncode == 0, result.stderr

def test_allow_existing_creates_spec_dir(self, git_repo: Path):
"""T008: Verify spec directory created on existing branch."""
subprocess.run(
["git", "checkout", "-b", "006-spec-dir"],
cwd=git_repo, check=True, capture_output=True,
)
subprocess.run(
["git", "checkout", "-"],
cwd=git_repo, check=True, capture_output=True,
)
result = run_script(
git_repo, "--allow-existing-branch", "--short-name", "spec-dir",
"--number", "6", "Spec dir feature",
)
assert result.returncode == 0, result.stderr
assert (git_repo / "specs" / "006-spec-dir").is_dir()
assert (git_repo / "specs" / "006-spec-dir" / "spec.md").exists()
def test_without_flag_still_errors(self, git_repo: Path):
"""T009: Verify backwards compatibility (error without flag)."""
subprocess.run(
["git", "checkout", "-b", "007-no-flag"],
cwd=git_repo, check=True, capture_output=True,
)
subprocess.run(
["git", "checkout", "-"],
cwd=git_repo, check=True, capture_output=True,
)
result = run_script(
git_repo, "--short-name", "no-flag", "--number", "7", "No flag feature",
)
assert result.returncode != 0, "should fail without --allow-existing-branch"
assert "already exists" in result.stderr

def test_allow_existing_no_overwrite_spec(self, git_repo: Path):
"""T010: Pre-create spec.md with content, verify it is preserved."""
subprocess.run(
["git", "checkout", "-b", "008-no-overwrite"],
cwd=git_repo, check=True, capture_output=True,
)
spec_dir = git_repo / "specs" / "008-no-overwrite"
spec_dir.mkdir(parents=True)
spec_file = spec_dir / "spec.md"
spec_file.write_text("# My custom spec content\n")
subprocess.run(
["git", "checkout", "-"],
cwd=git_repo, check=True, capture_output=True,
)
result = run_script(
git_repo, "--allow-existing-branch", "--short-name", "no-overwrite",
"--number", "8", "No overwrite feature",
)
assert result.returncode == 0, result.stderr
assert spec_file.read_text() == "# My custom spec content\n"
def test_allow_existing_creates_branch_if_not_exists(self, git_repo: Path):
"""T011: Verify normal creation when branch doesn't exist."""
result = run_script(
git_repo, "--allow-existing-branch", "--short-name", "new-branch",
"New branch feature",
)
assert result.returncode == 0, result.stderr
current = subprocess.run(
["git", "rev-parse", "--abbrev-ref", "HEAD"],
cwd=git_repo, capture_output=True, text=True,
).stdout.strip()
assert "new-branch" in current

def test_allow_existing_with_json(self, git_repo: Path):
"""T012: Verify JSON output is correct."""
import json

subprocess.run(
["git", "checkout", "-b", "009-json-test"],
cwd=git_repo, check=True, capture_output=True,
)
subprocess.run(
["git", "checkout", "-"],
cwd=git_repo, check=True, capture_output=True,
)
result = run_script(
git_repo, "--allow-existing-branch", "--json", "--short-name", "json-test",
"--number", "9", "JSON test",
)
assert result.returncode == 0, result.stderr
data = json.loads(result.stdout)
assert data["BRANCH_NAME"] == "009-json-test"
def test_allow_existing_no_git(self, no_git_dir: Path):
"""T013: Verify flag is silently ignored in non-git repos."""
result = run_script(
no_git_dir, "--allow-existing-branch", "--short-name", "no-git",
"No git feature",
)
assert result.returncode == 0, result.stderr


class TestAllowExistingBranchPowerShell:
def test_powershell_supports_allow_existing_branch_flag(self):
"""Static guard: PS script exposes and uses -AllowExistingBranch."""
contents = CREATE_FEATURE_PS.read_text(encoding="utf-8")
assert "-AllowExistingBranch" in contents
# Ensure the flag is referenced in script logic, not just declared
assert "AllowExistingBranch" in contents.replace("-AllowExistingBranch", "")