Compare commits
22 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5c9d9a40ac | ||
|
|
ee6b83c1dd | ||
|
|
b31ca19962 | ||
|
|
15917c2094 | ||
|
|
f89361cd3d | ||
|
|
0f0e19da33 | ||
|
|
21b3dbf904 | ||
|
|
708e887022 | ||
|
|
78e6c9953c | ||
|
|
e21820fb92 | ||
|
|
51705217d4 | ||
|
|
e979ef0c7c | ||
|
|
5d1a174a95 | ||
|
|
f13eb86c0f | ||
|
|
6e2af26867 | ||
|
|
167038ca3c | ||
|
|
9140e9b009 | ||
|
|
fc8eb0434a | ||
|
|
fd61b8742d | ||
|
|
4591cf7df6 | ||
|
|
03ee3401e7 | ||
|
|
4b98c20f5d |
67
.github/workflows/docs.yml
vendored
Normal file
67
.github/workflows/docs.yml
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
# Build and deploy DocFX documentation to GitHub Pages
|
||||
name: Deploy Documentation to Pages
|
||||
|
||||
on:
|
||||
# Runs on pushes targeting the default branch
|
||||
push:
|
||||
branches: ["main"]
|
||||
paths:
|
||||
- 'docs/**'
|
||||
|
||||
# Allows you to run this workflow manually from the Actions tab
|
||||
workflow_dispatch:
|
||||
|
||||
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
|
||||
permissions:
|
||||
contents: read
|
||||
pages: write
|
||||
id-token: write
|
||||
|
||||
# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
|
||||
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
|
||||
concurrency:
|
||||
group: "pages"
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
# Build job
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0 # Fetch all history for git info
|
||||
|
||||
- name: Setup .NET
|
||||
uses: actions/setup-dotnet@v4
|
||||
with:
|
||||
dotnet-version: '8.x'
|
||||
|
||||
- name: Setup DocFX
|
||||
run: dotnet tool install -g docfx
|
||||
|
||||
- name: Build with DocFX
|
||||
run: |
|
||||
cd docs
|
||||
docfx docfx.json
|
||||
|
||||
- name: Setup Pages
|
||||
uses: actions/configure-pages@v5
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-pages-artifact@v3
|
||||
with:
|
||||
path: 'docs/_site'
|
||||
|
||||
# Deploy job
|
||||
deploy:
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
steps:
|
||||
- name: Deploy to GitHub Pages
|
||||
id: deployment
|
||||
uses: actions/deploy-pages@v4
|
||||
191
.github/workflows/manual-release.yml
vendored
191
.github/workflows/manual-release.yml
vendored
@@ -1,191 +0,0 @@
|
||||
name: Manual Release
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version_bump:
|
||||
description: 'Version bump type'
|
||||
required: true
|
||||
default: 'patch'
|
||||
type: choice
|
||||
options:
|
||||
- patch
|
||||
- minor
|
||||
- major
|
||||
|
||||
jobs:
|
||||
manual_release:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Calculate new version
|
||||
id: version
|
||||
run: |
|
||||
# Get the latest tag, or use v0.0.0 if no tags exist
|
||||
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
|
||||
echo "latest_tag=$LATEST_TAG" >> $GITHUB_OUTPUT
|
||||
|
||||
# Extract version number
|
||||
VERSION=$(echo $LATEST_TAG | sed 's/v//')
|
||||
IFS='.' read -ra VERSION_PARTS <<< "$VERSION"
|
||||
MAJOR=${VERSION_PARTS[0]:-0}
|
||||
MINOR=${VERSION_PARTS[1]:-0}
|
||||
PATCH=${VERSION_PARTS[2]:-0}
|
||||
|
||||
# Increment based on input
|
||||
case "${{ github.event.inputs.version_bump }}" in
|
||||
"major")
|
||||
MAJOR=$((MAJOR + 1))
|
||||
MINOR=0
|
||||
PATCH=0
|
||||
;;
|
||||
"minor")
|
||||
MINOR=$((MINOR + 1))
|
||||
PATCH=0
|
||||
;;
|
||||
"patch")
|
||||
PATCH=$((PATCH + 1))
|
||||
;;
|
||||
esac
|
||||
|
||||
NEW_VERSION="v$MAJOR.$MINOR.$PATCH"
|
||||
echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
|
||||
echo "New version will be: $NEW_VERSION (was $LATEST_TAG)"
|
||||
|
||||
- name: Create release package
|
||||
run: |
|
||||
# Create base package directory structure
|
||||
mkdir -p sdd-package-base
|
||||
|
||||
# Copy common folders to base
|
||||
echo "Packaging SDD common components..."
|
||||
|
||||
if [ -d "memory" ]; then
|
||||
cp -r memory sdd-package-base/
|
||||
echo "✓ Copied memory folder ($(find memory -type f | wc -l) files)"
|
||||
else
|
||||
echo "⚠️ memory folder not found"
|
||||
fi
|
||||
|
||||
if [ -d "scripts" ]; then
|
||||
cp -r scripts sdd-package-base/
|
||||
echo "✓ Copied scripts folder ($(find scripts -type f | wc -l) files)"
|
||||
else
|
||||
echo "⚠️ scripts folder not found"
|
||||
fi
|
||||
|
||||
# Create Claude Code package
|
||||
echo "Creating Claude Code package..."
|
||||
mkdir -p sdd-claude-package
|
||||
cp -r sdd-package-base/* sdd-claude-package/
|
||||
if [ -d "agent_templates/claude" ]; then
|
||||
cp -r agent_templates/claude sdd-claude-package/.claude
|
||||
echo "✓ Added Claude Code commands ($(find agent_templates/claude -type f | wc -l) files)"
|
||||
else
|
||||
echo "⚠️ agent_templates/claude folder not found"
|
||||
fi
|
||||
|
||||
# Create Gemini CLI package
|
||||
echo "Creating Gemini CLI package..."
|
||||
mkdir -p sdd-gemini-package
|
||||
cp -r sdd-package-base/* sdd-gemini-package/
|
||||
if [ -d "agent_templates/gemini" ]; then
|
||||
cp -r agent_templates/gemini sdd-gemini-package/.gemini
|
||||
# Move GEMINI.md to root for easier access
|
||||
if [ -f "sdd-gemini-package/.gemini/GEMINI.md" ]; then
|
||||
mv sdd-gemini-package/.gemini/GEMINI.md sdd-gemini-package/GEMINI.md
|
||||
echo "✓ Moved GEMINI.md to root of Gemini package"
|
||||
fi
|
||||
# Remove empty .gemini folder if it only contained GEMINI.md
|
||||
if [ -d "sdd-gemini-package/.gemini" ] && [ -z "$(find sdd-gemini-package/.gemini -type f)" ]; then
|
||||
rm -rf sdd-gemini-package/.gemini
|
||||
echo "✓ Removed empty .gemini folder"
|
||||
fi
|
||||
echo "✓ Added Gemini CLI commands ($(find agent_templates/gemini -type f | wc -l) files)"
|
||||
else
|
||||
echo "⚠️ agent_templates/gemini folder not found"
|
||||
fi
|
||||
|
||||
# Create GitHub Copilot package
|
||||
echo "Creating GitHub Copilot package..."
|
||||
mkdir -p sdd-copilot-package
|
||||
cp -r sdd-package-base/* sdd-copilot-package/
|
||||
if [ -d "agent_templates/copilot" ]; then
|
||||
mkdir -p sdd-copilot-package/.github
|
||||
cp -r agent_templates/copilot/* sdd-copilot-package/.github/
|
||||
echo "✓ Added Copilot instructions to .github ($(find agent_templates/copilot -type f | wc -l) files)"
|
||||
else
|
||||
echo "⚠️ agent_templates/copilot folder not found"
|
||||
fi
|
||||
|
||||
# Create archive files for each package
|
||||
echo "Creating archive files..."
|
||||
cd sdd-claude-package && zip -r ../spec-kit-template-claude-${{ steps.version.outputs.new_version }}.zip . && cd ..
|
||||
|
||||
cd sdd-gemini-package && zip -r ../spec-kit-template-gemini-${{ steps.version.outputs.new_version }}.zip . && cd ..
|
||||
|
||||
cd sdd-copilot-package && zip -r ../spec-kit-template-copilot-${{ steps.version.outputs.new_version }}.zip . && cd ..
|
||||
|
||||
echo ""
|
||||
echo "📦 Packages created:"
|
||||
echo "Claude: $(ls -lh spec-kit-template-claude-*.zip | awk '{print $5}')"
|
||||
echo "Gemini: $(ls -lh spec-kit-template-gemini-*.zip | awk '{print $5}')"
|
||||
echo "Copilot: $(ls -lh spec-kit-template-copilot-*.zip | awk '{print $5}')"
|
||||
echo "Copilot: $(ls -lh sdd-template-copilot-*.zip | awk '{print $5}')"
|
||||
|
||||
- name: Generate detailed release notes
|
||||
run: |
|
||||
LAST_TAG=${{ steps.version.outputs.latest_tag }}
|
||||
|
||||
# Get commit range
|
||||
if [ "$LAST_TAG" = "v0.0.0" ]; then
|
||||
COMMIT_RANGE="HEAD~10..HEAD"
|
||||
COMMITS=$(git log --oneline --pretty=format:"- %s" $COMMIT_RANGE 2>/dev/null || echo "- Initial release")
|
||||
else
|
||||
COMMIT_RANGE="$LAST_TAG..HEAD"
|
||||
COMMITS=$(git log --oneline --pretty=format:"- %s" $COMMIT_RANGE 2>/dev/null || echo "- No changes since last release")
|
||||
fi
|
||||
|
||||
# Count files in each directory
|
||||
CLAUDE_COUNT=$(find agent_templates/claude -type f 2>/dev/null | wc -l || echo "0")
|
||||
GEMINI_COUNT=$(find agent_templates/gemini -type f 2>/dev/null | wc -l || echo "0")
|
||||
COPILOT_COUNT=$(find agent_templates/copilot -type f 2>/dev/null | wc -l || echo "0")
|
||||
MEMORY_COUNT=$(find memory -type f 2>/dev/null | wc -l || echo "0")
|
||||
SCRIPTS_COUNT=$(find scripts -type f 2>/dev/null | wc -l || echo "0")
|
||||
|
||||
cat > release_notes.md << EOF
|
||||
Template release ${{ steps.version.outputs.new_version }}
|
||||
|
||||
Updated specification-driven development templates for GitHub Copilot, Claude Code, and Gemini CLI.
|
||||
|
||||
Download the template for your preferred AI assistant:
|
||||
- spec-kit-template-copilot-${{ steps.version.outputs.new_version }}.zip
|
||||
- spec-kit-template-claude-${{ steps.version.outputs.new_version }}.zip
|
||||
- spec-kit-template-gemini-${{ steps.version.outputs.new_version }}.zip
|
||||
|
||||
Changes since $LAST_TAG:
|
||||
$COMMITS
|
||||
EOF
|
||||
|
||||
- name: Create GitHub Release
|
||||
run: |
|
||||
# Remove 'v' prefix from version for release title
|
||||
VERSION_NO_V=${{ steps.version.outputs.new_version }}
|
||||
VERSION_NO_V=${VERSION_NO_V#v}
|
||||
|
||||
gh release create ${{ steps.version.outputs.new_version }} \
|
||||
spec-kit-template-copilot-${{ steps.version.outputs.new_version }}.zip \
|
||||
spec-kit-template-claude-${{ steps.version.outputs.new_version }}.zip \
|
||||
spec-kit-template-gemini-${{ steps.version.outputs.new_version }}.zip \
|
||||
--title "Spec Kit Templates - $VERSION_NO_V" \
|
||||
--notes-file release_notes.md
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
105
.github/workflows/release.yml
vendored
105
.github/workflows/release.yml
vendored
@@ -3,6 +3,10 @@ name: Create Release
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
paths:
|
||||
- 'memory/**'
|
||||
- 'scripts/**'
|
||||
- 'templates/**'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
@@ -57,105 +61,8 @@ jobs:
|
||||
- name: Create release package
|
||||
if: steps.check_release.outputs.exists == 'false'
|
||||
run: |
|
||||
# Create base package directory structure
|
||||
mkdir -p sdd-package-base
|
||||
|
||||
# Copy common folders to base
|
||||
if [ -d "memory" ]; then
|
||||
cp -r memory sdd-package-base/
|
||||
echo "Copied memory folder"
|
||||
fi
|
||||
|
||||
if [ -d "scripts" ]; then
|
||||
cp -r scripts sdd-package-base/
|
||||
echo "Copied scripts folder"
|
||||
fi
|
||||
|
||||
if [ -d "templates" ]; then
|
||||
mkdir -p sdd-package-base/templates
|
||||
# Copy templates folder but exclude the commands directory
|
||||
find templates -type f -not -path "templates/commands/*" -exec cp --parents {} sdd-package-base/ \;
|
||||
echo "Copied templates folder (excluding commands directory)"
|
||||
fi
|
||||
|
||||
# Generate command files for each agent from source templates
|
||||
generate_commands() {
|
||||
local agent=$1
|
||||
local ext=$2
|
||||
local arg_format=$3
|
||||
local output_dir=$4
|
||||
|
||||
mkdir -p "$output_dir"
|
||||
|
||||
for template in templates/commands/*.md; do
|
||||
if [[ -f "$template" ]]; then
|
||||
name=$(basename "$template" .md)
|
||||
description=$(awk '/^description:/ {gsub(/^description: *"?/, ""); gsub(/"$/, ""); print; exit}' "$template" | tr -d '\r')
|
||||
content=$(awk '/^---$/{if(++count==2) start=1; next} start' "$template" | sed "s/{ARGS}/$arg_format/g")
|
||||
|
||||
case $ext in
|
||||
"toml")
|
||||
{
|
||||
echo "description = \"$description\""
|
||||
echo ""
|
||||
echo "prompt = \"\"\""
|
||||
echo "$content"
|
||||
echo "\"\"\""
|
||||
} > "$output_dir/$name.$ext"
|
||||
;;
|
||||
"md")
|
||||
echo "$content" > "$output_dir/$name.$ext"
|
||||
;;
|
||||
"prompt.md")
|
||||
{
|
||||
echo "# $(echo "$description" | sed 's/\. .*//')"
|
||||
echo ""
|
||||
echo "$content"
|
||||
} > "$output_dir/$name.$ext"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Create Claude Code package
|
||||
mkdir -p sdd-claude-package
|
||||
cp -r sdd-package-base/* sdd-claude-package/
|
||||
mkdir -p sdd-claude-package/.claude/commands
|
||||
generate_commands "claude" "md" "\$ARGUMENTS" "sdd-claude-package/.claude/commands"
|
||||
echo "Created Claude Code package"
|
||||
|
||||
# Create Gemini CLI package
|
||||
mkdir -p sdd-gemini-package
|
||||
cp -r sdd-package-base/* sdd-gemini-package/
|
||||
mkdir -p sdd-gemini-package/.gemini/commands
|
||||
generate_commands "gemini" "toml" "{{args}}" "sdd-gemini-package/.gemini/commands"
|
||||
if [ -f "agent_templates/gemini/GEMINI.md" ]; then
|
||||
cp agent_templates/gemini/GEMINI.md sdd-gemini-package/GEMINI.md
|
||||
fi
|
||||
echo "Created Gemini CLI package"
|
||||
|
||||
# Create GitHub Copilot package
|
||||
mkdir -p sdd-copilot-package
|
||||
cp -r sdd-package-base/* sdd-copilot-package/
|
||||
mkdir -p sdd-copilot-package/.github/prompts
|
||||
generate_commands "copilot" "prompt.md" "\$ARGUMENTS" "sdd-copilot-package/.github/prompts"
|
||||
echo "Created GitHub Copilot package"
|
||||
|
||||
# Create archive files for each package
|
||||
cd sdd-claude-package && zip -r ../spec-kit-template-claude-${{ steps.get_tag.outputs.new_version }}.zip . && cd ..
|
||||
|
||||
cd sdd-gemini-package && zip -r ../spec-kit-template-gemini-${{ steps.get_tag.outputs.new_version }}.zip . && cd ..
|
||||
|
||||
cd sdd-copilot-package && zip -r ../spec-kit-template-copilot-${{ steps.get_tag.outputs.new_version }}.zip . && cd ..
|
||||
|
||||
# List contents for verification
|
||||
echo "Claude package contents:"
|
||||
unzip -l spec-kit-template-claude-${{ steps.get_tag.outputs.new_version }}.zip | head -10
|
||||
echo "Gemini package contents:"
|
||||
unzip -l spec-kit-template-gemini-${{ steps.get_tag.outputs.new_version }}.zip | head -10
|
||||
echo "Copilot package contents:"
|
||||
unzip -l spec-kit-template-copilot-${{ steps.get_tag.outputs.new_version }}.zip | head -10
|
||||
chmod +x scripts/create-release-packages.sh
|
||||
./scripts/create-release-packages.sh ${{ steps.get_tag.outputs.new_version }}
|
||||
|
||||
- name: Generate release notes
|
||||
if: steps.check_release.outputs.exists == 'false'
|
||||
|
||||
@@ -11,10 +11,13 @@ These are one time installations required to be able to test your changes locall
|
||||
1. Install [Python 3.11+](https://www.python.org/downloads/)
|
||||
1. Install [uv](https://docs.astral.sh/uv/) for package management
|
||||
1. Install [Git](https://git-scm.com/downloads)
|
||||
1. Have an AI coding agent available: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), or [Gemini CLI](https://github.com/google-gemini/gemini-cli)
|
||||
1. Have an AI coding agent available: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), or [Gemini CLI](https://github.com/google-gemini/gemini-cli) are recommended, but we're working on adding support for other agents as well.
|
||||
|
||||
## Submitting a pull request
|
||||
|
||||
>[!NOTE]
|
||||
>If your pull request introduces a large change that materially impacts the work of the CLI or the rest of the repository (e.g., you're introducing new templates, arguments, or otherwise major changes), make sure that it was **discussed and agreed upon** by the project maintainers. Pull requests with large changes that did not have a prior conversation and agreement will be closed.
|
||||
|
||||
1. Fork and clone the repository
|
||||
1. Configure and install the dependencies: `uv sync`
|
||||
1. Make sure the CLI works on your machine: `uv run specify --help`
|
||||
|
||||
@@ -118,7 +118,7 @@ Our research and experimentation focus on:
|
||||
## 📖 Learn more
|
||||
|
||||
- **[Complete Spec-Driven Development Methodology](./spec-driven.md)** - Deep dive into the full process
|
||||
- **[Detailed Walkthrough](#detailed-process)** - Step-by-step implementation guide
|
||||
- **[Detailed Walkthrough](#-detailed-process)** - Step-by-step implementation guide
|
||||
|
||||
---
|
||||
|
||||
|
||||
8
docs/.gitignore
vendored
Normal file
8
docs/.gitignore
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
# DocFX build output
|
||||
_site/
|
||||
obj/
|
||||
.docfx/
|
||||
|
||||
# Temporary files
|
||||
*.tmp
|
||||
*.log
|
||||
33
docs/README.md
Normal file
33
docs/README.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# Documentation
|
||||
|
||||
This folder contains the documentation source files for Spec Kit, built using [DocFX](https://dotnet.github.io/docfx/).
|
||||
|
||||
## Building Locally
|
||||
|
||||
To build the documentation locally:
|
||||
|
||||
1. Install DocFX:
|
||||
```bash
|
||||
dotnet tool install -g docfx
|
||||
```
|
||||
|
||||
2. Build the documentation:
|
||||
```bash
|
||||
cd docs
|
||||
docfx docfx.json --serve
|
||||
```
|
||||
|
||||
3. Open your browser to `http://localhost:8080` to view the documentation.
|
||||
|
||||
## Structure
|
||||
|
||||
- `docfx.json` - DocFX configuration file
|
||||
- `index.md` - Main documentation homepage
|
||||
- `toc.yml` - Table of contents configuration
|
||||
- `installation.md` - Installation guide
|
||||
- `quickstart.md` - Quick start guide
|
||||
- `_site/` - Generated documentation output (ignored by git)
|
||||
|
||||
## Deployment
|
||||
|
||||
Documentation is automatically built and deployed to GitHub Pages when changes are pushed to the `main` branch. The workflow is defined in `.github/workflows/docs.yml`.
|
||||
70
docs/docfx.json
Normal file
70
docs/docfx.json
Normal file
@@ -0,0 +1,70 @@
|
||||
{
|
||||
"build": {
|
||||
"content": [
|
||||
{
|
||||
"files": [
|
||||
"*.md",
|
||||
"toc.yml"
|
||||
]
|
||||
},
|
||||
{
|
||||
"files": [
|
||||
"../README.md",
|
||||
"../CONTRIBUTING.md",
|
||||
"../CODE_OF_CONDUCT.md",
|
||||
"../SECURITY.md",
|
||||
"../SUPPORT.md"
|
||||
],
|
||||
"dest": "."
|
||||
}
|
||||
],
|
||||
"resource": [
|
||||
{
|
||||
"files": [
|
||||
"images/**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"files": [
|
||||
"../media/**"
|
||||
],
|
||||
"dest": "media"
|
||||
}
|
||||
],
|
||||
"overwrite": [
|
||||
{
|
||||
"files": [
|
||||
"apidoc/**.md"
|
||||
],
|
||||
"exclude": [
|
||||
"obj/**",
|
||||
"_site/**"
|
||||
]
|
||||
}
|
||||
],
|
||||
"dest": "_site",
|
||||
"globalMetadataFiles": [],
|
||||
"fileMetadataFiles": [],
|
||||
"template": [
|
||||
"default",
|
||||
"modern"
|
||||
],
|
||||
"postProcessors": [],
|
||||
"markdownEngineName": "markdig",
|
||||
"noLangKeyword": false,
|
||||
"keepFileLink": false,
|
||||
"cleanupCacheHistory": false,
|
||||
"disableGitFeatures": false,
|
||||
"globalMetadata": {
|
||||
"_appTitle": "Spec Kit Documentation",
|
||||
"_appName": "Spec Kit",
|
||||
"_appFooter": "Spec Kit - A specification-driven development toolkit",
|
||||
"_enableSearch": true,
|
||||
"_disableContribution": false,
|
||||
"_gitContribute": {
|
||||
"repo": "https://github.com/github/spec-kit",
|
||||
"branch": "main"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
62
docs/index.md
Normal file
62
docs/index.md
Normal file
@@ -0,0 +1,62 @@
|
||||
# Spec Kit
|
||||
|
||||
*Build high-quality software faster.*
|
||||
|
||||
**An effort to allow organizations to focus on product scenarios rather than writing undifferentiated code with the help of Spec-Driven Development.**
|
||||
|
||||
## What is Spec-Driven Development?
|
||||
|
||||
Spec-Driven Development **flips the script** on traditional software development. For decades, code has been king — specifications were just scaffolding we built and discarded once the "real work" of coding began. Spec-Driven Development changes this: **specifications become executable**, directly generating working implementations rather than just guiding them.
|
||||
|
||||
## Getting Started
|
||||
|
||||
- [Installation Guide](installation.md)
|
||||
- [Quick Start Guide](quickstart.md)
|
||||
- [Local Development](local-development.md)
|
||||
|
||||
## Core Philosophy
|
||||
|
||||
Spec-Driven Development is a structured process that emphasizes:
|
||||
|
||||
- **Intent-driven development** where specifications define the "_what_" before the "_how_"
|
||||
- **Rich specification creation** using guardrails and organizational principles
|
||||
- **Multi-step refinement** rather than one-shot code generation from prompts
|
||||
- **Heavy reliance** on advanced AI model capabilities for specification interpretation
|
||||
|
||||
## Development Phases
|
||||
|
||||
| Phase | Focus | Key Activities |
|
||||
|-------|-------|----------------|
|
||||
| **0-to-1 Development** ("Greenfield") | Generate from scratch | <ul><li>Start with high-level requirements</li><li>Generate specifications</li><li>Plan implementation steps</li><li>Build production-ready applications</li></ul> |
|
||||
| **Creative Exploration** | Parallel implementations | <ul><li>Explore diverse solutions</li><li>Support multiple technology stacks & architectures</li><li>Experiment with UX patterns</li></ul> |
|
||||
| **Iterative Enhancement** ("Brownfield") | Brownfield modernization | <ul><li>Add features iteratively</li><li>Modernize legacy systems</li><li>Adapt processes</li></ul> |
|
||||
|
||||
## Experimental Goals
|
||||
|
||||
Our research and experimentation focus on:
|
||||
|
||||
### Technology Independence
|
||||
- Create applications using diverse technology stacks
|
||||
- Validate the hypothesis that Spec-Driven Development is a process not tied to specific technologies, programming languages, or frameworks
|
||||
|
||||
### Enterprise Constraints
|
||||
- Demonstrate mission-critical application development
|
||||
- Incorporate organizational constraints (cloud providers, tech stacks, engineering practices)
|
||||
- Support enterprise design systems and compliance requirements
|
||||
|
||||
### User-Centric Development
|
||||
- Build applications for different user cohorts and preferences
|
||||
- Support various development approaches (from vibe-coding to AI-native development)
|
||||
|
||||
### Creative & Iterative Processes
|
||||
- Validate the concept of parallel implementation exploration
|
||||
- Provide robust iterative feature development workflows
|
||||
- Extend processes to handle upgrades and modernization tasks
|
||||
|
||||
## Contributing
|
||||
|
||||
Please see our [Contributing Guide](CONTRIBUTING.md) for information on how to contribute to this project.
|
||||
|
||||
## Support
|
||||
|
||||
For support, please check our [Support Guide](SUPPORT.md) or open an issue on GitHub.
|
||||
69
docs/installation.md
Normal file
69
docs/installation.md
Normal file
@@ -0,0 +1,69 @@
|
||||
# Installation Guide
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- **Linux/macOS** (or WSL2 on Windows)
|
||||
- AI coding agent: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), or [Gemini CLI](https://github.com/google-gemini/gemini-cli)
|
||||
- [uv](https://docs.astral.sh/uv/) for package management
|
||||
- [Python 3.11+](https://www.python.org/downloads/)
|
||||
- [Git](https://git-scm.com/downloads)
|
||||
|
||||
## Installation
|
||||
|
||||
### Initialize a New Project
|
||||
|
||||
The easiest way to get started is to initialize a new project:
|
||||
|
||||
```bash
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME>
|
||||
```
|
||||
|
||||
Or initialize in the current directory:
|
||||
|
||||
```bash
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init --here
|
||||
```
|
||||
|
||||
### Specify AI Agent
|
||||
|
||||
You can proactively specify your AI agent during initialization:
|
||||
|
||||
```bash
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai claude
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai gemini
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai copilot
|
||||
```
|
||||
|
||||
### Ignore Agent Tools Check
|
||||
|
||||
If you prefer to get the templates without checking for the right tools:
|
||||
|
||||
```bash
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai claude --ignore-agent-tools
|
||||
```
|
||||
|
||||
## Verification
|
||||
|
||||
After initialization, you should see the following commands available in your AI agent:
|
||||
- `/specify` - Create specifications
|
||||
- `/plan` - Generate implementation plans
|
||||
- `/tasks` - Break down into actionable tasks
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Git Credential Manager on Linux
|
||||
|
||||
If you're having issues with Git authentication on Linux, you can install Git Credential Manager:
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
echo "Downloading Git Credential Manager v2.6.1..."
|
||||
wget https://github.com/git-ecosystem/git-credential-manager/releases/download/v2.6.1/gcm-linux_amd64.2.6.1.deb
|
||||
echo "Installing Git Credential Manager..."
|
||||
sudo dpkg -i gcm-linux_amd64.2.6.1.deb
|
||||
echo "Configuring Git to use GCM..."
|
||||
git config --global credential.helper manager
|
||||
echo "Cleaning up..."
|
||||
rm gcm-linux_amd64.2.6.1.deb
|
||||
```
|
||||
165
docs/local-development.md
Normal file
165
docs/local-development.md
Normal file
@@ -0,0 +1,165 @@
|
||||
# Local Development Guide
|
||||
|
||||
This guide shows how to iterate on the `specify` CLI locally without publishing a release or committing to `main` first.
|
||||
|
||||
## 1. Clone and Switch Branches
|
||||
|
||||
```bash
|
||||
git clone https://github.com/github/spec-kit.git
|
||||
cd spec-kit
|
||||
# Work on a feature branch
|
||||
git checkout -b your-feature-branch
|
||||
```
|
||||
|
||||
## 2. Run the CLI Directly (Fastest Feedback)
|
||||
|
||||
You can execute the CLI via the module entrypoint without installing anything:
|
||||
|
||||
```bash
|
||||
# From repo root
|
||||
python -m src.specify_cli --help
|
||||
python -m src.specify_cli init demo-project --ai claude --ignore-agent-tools
|
||||
```
|
||||
|
||||
If you prefer invoking the script file style (uses shebang):
|
||||
|
||||
```bash
|
||||
python src/specify_cli/__init__.py init demo-project
|
||||
```
|
||||
|
||||
## 3. Use Editable Install (Isolated Environment)
|
||||
|
||||
Create an isolated environment using `uv` so dependencies resolve exactly like end users get them:
|
||||
|
||||
```bash
|
||||
# Create & activate virtual env (uv auto-manages .venv)
|
||||
uv venv
|
||||
source .venv/bin/activate # or on Windows: .venv\\Scripts\\activate
|
||||
|
||||
# Install project in editable mode
|
||||
uv pip install -e .
|
||||
|
||||
# Now 'specify' entrypoint is available
|
||||
specify --help
|
||||
```
|
||||
|
||||
Re-running after code edits requires no reinstall because of editable mode.
|
||||
|
||||
## 4. Invoke with uvx Directly From Git (Current Branch)
|
||||
|
||||
`uvx` can run from a local path (or a Git ref) to simulate user flows:
|
||||
|
||||
```bash
|
||||
uvx --from . specify init demo-uvx --ai copilot --ignore-agent-tools
|
||||
```
|
||||
|
||||
You can also point uvx at a specific branch without merging:
|
||||
|
||||
```bash
|
||||
# Push your working branch first
|
||||
git push origin your-feature-branch
|
||||
uvx --from git+https://github.com/github/spec-kit.git@your-feature-branch specify init demo-branch-test
|
||||
```
|
||||
|
||||
### 4a. Absolute Path uvx (Run From Anywhere)
|
||||
|
||||
If you're in another directory, use an absolute path instead of `.`:
|
||||
|
||||
```bash
|
||||
uvx --from /mnt/c/GitHub/spec-kit specify --help
|
||||
uvx --from /mnt/c/GitHub/spec-kit specify init demo-anywhere --ai copilot --ignore-agent-tools
|
||||
```
|
||||
|
||||
Set an environment variable for convenience:
|
||||
```bash
|
||||
export SPEC_KIT_SRC=/mnt/c/GitHub/spec-kit
|
||||
uvx --from "$SPEC_KIT_SRC" specify init demo-env --ai copilot --ignore-agent-tools
|
||||
```
|
||||
|
||||
(Optional) Define a shell function:
|
||||
```bash
|
||||
specify-dev() { uvx --from /mnt/c/GitHub/spec-kit specify "$@"; }
|
||||
# Then
|
||||
specify-dev --help
|
||||
```
|
||||
|
||||
## 5. Testing Script Permission Logic
|
||||
|
||||
After running an `init`, check that shell scripts are executable on POSIX systems:
|
||||
|
||||
```bash
|
||||
ls -l scripts | grep .sh
|
||||
# Expect owner execute bit (e.g. -rwxr-xr-x)
|
||||
```
|
||||
On Windows this step is a no-op.
|
||||
|
||||
## 6. Run Lint / Basic Checks (Add Your Own)
|
||||
|
||||
Currently no enforced lint config is bundled, but you can quickly sanity check importability:
|
||||
```bash
|
||||
python -c "import specify_cli; print('Import OK')"
|
||||
```
|
||||
|
||||
## 7. Build a Wheel Locally (Optional)
|
||||
|
||||
Validate packaging before publishing:
|
||||
|
||||
```bash
|
||||
uv build
|
||||
ls dist/
|
||||
```
|
||||
Install the built artifact into a fresh throwaway environment if needed.
|
||||
|
||||
## 8. Using a Temporary Workspace
|
||||
|
||||
When testing `init --here` in a dirty directory, create a temp workspace:
|
||||
|
||||
```bash
|
||||
mkdir /tmp/spec-test && cd /tmp/spec-test
|
||||
python -m src.specify_cli init --here --ai claude --ignore-agent-tools # if repo copied here
|
||||
```
|
||||
Or copy only the modified CLI portion if you want a lighter sandbox.
|
||||
|
||||
## 9. Debug Network / TLS Skips
|
||||
|
||||
If you need to bypass TLS validation while experimenting:
|
||||
|
||||
```bash
|
||||
specify check --skip-tls
|
||||
specify init demo --skip-tls --ai gemini --ignore-agent-tools
|
||||
```
|
||||
(Use only for local experimentation.)
|
||||
|
||||
## 10. Rapid Edit Loop Summary
|
||||
|
||||
| Action | Command |
|
||||
|--------|---------|
|
||||
| Run CLI directly | `python -m src.specify_cli --help` |
|
||||
| Editable install | `uv pip install -e .` then `specify ...` |
|
||||
| Local uvx run (repo root) | `uvx --from . specify ...` |
|
||||
| Local uvx run (abs path) | `uvx --from /mnt/c/GitHub/spec-kit specify ...` |
|
||||
| Git branch uvx | `uvx --from git+URL@branch specify ...` |
|
||||
| Build wheel | `uv build` |
|
||||
|
||||
## 11. Cleaning Up
|
||||
|
||||
Remove build artifacts / virtual env quickly:
|
||||
```bash
|
||||
rm -rf .venv dist build *.egg-info
|
||||
```
|
||||
|
||||
## 12. Common Issues
|
||||
|
||||
| Symptom | Fix |
|
||||
|---------|-----|
|
||||
| `ModuleNotFoundError: typer` | Run `uv pip install -e .` |
|
||||
| Scripts not executable (Linux) | Re-run init (logic adds bits) or `chmod +x scripts/*.sh` |
|
||||
| Git step skipped | You passed `--no-git` or Git not installed |
|
||||
| TLS errors on corporate network | Try `--skip-tls` (not for production) |
|
||||
|
||||
## 13. Next Steps
|
||||
|
||||
- Update docs and run through Quick Start using your modified CLI
|
||||
- Open a PR when satisfied
|
||||
- (Optional) Tag a release once changes land in `main`
|
||||
|
||||
114
docs/quickstart.md
Normal file
114
docs/quickstart.md
Normal file
@@ -0,0 +1,114 @@
|
||||
# Quick Start Guide
|
||||
|
||||
This guide will help you get started with Spec-Driven Development using Spec Kit.
|
||||
|
||||
## The 4-Step Process
|
||||
|
||||
### 1. Install Specify
|
||||
|
||||
Initialize your project depending on the coding agent you're using:
|
||||
|
||||
```bash
|
||||
uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME>
|
||||
```
|
||||
|
||||
### 2. Create the Spec
|
||||
|
||||
Use the `/specify` command to describe what you want to build. Focus on the **what** and **why**, not the tech stack.
|
||||
|
||||
```bash
|
||||
/specify Build an application that can help me organize my photos in separate photo albums. Albums are grouped by date and can be re-organized by dragging and dropping on the main page. Albums are never in other nested albums. Within each album, photos are previewed in a tile-like interface.
|
||||
```
|
||||
|
||||
### 3. Create a Technical Implementation Plan
|
||||
|
||||
Use the `/plan` command to provide your tech stack and architecture choices.
|
||||
|
||||
```bash
|
||||
/plan The application uses Vite with minimal number of libraries. Use vanilla HTML, CSS, and JavaScript as much as possible. Images are not uploaded anywhere and metadata is stored in a local SQLite database.
|
||||
```
|
||||
|
||||
### 4. Break Down and Implement
|
||||
|
||||
Use `/tasks` to create an actionable task list, then ask your agent to implement the feature.
|
||||
|
||||
## Detailed Example: Building Taskify
|
||||
|
||||
Here's a complete example of building a team productivity platform:
|
||||
|
||||
### Step 1: Define Requirements with `/specify`
|
||||
|
||||
```text
|
||||
Develop Taskify, a team productivity platform. It should allow users to create projects, add team members,
|
||||
assign tasks, comment and move tasks between boards in Kanban style. In this initial phase for this feature,
|
||||
let's call it "Create Taskify," let's have multiple users but the users will be declared ahead of time, predefined.
|
||||
I want five users in two different categories, one product manager and four engineers. Let's create three
|
||||
different sample projects. Let's have the standard Kanban columns for the status of each task, such as "To Do,"
|
||||
"In Progress," "In Review," and "Done." There will be no login for this application as this is just the very
|
||||
first testing thing to ensure that our basic features are set up. For each task in the UI for a task card,
|
||||
you should be able to change the current status of the task between the different columns in the Kanban work board.
|
||||
You should be able to leave an unlimited number of comments for a particular card. You should be able to, from that task
|
||||
card, assign one of the valid users. When you first launch Taskify, it's going to give you a list of the five users to pick
|
||||
from. There will be no password required. When you click on a user, you go into the main view, which displays the list of
|
||||
projects. When you click on a project, you open the Kanban board for that project. You're going to see the columns.
|
||||
You'll be able to drag and drop cards back and forth between different columns. You will see any cards that are
|
||||
assigned to you, the currently logged in user, in a different color from all the other ones, so you can quickly
|
||||
see yours. You can edit any comments that you make, but you can't edit comments that other people made. You can
|
||||
delete any comments that you made, but you can't delete comments anybody else made.
|
||||
```
|
||||
|
||||
### Step 2: Refine the Specification
|
||||
|
||||
After the initial specification is created, clarify any missing requirements:
|
||||
|
||||
```text
|
||||
For each sample project or project that you create there should be a variable number of tasks between 5 and 15
|
||||
tasks for each one randomly distributed into different states of completion. Make sure that there's at least
|
||||
one task in each stage of completion.
|
||||
```
|
||||
|
||||
Also validate the specification checklist:
|
||||
|
||||
```text
|
||||
Read the review and acceptance checklist, and check off each item in the checklist if the feature spec meets the criteria. Leave it empty if it does not.
|
||||
```
|
||||
|
||||
### Step 3: Generate Technical Plan with `/plan`
|
||||
|
||||
Be specific about your tech stack and technical requirements:
|
||||
|
||||
```text
|
||||
We are going to generate this using .NET Aspire, using Postgres as the database. The frontend should use
|
||||
Blazor server with drag-and-drop task boards, real-time updates. There should be a REST API created with a projects API,
|
||||
tasks API, and a notifications API.
|
||||
```
|
||||
|
||||
### Step 4: Validate and Implement
|
||||
|
||||
Have your AI agent audit the implementation plan:
|
||||
|
||||
```text
|
||||
Now I want you to go and audit the implementation plan and the implementation detail files.
|
||||
Read through it with an eye on determining whether or not there is a sequence of tasks that you need
|
||||
to be doing that are obvious from reading this. Because I don't know if there's enough here.
|
||||
```
|
||||
|
||||
Finally, implement the solution:
|
||||
|
||||
```text
|
||||
implement specs/002-create-taskify/plan.md
|
||||
```
|
||||
|
||||
## Key Principles
|
||||
|
||||
- **Be explicit** about what you're building and why
|
||||
- **Don't focus on tech stack** during specification phase
|
||||
- **Iterate and refine** your specifications before implementation
|
||||
- **Validate** the plan before coding begins
|
||||
- **Let the AI agent handle** the implementation details
|
||||
|
||||
## Next Steps
|
||||
|
||||
- Read the complete methodology for in-depth guidance
|
||||
- Check out more examples in the repository
|
||||
- Explore the source code on GitHub
|
||||
17
docs/toc.yml
Normal file
17
docs/toc.yml
Normal file
@@ -0,0 +1,17 @@
|
||||
# Home page
|
||||
- name: Home
|
||||
href: index.md
|
||||
|
||||
# Getting started section
|
||||
- name: Getting Started
|
||||
items:
|
||||
- name: Installation
|
||||
href: installation.md
|
||||
- name: Quick Start
|
||||
href: quickstart.md
|
||||
|
||||
# Development workflows
|
||||
- name: Development
|
||||
items:
|
||||
- name: Local Development
|
||||
href: local-development.md
|
||||
@@ -1,6 +1,6 @@
|
||||
[project]
|
||||
name = "specify-cli"
|
||||
version = "0.0.2"
|
||||
version = "0.0.3"
|
||||
description = "Setup tool for Specify spec-driven development projects"
|
||||
requires-python = ">=3.11"
|
||||
dependencies = [
|
||||
@@ -9,6 +9,7 @@ dependencies = [
|
||||
"httpx",
|
||||
"platformdirs",
|
||||
"readchar",
|
||||
"truststore>=0.10.4",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
115
scripts/create-release-packages.sh
Normal file
115
scripts/create-release-packages.sh
Normal file
@@ -0,0 +1,115 @@
|
||||
#!/usr/bin/env bash
set -euo pipefail

# create-release-packages.sh
# Builds the Spec Kit template release archives, one per supported AI assistant.
# Usage: ./scripts/create-release-packages.sh <version>
#   <version> must carry the leading 'v' (e.g. v0.0.4).

# Exactly one argument: the release version.
if [[ $# -ne 1 ]]; then
    echo "Usage: $0 <version-with-v-prefix>" >&2
    exit 1
fi

NEW_VERSION="$1"
# Enforce a strict vMAJOR.MINOR.PATCH shape before touching the filesystem.
if [[ ! $NEW_VERSION =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    echo "Version must look like v0.0.0" >&2
    exit 1
fi

echo "Building release packages for $NEW_VERSION"

# Drop any leftovers from a previous run (staging dirs and zip archives alike).
rm -rf \
    sdd-package-base \
    sdd-claude-package \
    sdd-gemini-package \
    sdd-copilot-package \
    "spec-kit-template-claude-${NEW_VERSION}.zip" \
    "spec-kit-template-gemini-${NEW_VERSION}.zip" \
    "spec-kit-template-copilot-${NEW_VERSION}.zip" || true

mkdir -p sdd-package-base

# Stage the folders shared by every assistant package.
if [[ -d memory ]]; then
    cp -r memory sdd-package-base/
    echo "Copied memory folder"
fi

if [[ -d scripts ]]; then
    # This script itself must not ship inside the release archives.
    rsync -a --exclude 'create-release-packages.sh' scripts/ sdd-package-base/scripts/
    echo "Copied scripts folder (excluding create-release-packages.sh)"
fi

if [[ -d templates ]]; then
    mkdir -p sdd-package-base/templates
    # Command templates are rendered per assistant later, so leave them out here.
    # NOTE(review): `cp --parents` is GNU coreutils only — confirm release builds
    # always run on Linux (BSD/macOS cp lacks this flag).
    find templates -type f -not -path "templates/commands/*" -exec cp --parents {} sdd-package-base/ \;
    echo "Copied templates folder (excluding commands directory)"
fi
|
||||
|
||||
# Function to generate assistant command files/prompts
# Args: agent ext arg_format output_dir
#   agent      - assistant identifier (informational; not referenced in the body)
#   ext        - output extension: "toml", "md", or "prompt.md"
#   arg_format - literal text substituted for the {ARGS} placeholder
#   output_dir - destination directory for the rendered command files
generate_commands() {
  local agent=$1
  local ext=$2
  local arg_format=$3
  local output_dir=$4
  mkdir -p "$output_dir"
  for template in templates/commands/*.md; do
    [[ -f "$template" ]] || continue
    local name
    name=$(basename "$template" .md)
    # Pull the description value out of the YAML front matter, stripping the
    # surrounding quotes and any Windows carriage returns.
    local description
    description=$(awk '/^description:/ {gsub(/^description: *"?/, ""); gsub(/"$/, ""); print; exit}' "$template" | tr -d '\r')
    # Body = everything after the second '---' line, with {ARGS} substituted.
    local content
    content=$(awk '/^---$/{if(++count==2) start=1; next} start' "$template" | sed "s/{ARGS}/$arg_format/g")
    case $ext in
      "toml")
        # Escape backslashes and double quotes so the description remains a
        # valid TOML basic string. Previously a '"' (or '\') inside a template
        # description produced a syntactically broken .toml file.
        local toml_description=${description//\\/\\\\}
        toml_description=${toml_description//\"/\\\"}
        {
          echo "description = \"$toml_description\""; echo ""; echo "prompt = \"\"\""; echo "$content"; echo "\"\"\"";
        } > "$output_dir/$name.$ext"
        ;;
      "md")
        echo "$content" > "$output_dir/$name.$ext"
        ;;
      "prompt.md")
        # Preserve front matter exactly, just substitute {ARGS}
        sed "s/{ARGS}/$arg_format/g" "$template" > "$output_dir/$name.$ext"
        ;;
    esac
  done
}
|
||||
|
||||
# --- Claude Code package ---------------------------------------------------
mkdir -p sdd-claude-package sdd-claude-package/.claude/commands
cp -r sdd-package-base/* sdd-claude-package/
generate_commands claude md '$ARGUMENTS' sdd-claude-package/.claude/commands
echo "Created Claude Code package"

# --- Gemini CLI package ----------------------------------------------------
mkdir -p sdd-gemini-package sdd-gemini-package/.gemini/commands
cp -r sdd-package-base/* sdd-gemini-package/
generate_commands gemini toml '{{args}}' sdd-gemini-package/.gemini/commands
# Ship the Gemini context file when the repo provides one.
if [[ -f agent_templates/gemini/GEMINI.md ]]; then
    cp agent_templates/gemini/GEMINI.md sdd-gemini-package/GEMINI.md
fi
echo "Created Gemini CLI package"

# --- GitHub Copilot package ------------------------------------------------
mkdir -p sdd-copilot-package sdd-copilot-package/.github/prompts
cp -r sdd-package-base/* sdd-copilot-package/
generate_commands copilot prompt.md '$ARGUMENTS' sdd-copilot-package/.github/prompts
echo "Created GitHub Copilot package"

# --- Zip each staged package into its release archive ----------------------
for assistant in claude gemini copilot; do
    ( cd "sdd-${assistant}-package" && zip -r "../spec-kit-template-${assistant}-${NEW_VERSION}.zip" . )
done

echo "Package archives created:"
ls -1 spec-kit-template-*-"${NEW_VERSION}".zip

# Spot-check one archive's listing (non-fatal if unzip is unavailable).
unzip -l "spec-kit-template-copilot-${NEW_VERSION}.zip" | head -10 || true
|
||||
@@ -70,11 +70,11 @@ Today, practicing SDD requires assembling existing tools and maintaining discipl
|
||||
|
||||
The key is treating specifications as the source of truth, with code as the generated output that serves the specification rather than the other way around.
|
||||
|
||||
## Streamlining SDD with Claude Commands
|
||||
## Streamlining SDD with Commands
|
||||
|
||||
The SDD methodology is significantly enhanced through two powerful Claude commands that automate the specification and planning workflow:
|
||||
The SDD methodology is significantly enhanced through three powerful commands that automate the specification → planning → tasking workflow:
|
||||
|
||||
### The `new_feature` Command
|
||||
### The `/specify` Command
|
||||
|
||||
This command transforms a simple feature description (the user-prompt) into a complete, structured specification with automatic repository management:
|
||||
|
||||
@@ -83,7 +83,7 @@ This command transforms a simple feature description (the user-prompt) into a co
|
||||
3. **Template-Based Generation**: Copies and customizes the feature specification template with your requirements
|
||||
4. **Directory Structure**: Creates the proper `specs/[branch-name]/` structure for all related documents
|
||||
|
||||
### The `generate_plan` Command
|
||||
### The `/plan` Command
|
||||
|
||||
Once a feature specification exists, this command creates a comprehensive implementation plan:
|
||||
|
||||
@@ -91,14 +91,24 @@ Once a feature specification exists, this command creates a comprehensive implem
|
||||
2. **Constitutional Compliance**: Ensures alignment with project constitution and architectural principles
|
||||
3. **Technical Translation**: Converts business requirements into technical architecture and implementation details
|
||||
4. **Detailed Documentation**: Generates supporting documents for data models, API contracts, and test scenarios
|
||||
5. **Manual Testing Plans**: Creates step-by-step validation procedures for each user story
|
||||
5. **Quickstart Validation**: Produces a quickstart guide capturing key validation scenarios
|
||||
|
||||
### The `/tasks` Command
|
||||
|
||||
After a plan is created, this command analyzes the plan and related design documents to generate an executable task list:
|
||||
|
||||
1. **Inputs**: Reads `plan.md` (required) and, if present, `data-model.md`, `contracts/`, and `research.md`
|
||||
2. **Task Derivation**: Converts contracts, entities, and scenarios into specific tasks
|
||||
3. **Parallelization**: Marks independent tasks `[P]` and outlines safe parallel groups
|
||||
4. **Output**: Writes `tasks.md` in the feature directory, ready for execution by a Task agent
|
||||
|
||||
### Example: Building a Chat Feature
|
||||
|
||||
Here's how these commands transform the traditional development workflow:
|
||||
|
||||
**Traditional Approach:**
|
||||
```
|
||||
|
||||
```text
|
||||
1. Write a PRD in a document (2-3 hours)
|
||||
2. Create design documents (2-3 hours)
|
||||
3. Set up project structure manually (30 minutes)
|
||||
@@ -108,30 +118,33 @@ Total: ~12 hours of documentation work
|
||||
```
|
||||
|
||||
**SDD with Commands Approach:**
|
||||
|
||||
```bash
|
||||
# Step 1: Create the feature specification (5 minutes)
|
||||
/new_feature Real-time chat system with message history and user presence
|
||||
/specify Real-time chat system with message history and user presence
|
||||
|
||||
# This automatically:
|
||||
# - Creates branch "003-chat-system"
|
||||
# - Generates specs/003-chat-system/feature-spec.md
|
||||
# - Generates specs/003-chat-system/spec.md
|
||||
# - Populates it with structured requirements
|
||||
|
||||
# Step 2: Generate implementation plan (10 minutes)
|
||||
/generate_plan WebSocket for real-time messaging, PostgreSQL for history, Redis for presence
|
||||
# Step 2: Generate implementation plan (5 minutes)
|
||||
/plan WebSocket for real-time messaging, PostgreSQL for history, Redis for presence
|
||||
|
||||
# Step 3: Generate executable tasks (5 minutes)
|
||||
/tasks
|
||||
|
||||
# This automatically creates:
|
||||
# - specs/003-chat-system/implementation-plan.md
|
||||
# - specs/003-chat-system/implementation-details/
|
||||
# - 00-research.md (WebSocket library comparisons)
|
||||
# - 02-data-model.md (Message and User schemas)
|
||||
# - 03-api-contracts.md (WebSocket events, REST endpoints)
|
||||
# - 06-contract-tests.md (Message flow scenarios)
|
||||
# - 08-inter-library-tests.md (Database-WebSocket integration)
|
||||
# - specs/003-chat-system/manual-testing.md
|
||||
# - specs/003-chat-system/plan.md
|
||||
# - specs/003-chat-system/research.md (WebSocket library comparisons)
|
||||
# - specs/003-chat-system/data-model.md (Message and User schemas)
|
||||
# - specs/003-chat-system/contracts/ (WebSocket events, REST endpoints)
|
||||
# - specs/003-chat-system/quickstart.md (Key validation scenarios)
|
||||
# - specs/003-chat-system/tasks.md (Task list derived from the plan)
|
||||
```
|
||||
|
||||
In 15 minutes, you have:
|
||||
|
||||
- A complete feature specification with user stories and acceptance criteria
|
||||
- A detailed implementation plan with technology choices and rationale
|
||||
- API contracts and data models ready for code generation
|
||||
@@ -156,7 +169,8 @@ The true power of these commands lies not just in automation, but in how the tem
|
||||
#### 1. **Preventing Premature Implementation Details**
|
||||
|
||||
The feature specification template explicitly instructs:
|
||||
```
|
||||
|
||||
```text
|
||||
- ✅ Focus on WHAT users need and WHY
|
||||
- ❌ Avoid HOW to implement (no tech stack, APIs, code structure)
|
||||
```
|
||||
@@ -166,7 +180,8 @@ This constraint forces the LLM to maintain proper abstraction levels. When an LL
|
||||
#### 2. **Forcing Explicit Uncertainty Markers**
|
||||
|
||||
Both templates mandate the use of `[NEEDS CLARIFICATION]` markers:
|
||||
```
|
||||
|
||||
```text
|
||||
When creating this spec from a user prompt:
|
||||
1. **Mark all ambiguities**: Use [NEEDS CLARIFICATION: specific question]
|
||||
2. **Don't guess**: If the prompt doesn't specify something, mark it
|
||||
@@ -177,7 +192,8 @@ This prevents the common LLM behavior of making plausible but potentially incorr
|
||||
#### 3. **Structured Thinking Through Checklists**
|
||||
|
||||
The templates include comprehensive checklists that act as "unit tests" for the specification:
|
||||
```
|
||||
|
||||
```markdown
|
||||
### Requirement Completeness
|
||||
- [ ] No [NEEDS CLARIFICATION] markers remain
|
||||
- [ ] Requirements are testable and unambiguous
|
||||
@@ -189,7 +205,8 @@ These checklists force the LLM to self-review its output systematically, catchin
|
||||
#### 4. **Constitutional Compliance Through Gates**
|
||||
|
||||
The implementation plan template enforces architectural principles through phase gates:
|
||||
```
|
||||
|
||||
```markdown
|
||||
### Phase -1: Pre-Implementation Gates
|
||||
#### Simplicity Gate (Article VII)
|
||||
- [ ] Using ≤3 projects?
|
||||
@@ -204,7 +221,8 @@ These gates prevent over-engineering by making the LLM explicitly justify any co
|
||||
#### 5. **Hierarchical Detail Management**
|
||||
|
||||
The templates enforce proper information architecture:
|
||||
```
|
||||
|
||||
```text
|
||||
**IMPORTANT**: This implementation plan should remain high-level and readable.
|
||||
Any code samples, detailed algorithms, or extensive technical specifications
|
||||
must be placed in the appropriate `implementation-details/` file
|
||||
@@ -215,7 +233,8 @@ This prevents the common problem of specifications becoming unreadable code dump
|
||||
#### 6. **Test-First Thinking**
|
||||
|
||||
The implementation template enforces test-first development:
|
||||
```
|
||||
|
||||
```text
|
||||
### File Creation Order
|
||||
1. Create `contracts/` with API specifications
|
||||
2. Create test files in order: contract → integration → e2e → unit
|
||||
@@ -227,7 +246,8 @@ This ordering constraint ensures the LLM thinks about testability and contracts
|
||||
#### 7. **Preventing Speculative Features**
|
||||
|
||||
Templates explicitly discourage speculation:
|
||||
```
|
||||
|
||||
```text
|
||||
- [ ] No speculative or "might need" features
|
||||
- [ ] All phases have clear prerequisites and deliverables
|
||||
```
|
||||
@@ -237,6 +257,7 @@ This stops the LLM from adding "nice to have" features that complicate implement
|
||||
### The Compound Effect
|
||||
|
||||
These constraints work together to produce specifications that are:
|
||||
|
||||
- **Complete**: Checklists ensure nothing is forgotten
|
||||
- **Unambiguous**: Forced clarification markers highlight uncertainties
|
||||
- **Testable**: Test-first thinking baked into the process
|
||||
@@ -247,15 +268,17 @@ The templates transform the LLM from a creative writer into a disciplined specif
|
||||
|
||||
## The Constitutional Foundation: Enforcing Architectural Discipline
|
||||
|
||||
At the heart of SDD lies a constitution—a set of immutable principles that govern how specifications become code. The constitution (`base/memory/constitution.md`) acts as the architectural DNA of the system, ensuring that every generated implementation maintains consistency, simplicity, and quality.
|
||||
At the heart of SDD lies a constitution—a set of immutable principles that govern how specifications become code. The constitution (`memory/constitution.md`) acts as the architectural DNA of the system, ensuring that every generated implementation maintains consistency, simplicity, and quality.
|
||||
|
||||
### The Nine Articles of Development
|
||||
|
||||
The constitution defines nine articles that shape every aspect of the development process:
|
||||
|
||||
#### Article I: Library-First Principle
|
||||
|
||||
Every feature must begin as a standalone library—no exceptions. This forces modular design from the start:
|
||||
```
|
||||
|
||||
```text
|
||||
Every feature in Specify MUST begin its existence as a standalone library.
|
||||
No feature shall be implemented directly within application code without
|
||||
first being abstracted into a reusable library component.
|
||||
@@ -264,8 +287,10 @@ first being abstracted into a reusable library component.
|
||||
This principle ensures that specifications generate modular, reusable code rather than monolithic applications. When the LLM generates an implementation plan, it must structure features as libraries with clear boundaries and minimal dependencies.
|
||||
|
||||
#### Article II: CLI Interface Mandate
|
||||
|
||||
Every library must expose its functionality through a command-line interface:
|
||||
```
|
||||
|
||||
```text
|
||||
All CLI interfaces MUST:
|
||||
- Accept text as input (via stdin, arguments, or files)
|
||||
- Produce text as output (via stdout)
|
||||
@@ -275,8 +300,10 @@ All CLI interfaces MUST:
|
||||
This enforces observability and testability. The LLM cannot hide functionality inside opaque classes—everything must be accessible and verifiable through text-based interfaces.
|
||||
|
||||
#### Article III: Test-First Imperative
|
||||
|
||||
The most transformative article—no code before tests:
|
||||
```
|
||||
|
||||
```text
|
||||
This is NON-NEGOTIABLE: All implementation MUST follow strict Test-Driven Development.
|
||||
No implementation code shall be written before:
|
||||
1. Unit tests are written
|
||||
@@ -287,8 +314,10 @@ No implementation code shall be written before:
|
||||
This completely inverts traditional AI code generation. Instead of generating code and hoping it works, the LLM must first generate comprehensive tests that define behavior, get them approved, and only then generate implementation.
|
||||
|
||||
#### Articles VII & VIII: Simplicity and Anti-Abstraction
|
||||
|
||||
These paired articles combat over-engineering:
|
||||
```
|
||||
|
||||
```text
|
||||
Section 7.3: Minimal Project Structure
|
||||
- Maximum 3 projects for initial implementation
|
||||
- Additional projects require documented justification
|
||||
@@ -300,8 +329,10 @@ Section 8.1: Framework Trust
|
||||
When an LLM might naturally create elaborate abstractions, these articles force it to justify every layer of complexity. The implementation plan template's "Phase -1 Gates" directly enforce these principles.
|
||||
|
||||
#### Article IX: Integration-First Testing
|
||||
|
||||
Prioritizes real-world testing over isolated unit tests:
|
||||
```
|
||||
|
||||
```text
|
||||
Tests MUST use realistic environments:
|
||||
- Prefer real databases over mocks
|
||||
- Use actual service instances over stubs
|
||||
@@ -343,7 +374,8 @@ The constitution's power lies in its immutability. While implementation details
|
||||
### Constitutional Evolution
|
||||
|
||||
While principles are immutable, their application can evolve:
|
||||
```
|
||||
|
||||
```text
|
||||
Section 4.2: Amendment Process
|
||||
Modifications to this constitution require:
|
||||
- Explicit documentation of the rationale for change
|
||||
|
||||
@@ -46,6 +46,11 @@ from typer.core import TyperGroup
|
||||
|
||||
# For cross-platform keyboard input
|
||||
import readchar
|
||||
import ssl
|
||||
import truststore
|
||||
|
||||
ssl_context = truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
|
||||
client = httpx.Client(verify=ssl_context)
|
||||
|
||||
# Constants
|
||||
AI_CHOICES = {
|
||||
@@ -385,19 +390,18 @@ def init_git_repo(project_path: Path, quiet: bool = False) -> bool:
|
||||
os.chdir(original_cwd)
|
||||
|
||||
|
||||
def download_template_from_github(ai_assistant: str, download_dir: Path, *, verbose: bool = True, show_progress: bool = True):
|
||||
"""Download the latest template release from GitHub using HTTP requests.
|
||||
Returns (zip_path, metadata_dict)
|
||||
"""
|
||||
def download_template_from_github(ai_assistant: str, download_dir: Path, *, verbose: bool = True, show_progress: bool = True, client: httpx.Client = None):
|
||||
repo_owner = "github"
|
||||
repo_name = "spec-kit"
|
||||
if client is None:
|
||||
client = httpx.Client(verify=ssl_context)
|
||||
|
||||
if verbose:
|
||||
console.print("[cyan]Fetching latest release information...[/cyan]")
|
||||
api_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest"
|
||||
|
||||
try:
|
||||
response = httpx.get(api_url, timeout=30, follow_redirects=True)
|
||||
response = client.get(api_url, timeout=30, follow_redirects=True)
|
||||
response.raise_for_status()
|
||||
release_data = response.json()
|
||||
except httpx.RequestError as e:
|
||||
@@ -437,18 +441,15 @@ def download_template_from_github(ai_assistant: str, download_dir: Path, *, verb
|
||||
console.print(f"[cyan]Downloading template...[/cyan]")
|
||||
|
||||
try:
|
||||
with httpx.stream("GET", download_url, timeout=30, follow_redirects=True) as response:
|
||||
with client.stream("GET", download_url, timeout=30, follow_redirects=True) as response:
|
||||
response.raise_for_status()
|
||||
total_size = int(response.headers.get('content-length', 0))
|
||||
|
||||
with open(zip_path, 'wb') as f:
|
||||
if total_size == 0:
|
||||
# No content-length header, download without progress
|
||||
for chunk in response.iter_bytes(chunk_size=8192):
|
||||
f.write(chunk)
|
||||
else:
|
||||
if show_progress:
|
||||
# Show progress bar
|
||||
with Progress(
|
||||
SpinnerColumn(),
|
||||
TextColumn("[progress.description]{task.description}"),
|
||||
@@ -462,10 +463,8 @@ def download_template_from_github(ai_assistant: str, download_dir: Path, *, verb
|
||||
downloaded += len(chunk)
|
||||
progress.update(task, completed=downloaded)
|
||||
else:
|
||||
# Silent download loop
|
||||
for chunk in response.iter_bytes(chunk_size=8192):
|
||||
f.write(chunk)
|
||||
|
||||
except httpx.RequestError as e:
|
||||
if verbose:
|
||||
console.print(f"[red]Error downloading template:[/red] {e}")
|
||||
@@ -483,7 +482,7 @@ def download_template_from_github(ai_assistant: str, download_dir: Path, *, verb
|
||||
return zip_path, metadata
|
||||
|
||||
|
||||
def download_and_extract_template(project_path: Path, ai_assistant: str, is_current_dir: bool = False, *, verbose: bool = True, tracker: StepTracker | None = None) -> Path:
|
||||
def download_and_extract_template(project_path: Path, ai_assistant: str, is_current_dir: bool = False, *, verbose: bool = True, tracker: StepTracker | None = None, client: httpx.Client = None) -> Path:
|
||||
"""Download the latest release and extract it to create a new project.
|
||||
Returns project_path. Uses tracker if provided (with keys: fetch, download, extract, cleanup)
|
||||
"""
|
||||
@@ -497,12 +496,13 @@ def download_and_extract_template(project_path: Path, ai_assistant: str, is_curr
|
||||
ai_assistant,
|
||||
current_dir,
|
||||
verbose=verbose and tracker is None,
|
||||
show_progress=(tracker is None)
|
||||
show_progress=(tracker is None),
|
||||
client=client
|
||||
)
|
||||
if tracker:
|
||||
tracker.complete("fetch", f"release {meta['release']} ({meta['size']:,} bytes)")
|
||||
tracker.add("download", "Download template")
|
||||
tracker.complete("download", meta['filename']) # already downloaded inside helper
|
||||
tracker.complete("download", meta['filename'])
|
||||
except Exception as e:
|
||||
if tracker:
|
||||
tracker.error("fetch", str(e))
|
||||
@@ -635,6 +635,67 @@ def download_and_extract_template(project_path: Path, ai_assistant: str, is_curr
|
||||
return project_path
|
||||
|
||||
|
||||
def ensure_executable_scripts(project_path: Path, tracker: StepTracker | None = None) -> None:
    """Ensure POSIX .sh scripts in the project scripts directory have execute bits (no-op on Windows).

    Only regular, non-symlink ``*.sh`` files that begin with a shebang and
    currently have no execute bit at all are modified. Each execute bit
    granted mirrors an existing read bit, with owner-execute guaranteed as a
    fallback. Outcomes are reported via *tracker* when provided, otherwise
    printed to the console.
    """
    if os.name == "nt":
        return  # Windows: skip silently
    scripts_dir = project_path / "scripts"
    if not scripts_dir.is_dir():
        return

    updated = 0
    failures: list[str] = []
    for script in scripts_dir.glob("*.sh"):
        try:
            # Only regular files; never follow or touch symlinks.
            if script.is_symlink() or not script.is_file():
                continue
            # Shebang sniff: leave anything not starting with "#!" alone.
            try:
                with script.open("rb") as fh:
                    if fh.read(2) != b"#!":
                        continue
            except Exception:
                continue
            mode = script.stat().st_mode
            # Already executable by someone — nothing to do.
            if mode & 0o111:
                continue
            # Mirror each existing read bit with its matching execute bit.
            new_mode = mode
            for read_bit, exec_bit in ((0o400, 0o100), (0o040, 0o010), (0o004, 0o001)):
                if mode & read_bit:
                    new_mode |= exec_bit
            # Guarantee at least owner-execute on unusual permission sets.
            if not (new_mode & 0o100):
                new_mode |= 0o100
            os.chmod(script, new_mode)
            updated += 1
        except Exception as e:
            failures.append(f"{script.name}: {e}")

    if tracker:
        detail = f"{updated} updated" + (f", {len(failures)} failed" if failures else "")
        tracker.add("chmod", "Set script permissions")
        if failures:
            tracker.error("chmod", detail)
        else:
            tracker.complete("chmod", detail)
    else:
        if updated:
            console.print(f"[cyan]Updated execute permissions on {updated} script(s)[/cyan]")
        if failures:
            console.print("[yellow]Some scripts could not be updated:[/yellow]")
            for f in failures:
                console.print(f" - {f}")
|
||||
|
||||
|
||||
@app.command()
|
||||
def init(
|
||||
project_name: str = typer.Argument(None, help="Name for your new project directory (optional if using --here)"),
|
||||
@@ -642,6 +703,7 @@ def init(
|
||||
ignore_agent_tools: bool = typer.Option(False, "--ignore-agent-tools", help="Skip checks for AI agent tools like Claude Code"),
|
||||
no_git: bool = typer.Option(False, "--no-git", help="Skip git repository initialization"),
|
||||
here: bool = typer.Option(False, "--here", help="Initialize project in the current directory instead of creating a new one"),
|
||||
skip_tls: bool = typer.Option(False, "--skip-tls", help="Skip SSL/TLS verification (not recommended)"),
|
||||
):
|
||||
"""
|
||||
Initialize a new Specify project from the latest template.
|
||||
@@ -760,6 +822,7 @@ def init(
|
||||
("extract", "Extract template"),
|
||||
("zip-list", "Archive contents"),
|
||||
("extracted-summary", "Extraction summary"),
|
||||
("chmod", "Ensure scripts executable"),
|
||||
("cleanup", "Cleanup"),
|
||||
("git", "Initialize git repository"),
|
||||
("final", "Finalize")
|
||||
@@ -770,7 +833,15 @@ def init(
|
||||
with Live(tracker.render(), console=console, refresh_per_second=8, transient=True) as live:
|
||||
tracker.attach_refresh(lambda: live.update(tracker.render()))
|
||||
try:
|
||||
download_and_extract_template(project_path, selected_ai, here, verbose=False, tracker=tracker)
|
||||
# Create a httpx client with verify based on skip_tls
|
||||
verify = not skip_tls
|
||||
local_ssl_context = ssl_context if verify else False
|
||||
local_client = httpx.Client(verify=local_ssl_context)
|
||||
|
||||
download_and_extract_template(project_path, selected_ai, here, verbose=False, tracker=tracker, client=local_client)
|
||||
|
||||
# Ensure scripts are executable (POSIX)
|
||||
ensure_executable_scripts(project_path, tracker=tracker)
|
||||
|
||||
# Git step
|
||||
if not no_git:
|
||||
@@ -820,6 +891,7 @@ def init(
|
||||
steps_lines.append(f"{step_num}. Use / commands with Gemini CLI")
|
||||
steps_lines.append(" - Run gemini /specify to create specifications")
|
||||
steps_lines.append(" - Run gemini /plan to create implementation plans")
|
||||
steps_lines.append(" - Run gemini /tasks to generate tasks")
|
||||
steps_lines.append(" - See GEMINI.md for all available commands")
|
||||
elif selected_ai == "copilot":
|
||||
steps_lines.append(f"{step_num}. Open in Visual Studio Code and use [bold cyan]/specify[/], [bold cyan]/plan[/], [bold cyan]/tasks[/] commands with GitHub Copilot")
|
||||
@@ -834,16 +906,20 @@ def init(
|
||||
# Removed farewell line per user request
|
||||
|
||||
|
||||
# Add skip_tls option to check
|
||||
@app.command()
|
||||
def check():
|
||||
def check(skip_tls: bool = typer.Option(False, "--skip-tls", help="Skip SSL/TLS verification (not recommended)")):
|
||||
"""Check that all required tools are installed."""
|
||||
show_banner()
|
||||
console.print("[bold]Checking Specify requirements...[/bold]\n")
|
||||
|
||||
# Check if we have internet connectivity by trying to reach GitHub API
|
||||
console.print("[cyan]Checking internet connectivity...[/cyan]")
|
||||
verify = not skip_tls
|
||||
local_ssl_context = ssl_context if verify else False
|
||||
local_client = httpx.Client(verify=local_ssl_context)
|
||||
try:
|
||||
response = httpx.get("https://api.github.com", timeout=5, follow_redirects=True)
|
||||
response = local_client.get("https://api.github.com", timeout=5, follow_redirects=True)
|
||||
console.print("[green]✓[/green] Internet connection available")
|
||||
except httpx.RequestError:
|
||||
console.print("[red]✗[/red] No internet connection - required for downloading templates")
|
||||
|
||||
Reference in New Issue
Block a user