Mirror of https://github.com/github/spec-kit.git (synced 2026-01-31 21:13:36 +00:00)
Compare commits
61 Commits
| Author | SHA1 | Date |
|---|---|---|
| | f3ada747cf | |
| | cbc8ab020c | |
| | e0e62f6757 | |
| | 6c22085214 | |
| | 02c1549f80 | |
| | ab14090813 | |
| | 546e9d6617 | |
| | ea90d02c41 | |
| | 3e85f46465 | |
| | 015440838a | |
| | 7050a3151c | |
| | 9e84f46e56 | |
| | 5e32de1f3f | |
| | 5558b24475 | |
| | f892b9e1cb | |
| | a66af9b7f5 | |
| | a5fdd53a3e | |
| | ed0fa8fffe | |
| | 098380a46f | |
| | 74f7e508a4 | |
| | 8130d98bcc | |
| | 315269d9a8 | |
| | 71c2c63d55 | |
| | b37a9516d0 | |
| | b009773d5c | |
| | a8514da3e8 | |
| | 9d4e8e9eb9 | |
| | f3c77e2f4f | |
| | ecec4bc5e0 | |
| | 900bc2ed68 | |
| | 03c7021270 | |
| | 3b000fce4d | |
| | c59595d065 | |
| | 1c16a68df2 | |
| | 39bf3e4d9a | |
| | 045696641a | |
| | 41690cd1d4 | |
| | e45c469709 | |
| | 8c9e586662 | |
| | ce844c6259 | |
| | 84b46cd1b9 | |
| | 0cca67fcd2 | |
| | 66fc4c292d | |
| | 2baae57b26 | |
| | 514b0548fe | |
| | be7db635cc | |
| | a945077b8d | |
| | 7b55522213 | |
| | 7ca792509b | |
| | 4522fb4c44 | |
| | 36ff7e6505 | |
| | defb1870da | |
| | b61f04c898 | |
| | 64745162df | |
| | 97df98b9a0 | |
| | 36383b411f | |
| | 3e476c2ba6 | |
| | 654a00aac9 | |
| | 4690d13f88 | |
| | 09f57a87fa | |
| | 47e5f7c2e2 | |
.devcontainer/devcontainer.json (new file, 77 lines)
@@ -0,0 +1,77 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/python
{
  "name": "SpecKitDevContainer",
  // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
  "image": "mcr.microsoft.com/devcontainers/python:3.13-trixie", // based on Debian "Trixie" (13)
  "features": {
    "ghcr.io/devcontainers/features/common-utils:2": {
      "installZsh": true,
      "installOhMyZsh": true,
      "installOhMyZshConfig": true,
      "upgradePackages": true,
      "username": "devcontainer",
      "userUid": "automatic",
      "userGid": "automatic"
    },
    "ghcr.io/devcontainers/features/dotnet:2": {
      "version": "lts"
    },
    "ghcr.io/devcontainers/features/git:1": {
      "ppa": true,
      "version": "latest"
    },
    "ghcr.io/devcontainers/features/node": {
      "version": "lts"
    }
  },

  // Use 'forwardPorts' to make a list of ports inside the container available locally.
  "forwardPorts": [
    8080 // for Spec-Kit documentation site
  ],
  "containerUser": "devcontainer",
  "updateRemoteUserUID": true,
  "postCreateCommand": "chmod +x ./.devcontainer/post-create.sh && ./.devcontainer/post-create.sh",
  "postStartCommand": "git config --global --add safe.directory ${containerWorkspaceFolder}",
  "customizations": {
    "vscode": {
      "extensions": [
        "mhutchie.git-graph",
        "eamodio.gitlens",
        "anweber.reveal-button",
        "chrisdias.promptboost",
        // Github Copilot
        "GitHub.copilot",
        "GitHub.copilot-chat",
        // Codex
        "openai.chatgpt",
        // Kilo Code
        "kilocode.Kilo-Code",
        // Roo Code
        "RooVeterinaryInc.roo-cline",
        // Amazon Developer Q
        "AmazonWebServices.amazon-q-vscode",
        // Claude Code
        "anthropic.claude-code"
      ],
      "settings": {
        "debug.javascript.autoAttachFilter": "disabled", // fix running commands in integrated terminal

        // Specify settings for Github Copilot
        "git.autofetch": true,
        "chat.promptFilesRecommendations": {
          "speckit.constitution": true,
          "speckit.specify": true,
          "speckit.plan": true,
          "speckit.tasks": true,
          "speckit.implement": true
        },
        "chat.tools.terminal.autoApprove": {
          ".specify/scripts/bash/": true,
          ".specify/scripts/powershell/": true
        }
      }
    }
  }
}
.devcontainer/post-create.sh (new executable file, 100 lines)
@@ -0,0 +1,100 @@
#!/bin/bash

# Exit immediately on error, treat unset variables as an error, and fail if any command in a pipeline fails.
set -euo pipefail

# Function to run a command and show logs only on error
run_command() {
    local command_to_run="$*"
    local output
    local exit_code

    # Capture all output (stdout and stderr)
    output=$(eval "$command_to_run" 2>&1) || exit_code=$?
    exit_code=${exit_code:-0}

    if [ $exit_code -ne 0 ]; then
        echo -e "\033[0;31m[ERROR] Command failed (Exit Code $exit_code): $command_to_run\033[0m" >&2
        echo -e "\033[0;31m$output\033[0m" >&2

        exit $exit_code
    fi
}

# Installing CLI-based AI Agents

echo -e "\n🤖 Installing Copilot CLI..."
run_command "npm install -g @github/copilot@latest"
echo "✅ Done"

echo -e "\n🤖 Installing Claude CLI..."
run_command "npm install -g @anthropic-ai/claude-code@latest"
echo "✅ Done"

echo -e "\n🤖 Installing Codex CLI..."
run_command "npm install -g @openai/codex@latest"
echo "✅ Done"

echo -e "\n🤖 Installing Gemini CLI..."
run_command "npm install -g @google/gemini-cli@latest"
echo "✅ Done"

echo -e "\n🤖 Installing Augie CLI..."
run_command "npm install -g @augmentcode/auggie@latest"
echo "✅ Done"

echo -e "\n🤖 Installing Qwen Code CLI..."
run_command "npm install -g @qwen-code/qwen-code@latest"
echo "✅ Done"

echo -e "\n🤖 Installing OpenCode CLI..."
run_command "npm install -g opencode-ai@latest"
echo "✅ Done"

echo -e "\n🤖 Installing Amazon Q CLI..."
# 👉🏾 https://docs.aws.amazon.com/amazonq/latest/qdeveloper-ug/command-line-verify-download.html

run_command "curl --proto '=https' --tlsv1.2 -sSf 'https://desktop-release.q.us-east-1.amazonaws.com/latest/q-x86_64-linux.zip' -o 'q.zip'"
run_command "curl --proto '=https' --tlsv1.2 -sSf 'https://desktop-release.q.us-east-1.amazonaws.com/latest/q-x86_64-linux.zip.sig' -o 'q.zip.sig'"
cat > amazonq-public-key.asc << 'EOF'
-----BEGIN PGP PUBLIC KEY BLOCK-----

mDMEZig60RYJKwYBBAHaRw8BAQdAy/+G05U5/EOA72WlcD4WkYn5SInri8pc4Z6D
BKNNGOm0JEFtYXpvbiBRIENMSSBUZWFtIDxxLWNsaUBhbWF6b24uY29tPoiZBBMW
CgBBFiEEmvYEF+gnQskUPgPsUNx6jcJMVmcFAmYoOtECGwMFCQPCZwAFCwkIBwIC
IgIGFQoJCAsCBBYCAwECHgcCF4AACgkQUNx6jcJMVmef5QD/QWWEGG/cOnbDnp68
SJXuFkwiNwlH2rPw9ZRIQMnfAS0A/0V6ZsGB4kOylBfc7CNfzRFGtovdBBgHqA6P
zQ/PNscGuDgEZig60RIKKwYBBAGXVQEFAQEHQC4qleONMBCq3+wJwbZSr0vbuRba
D1xr4wUPn4Avn4AnAwEIB4h+BBgWCgAmFiEEmvYEF+gnQskUPgPsUNx6jcJMVmcF
AmYoOtECGwwFCQPCZwAACgkQUNx6jcJMVmchMgEA6l3RveCM0YHAGQaSFMkguoAo
vK6FgOkDawgP0NPIP2oA/jIAO4gsAntuQgMOsPunEdDeji2t+AhV02+DQIsXZpoB
=f8yY
-----END PGP PUBLIC KEY BLOCK-----
EOF
run_command "gpg --batch --import amazonq-public-key.asc"
run_command "gpg --verify q.zip.sig q.zip"
run_command "unzip -q q.zip"
run_command "chmod +x ./q/install.sh"
run_command "./q/install.sh --no-confirm"
run_command "rm -rf ./q q.zip q.zip.sig amazonq-public-key.asc"
echo "✅ Done"

echo -e "\n🤖 Installing CodeBuddy CLI..."
run_command "npm install -g @tencent-ai/codebuddy-code@latest"
echo "✅ Done"

# Installing UV (Python package manager)
echo -e "\n🐍 Installing UV - Python Package Manager..."
run_command "pipx install uv"
echo "✅ Done"

# Installing DocFx (for documentation site)
echo -e "\n📚 Installing DocFx..."
run_command "dotnet tool update -g docfx"
echo "✅ Done"

echo -e "\n🧹 Cleaning cache..."
run_command "sudo apt-get autoclean"
run_command "sudo apt-get clean"

echo "✅ Setup completed. Happy coding! 🚀"
.gitattributes (new file, vendored, 1 line)
@@ -0,0 +1 @@
* text=auto eol=lf
.github/CODEOWNERS (vendored, 1 change)
@@ -1,2 +1,3 @@
 # Global code owner
 * @localden
+
.github/workflows/docs.yml (vendored, 1 change)
@@ -65,3 +65,4 @@ jobs:
       - name: Deploy to GitHub Pages
         id: deployment
         uses: actions/deploy-pages@v4
+
.github/workflows/release.yml (vendored, 1 change)
@@ -57,3 +57,4 @@ jobs:
         run: |
           chmod +x .github/workflows/scripts/update-version.sh
           .github/workflows/scripts/update-version.sh ${{ steps.get_tag.outputs.new_version }}
+

@@ -18,4 +18,4 @@ if gh release view "$VERSION" >/dev/null 2>&1; then
 else
   echo "exists=false" >> $GITHUB_OUTPUT
   echo "Release $VERSION does not exist, proceeding..."
 fi

@@ -43,4 +43,4 @@ gh release create "$VERSION" \
   .genreleases/spec-kit-template-q-sh-"$VERSION".zip \
   .genreleases/spec-kit-template-q-ps-"$VERSION".zip \
   --title "Spec Kit Templates - $VERSION_NO_V" \
   --notes-file release_notes.md

@@ -237,3 +237,4 @@ done
 
 echo "Archives in $GENRELEASES_DIR:"
 ls -1 "$GENRELEASES_DIR"/spec-kit-template-*-"${NEW_VERSION}".zip
+

@@ -33,4 +33,4 @@ This is the latest set of releases that you can use with your agent of choice. W
 EOF
 
 echo "Generated release notes:"
 cat release_notes.md

@@ -21,4 +21,4 @@ PATCH=$((PATCH + 1))
 NEW_VERSION="v$MAJOR.$MINOR.$PATCH"
 
 echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
 echo "New version will be: $NEW_VERSION"
.github/workflows/scripts/update-version.sh (vendored, 2 changes)
@@ -20,4 +20,4 @@ if [ -f "pyproject.toml" ]; then
     echo "Updated pyproject.toml version to $PYTHON_VERSION (for release artifacts only)"
 else
     echo "Warning: pyproject.toml not found, skipping version update"
 fi
.gitignore (vendored, 2 changes)
@@ -42,4 +42,4 @@ env/
 # Spec Kit-specific files
 .genreleases/
 *.zip
 sdd-*/
AGENTS.md (51 changes)

@@ -42,7 +42,7 @@ Specify supports multiple AI agents by generating agent-specific command files a
 | **Kilo Code** | `.kilocode/rules/` | Markdown | N/A (IDE-based) | Kilo Code IDE |
 | **Auggie CLI** | `.augment/rules/` | Markdown | `auggie` | Auggie CLI |
 | **Roo Code** | `.roo/rules/` | Markdown | N/A (IDE-based) | Roo Code IDE |
-| **CodeBuddy** | `.codebuddy/commands/` | Markdown | `codebuddy` | CodeBuddy |
+| **CodeBuddy CLI** | `.codebuddy/commands/` | Markdown | `codebuddy` | CodeBuddy CLI |
 | **Amazon Q Developer CLI** | `.amazonq/prompts/` | Markdown | `q` | Amazon Q Developer CLI |
 
 ### Step-by-Step Integration Guide

@@ -239,6 +239,51 @@ AGENT_CONFIG = {
 - Reduces the chance of bugs when adding new agents
 - Tool checking "just works" without additional mappings
 
+#### 7. Update Devcontainer files (Optional)
+
+For agents that have VS Code extensions or require CLI installation, update the devcontainer configuration files:
+
+##### VS Code Extension-based Agents
+
+For agents available as VS Code extensions, add them to `.devcontainer/devcontainer.json`:
+
+```json
+{
+  "customizations": {
+    "vscode": {
+      "extensions": [
+        // ... existing extensions ...
+        // [New Agent Name]
+        "[New Agent Extension ID]"
+      ]
+    }
+  }
+}
+```
+
+##### CLI-based Agents
+
+For agents that require CLI tools, add installation commands to `.devcontainer/post-create.sh`:
+
+```bash
+#!/bin/bash
+
+# Existing installations...
+
+echo -e "\n🤖 Installing [New Agent Name] CLI..."
+# run_command "npm install -g [agent-cli-package]@latest" # Example for node-based CLI
+# or other installation instructions (must be non-interactive and compatible with Linux Debian "Trixie" or later)...
+echo "✅ Done"
+```
+
+**Quick Tips:**
+
+- **Extension-based agents**: Add to the `extensions` array in `devcontainer.json`
+- **CLI-based agents**: Add installation scripts to `post-create.sh`
+- **Hybrid agents**: May require both extension and CLI installation
+- **Test thoroughly**: Ensure installations work in the devcontainer environment
+
 ## Agent Categories
 
 ### CLI-Based Agents

@@ -249,7 +294,8 @@ Require a command-line tool to be installed:
 - **Cursor**: `cursor-agent` CLI
 - **Qwen Code**: `qwen` CLI
 - **opencode**: `opencode` CLI
-- **CodeBuddy**: `codebuddy` CLI
+- **Amazon Q Developer CLI**: `q` CLI
+- **CodeBuddy CLI**: `codebuddy` CLI
 
 ### IDE-Based Agents
 Work within integrated development environments:

@@ -326,3 +372,4 @@ When adding new agents:
 ---
 
 *This documentation should be updated whenever new agents are added to maintain accuracy and completeness.*
+
CHANGELOG.md (24 changes)

@@ -7,6 +7,29 @@ All notable changes to the Specify CLI and templates are documented here.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.0.20] - 2025-10-14
+
+### Added
+
+- **Intelligent Branch Naming**: `create-new-feature` scripts now support a `--short-name` parameter for custom branch names
+  - When `--short-name` is provided: uses the custom name directly (cleaned and formatted)
+  - When omitted: automatically generates meaningful names using stop-word filtering and length-based filtering
+    - Filters out common stop words (I, want, to, the, for, etc.)
+    - Removes words shorter than 3 characters (unless they're uppercase acronyms)
+    - Takes the 3-4 most meaningful words from the description
+  - **Enforces GitHub's 244-byte branch name limit** with automatic truncation and warnings
+  - Examples:
+    - "I want to create user authentication" → `001-create-user-authentication`
+    - "Implement OAuth2 integration for API" → `001-implement-oauth2-integration-api`
+    - "Fix payment processing bug" → `001-fix-payment-processing`
+  - Very long descriptions are automatically truncated at word boundaries to stay within limits
+  - Designed for AI agents to provide semantic short names while maintaining standalone usability
+
+### Changed
+
+- Enhanced help documentation for `create-new-feature.sh` and `create-new-feature.ps1` scripts with examples
+- Branch names are now validated against GitHub's 244-byte limit, with automatic truncation if needed
+
 ## [0.0.19] - 2025-10-10
 
 ### Added

@@ -143,3 +166,4 @@ N/A
 ### Changed
 
 N/A
+
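The release notes above describe the new branch-naming flow; the sketch below shows how the two modes might be invoked from a shell. It is illustrative only: the script path assumes an initialized project layout (`.specify/scripts/bash/`, the path referenced by the devcontainer settings earlier; inside the spec-kit repository itself the script lives elsewhere), and the `001` prefix depends on how many specs already exist.

```bash
# Custom short name: the suffix is taken verbatim (cleaned), e.g. 001-user-auth
.specify/scripts/bash/create-new-feature.sh --short-name 'user-auth' 'Add user authentication system'

# No short name: stop words ("for") are filtered and the 3-4 meaningful words are kept,
# e.g. 001-implement-oauth2-integration-api
.specify/scripts/bash/create-new-feature.sh --json 'Implement OAuth2 integration for API'
```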
@@ -71,4 +71,4 @@ This Code of Conduct is adapted from the [Contributor Covenant][homepage], versi
 available at [http://contributor-covenant.org/version/1/4][version]
 
 [homepage]: http://contributor-covenant.org
 [version]: http://contributor-covenant.org/version/1/4/
@@ -13,6 +13,23 @@ These are one time installations required to be able to test your changes locall
 1. Install [Git](https://git-scm.com/downloads)
 1. Have an [AI coding agent available](README.md#-supported-ai-agents)
 
+<details>
+<summary><b>💡 Hint if you are using <code>VSCode</code> or <code>GitHub Codespaces</code> as your IDE</b></summary>
+
+<br>
+
+Provided you have [Docker](https://docker.com) installed on your machine, you can leverage [Dev Containers](https://containers.dev) through this [VSCode extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) to easily set up your development environment, with the aforementioned tools already installed and configured, thanks to the `.devcontainer/devcontainer.json` file (located at the root of the project).
+
+To do so, simply:
+
+- Check out the repo
+- Open it with VSCode
+- Open the [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette) and select "Dev Containers: Open Folder in Container..."
+
+On [GitHub Codespaces](https://github.com/features/codespaces) it's even simpler, as it leverages the `.devcontainer/devcontainer.json` automatically upon opening the codespace.
+
+</details>
+
 ## Submitting a pull request
 
 >[!NOTE]

@@ -108,3 +125,4 @@ Please be respectful to maintainers and disclose AI assistance.
 - [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/)
 - [Using Pull Requests](https://help.github.com/articles/about-pull-requests/)
 - [GitHub Help](https://help.github.com)
+
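As a purely optional alternative to the VS Code flow described in the contributing hunk above, the same configuration can be consumed from a terminal with the Dev Containers CLI. This is a sketch, not part of the documented workflow; it assumes Docker is running and that the `@devcontainers/cli` npm package is installed.

```bash
# Optional sketch: start the Spec Kit dev container without VS Code
npm install -g @devcontainers/cli    # assumption: the CLI is not yet installed
devcontainer up --workspace-folder . # builds the image and runs the postCreateCommand
```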
LICENSE (1 change)
@@ -19,3 +19,4 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
+
README.md (37 changes)

@@ -5,7 +5,7 @@
 </div>
 
 <p align="center">
-    <strong>An effort to allow organizations to focus on product scenarios rather than writing undifferentiated code with the help of Spec-Driven Development.</strong>
+    <strong>An open source toolkit that allows you to focus on product scenarios and predictable outcomes instead of vibe coding every piece from scratch.</strong>
 </p>
 
 <p align="center">

@@ -20,16 +20,16 @@
 ## Table of Contents
 
 - [🤔 What is Spec-Driven Development?](#-what-is-spec-driven-development)
-- [⚡ Get started](#-get-started)
+- [⚡ Get Started](#-get-started)
 - [📽️ Video Overview](#️-video-overview)
 - [🤖 Supported AI Agents](#-supported-ai-agents)
 - [🔧 Specify CLI Reference](#-specify-cli-reference)
-- [📚 Core philosophy](#-core-philosophy)
+- [📚 Core Philosophy](#-core-philosophy)
-- [🌟 Development phases](#-development-phases)
+- [🌟 Development Phases](#-development-phases)
-- [🎯 Experimental goals](#-experimental-goals)
+- [🎯 Experimental Goals](#-experimental-goals)
 - [🔧 Prerequisites](#-prerequisites)
-- [📖 Learn more](#-learn-more)
+- [📖 Learn More](#-learn-more)
-- [📋 Detailed process](#-detailed-process)
+- [📋 Detailed Process](#-detailed-process)
 - [🔍 Troubleshooting](#-troubleshooting)
 - [👥 Maintainers](#-maintainers)
 - [💬 Support](#-support)

@@ -40,9 +40,9 @@
 
 Spec-Driven Development **flips the script** on traditional software development. For decades, code has been king — specifications were just scaffolding we built and discarded once the "real work" of coding began. Spec-Driven Development changes this: **specifications become executable**, directly generating working implementations rather than just guiding them.
 
-## ⚡ Get started
+## ⚡ Get Started
 
-### 1. Install Specify
+### 1. Install Specify CLI
 
 Choose your preferred installation method:
 

@@ -84,6 +84,8 @@ uvx --from git+https://github.com/github/spec-kit.git specify init <PROJECT_NAME
 
 ### 2. Establish project principles
 
+Launch your AI assistant in the project directory. The `/speckit.*` commands are available in the assistant.
+
 Use the **`/speckit.constitution`** command to create your project's governing principles and development guidelines that will guide all subsequent development.
 
 ```bash

@@ -143,7 +145,7 @@ Want to see Spec Kit in action? Watch our [video overview](https://www.youtube.c
 | [Windsurf](https://windsurf.com/) | ✅ | |
 | [Kilo Code](https://github.com/Kilo-Org/kilocode) | ✅ | |
 | [Auggie CLI](https://docs.augmentcode.com/cli/overview) | ✅ | |
-| [CodeBuddy](https://www.codebuddy.ai/) | ✅ | |
+| [CodeBuddy CLI](https://www.codebuddy.ai/cli) | ✅ | |
 | [Roo Code](https://roocode.com/) | ✅ | |
 | [Codex CLI](https://github.com/openai/codex) | ✅ | |
 | [Amazon Q Developer CLI](https://aws.amazon.com/developer/learning/q-developer-cli/) | ⚠️ | Amazon Q Developer CLI [does not support](https://github.com/aws/amazon-q-developer-cli/issues/3064) custom arguments for slash commands. |

@@ -247,7 +249,7 @@ Additional commands for enhanced quality and validation:
 |------------------|------------------------------------------------------------------------------------------------|
 | `SPECIFY_FEATURE` | Override feature detection for non-Git repositories. Set to the feature directory name (e.g., `001-photo-albums`) to work on a specific feature when not using Git branches.<br/>**Must be set in the context of the agent you're working with prior to using `/speckit.plan` or follow-up commands.** |
 
-## 📚 Core philosophy
+## 📚 Core Philosophy
 
 Spec-Driven Development is a structured process that emphasizes:
 

@@ -256,7 +258,7 @@ Spec-Driven Development is a structured process that emphasizes:
 - **Multi-step refinement** rather than one-shot code generation from prompts
 - **Heavy reliance** on advanced AI model capabilities for specification interpretation
 
-## 🌟 Development phases
+## 🌟 Development Phases
 
 | Phase | Focus | Key Activities |
 |-------|-------|----------------|

@@ -264,7 +266,7 @@
 | **Creative Exploration** | Parallel implementations | <ul><li>Explore diverse solutions</li><li>Support multiple technology stacks & architectures</li><li>Experiment with UX patterns</li></ul> |
 | **Iterative Enhancement** ("Brownfield") | Brownfield modernization | <ul><li>Add features iteratively</li><li>Modernize legacy systems</li><li>Adapt processes</li></ul> |
 
-## 🎯 Experimental goals
+## 🎯 Experimental Goals
 
 Our research and experimentation focus on:
 

@@ -292,22 +294,22 @@ Our research and experimentation focus on:
 
 ## 🔧 Prerequisites
 
-- **Linux/macOS** (or WSL2 on Windows)
+- **Linux/macOS/Windows**
-- AI coding agent: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), [Gemini CLI](https://github.com/google-gemini/gemini-cli), [Cursor](https://cursor.sh/), [Qwen CLI](https://github.com/QwenLM/qwen-code), [opencode](https://opencode.ai/), [Codex CLI](https://github.com/openai/codex), [Windsurf](https://windsurf.com/), or [Amazon Q Developer CLI](https://aws.amazon.com/developer/learning/q-developer-cli/)
+- A [supported](#-supported-ai-agents) AI coding agent
 - [uv](https://docs.astral.sh/uv/) for package management
 - [Python 3.11+](https://www.python.org/downloads/)
 - [Git](https://git-scm.com/downloads)
 
 If you encounter issues with an agent, please open an issue so we can refine the integration.
 
-## 📖 Learn more
+## 📖 Learn More
 
 - **[Complete Spec-Driven Development Methodology](./spec-driven.md)** - Deep dive into the full process
 - **[Detailed Walkthrough](#-detailed-process)** - Step-by-step implementation guide
 
 ---
 
-## 📋 Detailed process
+## 📋 Detailed Process
 
 <details>
 <summary>Click to expand the detailed step-by-step walkthrough</summary>

@@ -623,3 +625,4 @@ This project is heavily influenced by and based on the work and research of [Joh
 ## 📄 License
 
 This project is licensed under the terms of the MIT open source license. Please refer to the [LICENSE](./LICENSE) file for the full terms.
+
@@ -28,4 +28,4 @@ This information will help us triage your report more quickly.
 
 ## Policy
 
 See [GitHub's Safe Harbor Policy](https://docs.github.com/en/site-policy/security-policies/github-bug-bounty-program-legal-safe-harbor#1-safe-harbor-terms)
@@ -17,3 +17,4 @@ For help or questions about using this project, please:
 ## GitHub Support Policy
 
 Support for this project is limited to the resources listed above.
+
docs/.gitignore (vendored, 1 change)
@@ -6,3 +6,4 @@ obj/
 # Temporary files
 *.tmp
 *.log
+
@@ -31,3 +31,4 @@ To build the documentation locally:
 ## Deployment
 
 Documentation is automatically built and deployed to GitHub Pages when changes are pushed to the `main` branch. The workflow is defined in `.github/workflows/docs.yml`.
+
@@ -68,3 +68,4 @@
 }
 }
 }
+
@@ -60,3 +60,4 @@ Please see our [Contributing Guide](https://github.com/github/spec-kit/blob/main
 ## Support
 
 For support, please check our [Support Guide](https://github.com/github/spec-kit/blob/main/SUPPORT.md) or open an issue on GitHub.
+
@@ -3,7 +3,7 @@
 ## Prerequisites
 
 - **Linux/macOS** (or Windows; PowerShell scripts now supported without WSL)
-- AI coding agent: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), or [Gemini CLI](https://github.com/google-gemini/gemini-cli)
+- AI coding agent: [Claude Code](https://www.anthropic.com/claude-code), [GitHub Copilot](https://code.visualstudio.com/), [CodeBuddy CLI](https://www.codebuddy.ai/cli), or [Gemini CLI](https://github.com/google-gemini/gemini-cli)
 - [uv](https://docs.astral.sh/uv/) for package management
 - [Python 3.11+](https://www.python.org/downloads/)
 - [Git](https://git-scm.com/downloads)

@@ -34,6 +34,7 @@ You can proactively specify your AI agent during initialization:
 uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai claude
 uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai gemini
 uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai copilot
+uvx --from git+https://github.com/github/spec-kit.git specify init <project_name> --ai codebuddy
 ```
 
 ### Specify Script Type (Shell vs PowerShell)

@@ -86,3 +87,4 @@ git config --global credential.helper manager
 echo "Cleaning up..."
 rm gcm-linux_amd64.2.6.1.deb
 ```
+
|||||||
@@ -166,3 +166,4 @@ rm -rf .venv dist build *.egg-info
|
|||||||
- Open a PR when satisfied
|
- Open a PR when satisfied
|
||||||
- (Optional) Tag a release once changes land in `main`
|
- (Optional) Tag a release once changes land in `main`
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -120,3 +120,4 @@ implement specs/002-create-taskify/plan.md
 - Read the complete methodology for in-depth guidance
 - Check out more examples in the repository
 - Explore the source code on GitHub
+
@@ -15,3 +15,4 @@
   items:
   - name: Local Development
     href: local-development.md
+
|||||||
@@ -47,4 +47,4 @@
|
|||||||
<!-- Example: All PRs/reviews must verify compliance; Complexity must be justified; Use [GUIDANCE_FILE] for runtime development guidance -->
|
<!-- Example: All PRs/reviews must verify compliance; Complexity must be justified; Use [GUIDANCE_FILE] for runtime development guidance -->
|
||||||
|
|
||||||
**Version**: [CONSTITUTION_VERSION] | **Ratified**: [RATIFICATION_DATE] | **Last Amended**: [LAST_AMENDED_DATE]
|
**Version**: [CONSTITUTION_VERSION] | **Ratified**: [RATIFICATION_DATE] | **Last Amended**: [LAST_AMENDED_DATE]
|
||||||
<!-- Example: Version: 2.1.1 | Ratified: 2025-06-13 | Last Amended: 2025-07-16 -->
|
<!-- Example: Version: 2.1.1 | Ratified: 2025-06-13 | Last Amended: 2025-07-16 -->
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
[project]
|
[project]
|
||||||
name = "specify-cli"
|
name = "specify-cli"
|
||||||
version = "0.0.19"
|
version = "0.0.20"
|
||||||
description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)."
|
description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)."
|
||||||
requires-python = ">=3.11"
|
requires-python = ">=3.11"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
@@ -21,3 +21,4 @@ build-backend = "hatchling.build"
|
|||||||
|
|
||||||
[tool.hatch.build.targets.wheel]
|
[tool.hatch.build.targets.wheel]
|
||||||
packages = ["src/specify_cli"]
|
packages = ["src/specify_cli"]
|
||||||
|
|
||||||
|
|||||||
@@ -163,4 +163,4 @@ else
|
|||||||
if $INCLUDE_TASKS; then
|
if $INCLUDE_TASKS; then
|
||||||
check_file "$TASKS" "tasks.md"
|
check_file "$TASKS" "tasks.md"
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -19,21 +19,21 @@ get_current_branch() {
|
|||||||
echo "$SPECIFY_FEATURE"
|
echo "$SPECIFY_FEATURE"
|
||||||
return
|
return
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Then check git if available
|
# Then check git if available
|
||||||
if git rev-parse --abbrev-ref HEAD >/dev/null 2>&1; then
|
if git rev-parse --abbrev-ref HEAD >/dev/null 2>&1; then
|
||||||
git rev-parse --abbrev-ref HEAD
|
git rev-parse --abbrev-ref HEAD
|
||||||
return
|
return
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# For non-git repos, try to find the latest feature directory
|
# For non-git repos, try to find the latest feature directory
|
||||||
local repo_root=$(get_repo_root)
|
local repo_root=$(get_repo_root)
|
||||||
local specs_dir="$repo_root/specs"
|
local specs_dir="$repo_root/specs"
|
||||||
|
|
||||||
if [[ -d "$specs_dir" ]]; then
|
if [[ -d "$specs_dir" ]]; then
|
||||||
local latest_feature=""
|
local latest_feature=""
|
||||||
local highest=0
|
local highest=0
|
||||||
|
|
||||||
for dir in "$specs_dir"/*; do
|
for dir in "$specs_dir"/*; do
|
||||||
if [[ -d "$dir" ]]; then
|
if [[ -d "$dir" ]]; then
|
||||||
local dirname=$(basename "$dir")
|
local dirname=$(basename "$dir")
|
||||||
@@ -47,13 +47,13 @@ get_current_branch() {
|
|||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
if [[ -n "$latest_feature" ]]; then
|
if [[ -n "$latest_feature" ]]; then
|
||||||
echo "$latest_feature"
|
echo "$latest_feature"
|
||||||
return
|
return
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo "main" # Final fallback
|
echo "main" # Final fallback
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -65,35 +65,77 @@ has_git() {
|
|||||||
check_feature_branch() {
|
check_feature_branch() {
|
||||||
local branch="$1"
|
local branch="$1"
|
||||||
local has_git_repo="$2"
|
local has_git_repo="$2"
|
||||||
|
|
||||||
# For non-git repos, we can't enforce branch naming but still provide output
|
# For non-git repos, we can't enforce branch naming but still provide output
|
||||||
if [[ "$has_git_repo" != "true" ]]; then
|
if [[ "$has_git_repo" != "true" ]]; then
|
||||||
echo "[specify] Warning: Git repository not detected; skipped branch validation" >&2
|
echo "[specify] Warning: Git repository not detected; skipped branch validation" >&2
|
||||||
return 0
|
return 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ ! "$branch" =~ ^[0-9]{3}- ]]; then
|
if [[ ! "$branch" =~ ^[0-9]{3}- ]]; then
|
||||||
echo "ERROR: Not on a feature branch. Current branch: $branch" >&2
|
echo "ERROR: Not on a feature branch. Current branch: $branch" >&2
|
||||||
echo "Feature branches should be named like: 001-feature-name" >&2
|
echo "Feature branches should be named like: 001-feature-name" >&2
|
||||||
return 1
|
return 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
get_feature_dir() { echo "$1/specs/$2"; }
|
get_feature_dir() { echo "$1/specs/$2"; }
|
||||||
|
|
||||||
|
# Find feature directory by numeric prefix instead of exact branch match
|
||||||
|
# This allows multiple branches to work on the same spec (e.g., 004-fix-bug, 004-add-feature)
|
||||||
|
find_feature_dir_by_prefix() {
|
||||||
|
local repo_root="$1"
|
||||||
|
local branch_name="$2"
|
||||||
|
local specs_dir="$repo_root/specs"
|
||||||
|
|
||||||
|
# Extract numeric prefix from branch (e.g., "004" from "004-whatever")
|
||||||
|
if [[ ! "$branch_name" =~ ^([0-9]{3})- ]]; then
|
||||||
|
# If branch doesn't have numeric prefix, fall back to exact match
|
||||||
|
echo "$specs_dir/$branch_name"
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
|
||||||
|
local prefix="${BASH_REMATCH[1]}"
|
||||||
|
|
||||||
|
# Search for directories in specs/ that start with this prefix
|
||||||
|
local matches=()
|
||||||
|
if [[ -d "$specs_dir" ]]; then
|
||||||
|
for dir in "$specs_dir"/"$prefix"-*; do
|
||||||
|
if [[ -d "$dir" ]]; then
|
||||||
|
matches+=("$(basename "$dir")")
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Handle results
|
||||||
|
if [[ ${#matches[@]} -eq 0 ]]; then
|
||||||
|
# No match found - return the branch name path (will fail later with clear error)
|
||||||
|
echo "$specs_dir/$branch_name"
|
||||||
|
elif [[ ${#matches[@]} -eq 1 ]]; then
|
||||||
|
# Exactly one match - perfect!
|
||||||
|
echo "$specs_dir/${matches[0]}"
|
||||||
|
else
|
||||||
|
# Multiple matches - this shouldn't happen with proper naming convention
|
||||||
|
echo "ERROR: Multiple spec directories found with prefix '$prefix': ${matches[*]}" >&2
|
||||||
|
echo "Please ensure only one spec directory exists per numeric prefix." >&2
|
||||||
|
echo "$specs_dir/$branch_name" # Return something to avoid breaking the script
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
get_feature_paths() {
|
get_feature_paths() {
|
||||||
local repo_root=$(get_repo_root)
|
local repo_root=$(get_repo_root)
|
||||||
local current_branch=$(get_current_branch)
|
local current_branch=$(get_current_branch)
|
||||||
local has_git_repo="false"
|
local has_git_repo="false"
|
||||||
|
|
||||||
if has_git; then
|
if has_git; then
|
||||||
has_git_repo="true"
|
has_git_repo="true"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
local feature_dir=$(get_feature_dir "$repo_root" "$current_branch")
|
# Use prefix-based lookup to support multiple branches per spec
|
||||||
|
local feature_dir=$(find_feature_dir_by_prefix "$repo_root" "$current_branch")
|
||||||
|
|
||||||
cat <<EOF
|
cat <<EOF
|
||||||
REPO_ROOT='$repo_root'
|
REPO_ROOT='$repo_root'
|
||||||
CURRENT_BRANCH='$current_branch'
|
CURRENT_BRANCH='$current_branch'
|
||||||
@@ -111,3 +153,4 @@ EOF
|
|||||||
|
|
||||||
check_file() { [[ -f "$1" ]] && echo " ✓ $2" || echo " ✗ $2"; }
|
check_file() { [[ -f "$1" ]] && echo " ✓ $2" || echo " ✗ $2"; }
|
||||||
check_dir() { [[ -d "$1" && -n $(ls -A "$1" 2>/dev/null) ]] && echo " ✓ $2" || echo " ✗ $2"; }
|
check_dir() { [[ -d "$1" && -n $(ls -A "$1" 2>/dev/null) ]] && echo " ✓ $2" || echo " ✗ $2"; }
|
||||||
|
|
||||||
|
|||||||
@@ -3,18 +3,52 @@
 set -e
 
 JSON_MODE=false
+SHORT_NAME=""
 ARGS=()
-for arg in "$@"; do
+i=1
+while [ $i -le $# ]; do
+    arg="${!i}"
     case "$arg" in
-        --json) JSON_MODE=true ;;
-        --help|-h) echo "Usage: $0 [--json] <feature_description>"; exit 0 ;;
-        *) ARGS+=("$arg") ;;
+        --json)
+            JSON_MODE=true
+            ;;
+        --short-name)
+            if [ $((i + 1)) -gt $# ]; then
+                echo 'Error: --short-name requires a value' >&2
+                exit 1
+            fi
+            i=$((i + 1))
+            next_arg="${!i}"
+            # Check if the next argument is another option (starts with --)
+            if [[ "$next_arg" == --* ]]; then
+                echo 'Error: --short-name requires a value' >&2
+                exit 1
+            fi
+            SHORT_NAME="$next_arg"
+            ;;
+        --help|-h)
+            echo "Usage: $0 [--json] [--short-name <name>] <feature_description>"
+            echo ""
+            echo "Options:"
+            echo "  --json               Output in JSON format"
+            echo "  --short-name <name>  Provide a custom short name (2-4 words) for the branch"
+            echo "  --help, -h           Show this help message"
+            echo ""
+            echo "Examples:"
+            echo "  $0 'Add user authentication system' --short-name 'user-auth'"
+            echo "  $0 'Implement OAuth2 integration for API'"
+            exit 0
+            ;;
+        *)
+            ARGS+=("$arg")
+            ;;
     esac
+    i=$((i + 1))
 done
 
 FEATURE_DESCRIPTION="${ARGS[*]}"
 if [ -z "$FEATURE_DESCRIPTION" ]; then
-    echo "Usage: $0 [--json] <feature_description>" >&2
+    echo "Usage: $0 [--json] [--short-name <name>] <feature_description>" >&2
     exit 1
 fi
 

@@ -67,9 +101,84 @@ fi
 NEXT=$((HIGHEST + 1))
 FEATURE_NUM=$(printf "%03d" "$NEXT")
 
-BRANCH_NAME=$(echo "$FEATURE_DESCRIPTION" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//')
-WORDS=$(echo "$BRANCH_NAME" | tr '-' '\n' | grep -v '^$' | head -3 | tr '\n' '-' | sed 's/-$//')
-BRANCH_NAME="${FEATURE_NUM}-${WORDS}"
+# Function to generate branch name with stop word filtering and length filtering
+generate_branch_name() {
+    local description="$1"
+
+    # Common stop words to filter out
+    local stop_words="^(i|a|an|the|to|for|of|in|on|at|by|with|from|is|are|was|were|be|been|being|have|has|had|do|does|did|will|would|should|could|can|may|might|must|shall|this|that|these|those|my|your|our|their|want|need|add|get|set)$"
+
+    # Convert to lowercase and split into words
+    local clean_name=$(echo "$description" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/ /g')
+
+    # Filter words: remove stop words and words shorter than 3 chars (unless they're uppercase acronyms in original)
+    local meaningful_words=()
+    for word in $clean_name; do
+        # Skip empty words
+        [ -z "$word" ] && continue
+
+        # Keep words that are NOT stop words AND (length >= 3 OR are potential acronyms)
+        if ! echo "$word" | grep -qiE "$stop_words"; then
+            if [ ${#word} -ge 3 ]; then
+                meaningful_words+=("$word")
+            elif echo "$description" | grep -q "\b${word^^}\b"; then
+                # Keep short words if they appear as uppercase in original (likely acronyms)
+                meaningful_words+=("$word")
+            fi
+        fi
+    done
+
+    # If we have meaningful words, use first 3-4 of them
+    if [ ${#meaningful_words[@]} -gt 0 ]; then
+        local max_words=3
+        if [ ${#meaningful_words[@]} -eq 4 ]; then max_words=4; fi
+
+        local result=""
+        local count=0
+        for word in "${meaningful_words[@]}"; do
+            if [ $count -ge $max_words ]; then break; fi
+            if [ -n "$result" ]; then result="$result-"; fi
+            result="$result$word"
+            count=$((count + 1))
+        done
+        echo "$result"
+    else
+        # Fallback to original logic if no meaningful words found
+        echo "$description" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//' | tr '-' '\n' | grep -v '^$' | head -3 | tr '\n' '-' | sed 's/-$//'
+    fi
+}
+
+# Generate branch name
+if [ -n "$SHORT_NAME" ]; then
+    # Use provided short name, just clean it up
+    BRANCH_SUFFIX=$(echo "$SHORT_NAME" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//')
+else
+    # Generate from description with smart filtering
+    BRANCH_SUFFIX=$(generate_branch_name "$FEATURE_DESCRIPTION")
+fi
+
+BRANCH_NAME="${FEATURE_NUM}-${BRANCH_SUFFIX}"
+
+# GitHub enforces a 244-byte limit on branch names
+# Validate and truncate if necessary
+MAX_BRANCH_LENGTH=244
+if [ ${#BRANCH_NAME} -gt $MAX_BRANCH_LENGTH ]; then
+    # Calculate how much we need to trim from suffix
+    # Account for: feature number (3) + hyphen (1) = 4 chars
+    MAX_SUFFIX_LENGTH=$((MAX_BRANCH_LENGTH - 4))
+
+    # Truncate suffix at word boundary if possible
+    TRUNCATED_SUFFIX=$(echo "$BRANCH_SUFFIX" | cut -c1-$MAX_SUFFIX_LENGTH)
+    # Remove trailing hyphen if truncation created one
+    TRUNCATED_SUFFIX=$(echo "$TRUNCATED_SUFFIX" | sed 's/-$//')
+
+    ORIGINAL_BRANCH_NAME="$BRANCH_NAME"
+    BRANCH_NAME="${FEATURE_NUM}-${TRUNCATED_SUFFIX}"
+
+    >&2 echo "[specify] Warning: Branch name exceeded GitHub's 244-byte limit"
+    >&2 echo "[specify] Original: $ORIGINAL_BRANCH_NAME (${#ORIGINAL_BRANCH_NAME} bytes)"
+    >&2 echo "[specify] Truncated to: $BRANCH_NAME (${#BRANCH_NAME} bytes)"
+fi
+
 if [ "$HAS_GIT" = true ]; then
     git checkout -b "$BRANCH_NAME"
@@ -58,3 +58,4 @@ else
     echo "BRANCH: $CURRENT_BRANCH"
     echo "HAS_GIT: $HAS_GIT"
 fi
+
@@ -250,7 +250,7 @@ get_commands_for_language() {
             echo "cargo test && cargo clippy"
             ;;
         *"JavaScript"*|*"TypeScript"*)
-            echo "npm test && npm run lint"
+            echo "npm test \\&\\& npm run lint"
             ;;
         *)
             echo "# Add commands for $lang"

@@ -583,7 +583,7 @@ update_specific_agent() {
             update_agent_file "$ROO_FILE" "Roo Code"
             ;;
         codebuddy)
-            update_agent_file "$CODEBUDDY_FILE" "CodeBuddy"
+            update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI"
             ;;
         q)
             update_agent_file "$Q_FILE" "Amazon Q Developer CLI"

@@ -651,7 +651,7 @@ update_all_existing_agents() {
     fi
 
     if [[ -f "$CODEBUDDY_FILE" ]]; then
-        update_agent_file "$CODEBUDDY_FILE" "CodeBuddy"
+        update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI"
         found_agent=true
     fi
 

@@ -736,3 +736,4 @@ main() {
 if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
     main "$@"
 fi
+
@@ -145,4 +145,4 @@ if ($Json) {
     if ($IncludeTasks) {
         Test-FileExists -Path $paths.TASKS -Description 'tasks.md' | Out-Null
     }
 }
|
|||||||
@@ -134,3 +134,4 @@ function Test-DirHasFiles {
|
|||||||
return $false
|
return $false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -3,20 +3,39 @@
 [CmdletBinding()]
 param(
     [switch]$Json,
+    [string]$ShortName,
+    [switch]$Help,
     [Parameter(ValueFromRemainingArguments = $true)]
     [string[]]$FeatureDescription
 )
 $ErrorActionPreference = 'Stop'
 
+# Show help if requested
+if ($Help) {
+    Write-Host "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] <feature description>"
+    Write-Host ""
+    Write-Host "Options:"
+    Write-Host "  -Json               Output in JSON format"
+    Write-Host "  -ShortName <name>   Provide a custom short name (2-4 words) for the branch"
+    Write-Host "  -Help               Show this help message"
+    Write-Host ""
+    Write-Host "Examples:"
+    Write-Host "  ./create-new-feature.ps1 'Add user authentication system' -ShortName 'user-auth'"
+    Write-Host "  ./create-new-feature.ps1 'Implement OAuth2 integration for API'"
+    exit 0
+}
+
+# Check if feature description provided
 if (-not $FeatureDescription -or $FeatureDescription.Count -eq 0) {
-    Write-Error "Usage: ./create-new-feature.ps1 [-Json] <feature description>"
+    Write-Error "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] <feature description>"
     exit 1
 }
 
 $featureDesc = ($FeatureDescription -join ' ').Trim()
 
 # Resolve repository root. Prefer git information when available, but fall back
 # to searching for repository markers so the workflow still functions in repositories that
-# were initialised with --no-git.
+# were initialized with --no-git.
 function Find-RepositoryRoot {
     param(
         [string]$StartDir,

@@ -72,9 +91,82 @@ if (Test-Path $specsDir) {
 $next = $highest + 1
 $featureNum = ('{0:000}' -f $next)
 
-$branchName = $featureDesc.ToLower() -replace '[^a-z0-9]', '-' -replace '-{2,}', '-' -replace '^-', '' -replace '-$', ''
-$words = ($branchName -split '-') | Where-Object { $_ } | Select-Object -First 3
-$branchName = "$featureNum-$([string]::Join('-', $words))"
+# Function to generate branch name with stop word filtering and length filtering
+function Get-BranchName {
+    param([string]$Description)
+
+    # Common stop words to filter out
+    $stopWords = @(
+        'i', 'a', 'an', 'the', 'to', 'for', 'of', 'in', 'on', 'at', 'by', 'with', 'from',
+        'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had',
+        'do', 'does', 'did', 'will', 'would', 'should', 'could', 'can', 'may', 'might', 'must', 'shall',
+        'this', 'that', 'these', 'those', 'my', 'your', 'our', 'their',
+        'want', 'need', 'add', 'get', 'set'
+    )
+
+    # Convert to lowercase and extract words (alphanumeric only)
+    $cleanName = $Description.ToLower() -replace '[^a-z0-9\s]', ' '
+    $words = $cleanName -split '\s+' | Where-Object { $_ }
+
+    # Filter words: remove stop words and words shorter than 3 chars (unless they're uppercase acronyms in original)
+    $meaningfulWords = @()
+    foreach ($word in $words) {
+        # Skip stop words
+        if ($stopWords -contains $word) { continue }
+
+        # Keep words that are length >= 3 OR appear as uppercase in original (likely acronyms)
+        if ($word.Length -ge 3) {
+            $meaningfulWords += $word
+        } elseif ($Description -match "\b$($word.ToUpper())\b") {
+            # Keep short words if they appear as uppercase in original (likely acronyms)
+            $meaningfulWords += $word
+        }
+    }
+
+    # If we have meaningful words, use first 3-4 of them
+    if ($meaningfulWords.Count -gt 0) {
+        $maxWords = if ($meaningfulWords.Count -eq 4) { 4 } else { 3 }
+        $result = ($meaningfulWords | Select-Object -First $maxWords) -join '-'
+        return $result
+    } else {
+        # Fallback to original logic if no meaningful words found
+        $result = $Description.ToLower() -replace '[^a-z0-9]', '-' -replace '-{2,}', '-' -replace '^-', '' -replace '-$', ''
+        $fallbackWords = ($result -split '-') | Where-Object { $_ } | Select-Object -First 3
+        return [string]::Join('-', $fallbackWords)
+    }
+}
+
+# Generate branch name
+if ($ShortName) {
+    # Use provided short name, just clean it up
+    $branchSuffix = $ShortName.ToLower() -replace '[^a-z0-9]', '-' -replace '-{2,}', '-' -replace '^-', '' -replace '-$', ''
+} else {
+    # Generate from description with smart filtering
+    $branchSuffix = Get-BranchName -Description $featureDesc
+}
+
+$branchName = "$featureNum-$branchSuffix"
+
+# GitHub enforces a 244-byte limit on branch names
+# Validate and truncate if necessary
+$maxBranchLength = 244
+if ($branchName.Length -gt $maxBranchLength) {
+    # Calculate how much we need to trim from suffix
+    # Account for: feature number (3) + hyphen (1) = 4 chars
+    $maxSuffixLength = $maxBranchLength - 4
+
+    # Truncate suffix
+    $truncatedSuffix = $branchSuffix.Substring(0, [Math]::Min($branchSuffix.Length, $maxSuffixLength))
+    # Remove trailing hyphen if truncation created one
+    $truncatedSuffix = $truncatedSuffix -replace '-$', ''
+
+    $originalBranchName = $branchName
+    $branchName = "$featureNum-$truncatedSuffix"
+
+    Write-Warning "[specify] Branch name exceeded GitHub's 244-byte limit"
|
||||||
|
Write-Warning "[specify] Original: $originalBranchName ($($originalBranchName.Length) bytes)"
|
||||||
|
Write-Warning "[specify] Truncated to: $branchName ($($branchName.Length) bytes)"
|
||||||
|
}
|
||||||
|
|
||||||
if ($hasGit) {
|
if ($hasGit) {
|
||||||
try {
|
try {
|
||||||
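
The stop-word filtering and 244-byte truncation above are easy to reason about outside PowerShell. A minimal Python sketch of the same naming idea (hypothetical helper names and a trimmed stop-word list; not part of the repository's scripts):

```python
import re

# Assumed, abbreviated stop-word list for illustration only
STOP_WORDS = {
    "i", "a", "an", "the", "to", "for", "of", "in", "on", "at", "by", "with", "from",
    "want", "need", "add", "get", "set",
}

def short_name(description: str, max_words: int = 3) -> str:
    """Keep meaningful words (length >= 3, not a stop word) and join with hyphens."""
    words = re.findall(r"[a-z0-9]+", description.lower())
    meaningful = [w for w in words if w not in STOP_WORDS and len(w) >= 3]
    if not meaningful:
        meaningful = words  # fallback: use whatever words exist
    return "-".join(meaningful[:max_words])

def branch_name(feature_num: int, description: str, limit: int = 244) -> str:
    """Prefix with the zero-padded feature number and enforce the length limit."""
    name = f"{feature_num:03d}-{short_name(description)}"
    return name[:limit].rstrip("-")

print(branch_name(5, "I want to add user authentication"))  # 005-user-authentication
```
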
@@ -115,3 +207,4 @@ if ($Json) {
     Write-Output "HAS_GIT: $hasGit"
     Write-Output "SPECIFY_FEATURE environment variable set to: $branchName"
 }
+

@@ -59,3 +59,4 @@ if ($Json) {
     Write-Output "BRANCH: $($paths.CURRENT_BRANCH)"
     Write-Output "HAS_GIT: $($paths.HAS_GIT)"
 }
+

@@ -378,7 +378,7 @@ function Update-SpecificAgent {
         'kilocode' { Update-AgentFile -TargetFile $KILOCODE_FILE -AgentName 'Kilo Code' }
         'auggie' { Update-AgentFile -TargetFile $AUGGIE_FILE -AgentName 'Auggie CLI' }
         'roo' { Update-AgentFile -TargetFile $ROO_FILE -AgentName 'Roo Code' }
-        'codebuddy' { Update-AgentFile -TargetFile $CODEBUDDY_FILE -AgentName 'CodeBuddy' }
+        'codebuddy' { Update-AgentFile -TargetFile $CODEBUDDY_FILE -AgentName 'CodeBuddy CLI' }
         'q' { Update-AgentFile -TargetFile $Q_FILE -AgentName 'Amazon Q Developer CLI' }
         default { Write-Err "Unknown agent type '$Type'"; Write-Err 'Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|q'; return $false }
     }

@@ -397,7 +397,7 @@ function Update-AllExistingAgents {
     if (Test-Path $KILOCODE_FILE) { if (-not (Update-AgentFile -TargetFile $KILOCODE_FILE -AgentName 'Kilo Code')) { $ok = $false }; $found = $true }
     if (Test-Path $AUGGIE_FILE) { if (-not (Update-AgentFile -TargetFile $AUGGIE_FILE -AgentName 'Auggie CLI')) { $ok = $false }; $found = $true }
     if (Test-Path $ROO_FILE) { if (-not (Update-AgentFile -TargetFile $ROO_FILE -AgentName 'Roo Code')) { $ok = $false }; $found = $true }
-    if (Test-Path $CODEBUDDY_FILE) { if (-not (Update-AgentFile -TargetFile $CODEBUDDY_FILE -AgentName 'CodeBuddy')) { $ok = $false }; $found = $true }
+    if (Test-Path $CODEBUDDY_FILE) { if (-not (Update-AgentFile -TargetFile $CODEBUDDY_FILE -AgentName 'CodeBuddy CLI')) { $ok = $false }; $found = $true }
     if (Test-Path $Q_FILE) { if (-not (Update-AgentFile -TargetFile $Q_FILE -AgentName 'Amazon Q Developer CLI')) { $ok = $false }; $found = $true }
     if (-not $found) {
         Write-Info 'No existing agent files found, creating default Claude file...'

@@ -434,3 +434,4 @@ function Main {
 }
 
 Main
+

@@ -401,3 +401,4 @@ By embedding these principles into the specification and planning process, SDD e
 This isn't about replacing developers or automating creativity. It's about amplifying human capability by automating mechanical translation. It's about creating a tight feedback loop where specifications, research, and code evolve together, each iteration bringing deeper understanding and better alignment between intent and implementation.
 
 Software development needs better tools for maintaining alignment between intent and implementation. SDD provides the methodology for achieving this alignment through executable specifications that generate code rather than merely guiding it.
+

@@ -129,7 +129,7 @@ AGENT_CONFIG = {
     "codebuddy": {
         "name": "CodeBuddy",
         "folder": ".codebuddy/",
-        "install_url": "https://www.codebuddy.ai",
+        "install_url": "https://www.codebuddy.ai/cli",
         "requires_cli": True,
     },
     "roo": {

@@ -485,6 +485,73 @@ def init_git_repo(project_path: Path, quiet: bool = False) -> Tuple[bool, Option
     finally:
         os.chdir(original_cwd)
 
+def handle_vscode_settings(sub_item, dest_file, rel_path, verbose=False, tracker=None) -> None:
+    """Handle merging or copying of .vscode/settings.json files."""
+    def log(message, color="green"):
+        if verbose and not tracker:
+            console.print(f"[{color}]{message}[/] {rel_path}")
+
+    try:
+        with open(sub_item, 'r', encoding='utf-8') as f:
+            new_settings = json.load(f)
+
+        if dest_file.exists():
+            merged = merge_json_files(dest_file, new_settings, verbose=verbose and not tracker)
+            with open(dest_file, 'w', encoding='utf-8') as f:
+                json.dump(merged, f, indent=4)
+                f.write('\n')
+            log("Merged:", "green")
+        else:
+            shutil.copy2(sub_item, dest_file)
+            log("Copied (no existing settings.json):", "blue")
+
+    except Exception as e:
+        log(f"Warning: Could not merge, copying instead: {e}", "yellow")
+        shutil.copy2(sub_item, dest_file)
+
+def merge_json_files(existing_path: Path, new_content: dict, verbose: bool = False) -> dict:
+    """Merge new JSON content into existing JSON file.
+
+    Performs a deep merge where:
+    - New keys are added
+    - Existing keys are preserved unless overwritten by new content
+    - Nested dictionaries are merged recursively
+    - Lists and other values are replaced (not merged)
+
+    Args:
+        existing_path: Path to existing JSON file
+        new_content: New JSON content to merge in
+        verbose: Whether to print merge details
+
+    Returns:
+        Merged JSON content as dict
+    """
+    try:
+        with open(existing_path, 'r', encoding='utf-8') as f:
+            existing_content = json.load(f)
+    except (FileNotFoundError, json.JSONDecodeError):
+        # If file doesn't exist or is invalid, just use new content
+        return new_content
+
+    def deep_merge(base: dict, update: dict) -> dict:
+        """Recursively merge update dict into base dict."""
+        result = base.copy()
+        for key, value in update.items():
+            if key in result and isinstance(result[key], dict) and isinstance(value, dict):
+                # Recursively merge nested dictionaries
+                result[key] = deep_merge(result[key], value)
+            else:
+                # Add new key or replace existing value
+                result[key] = value
+        return result
+
+    merged = deep_merge(existing_content, new_content)
+
+    if verbose:
+        console.print(f"[cyan]Merged JSON file:[/cyan] {existing_path.name}")
+
+    return merged
+
 def download_template_from_github(ai_assistant: str, download_dir: Path, *, script_type: str = "sh", verbose: bool = True, show_progress: bool = True, client: httpx.Client = None, debug: bool = False, github_token: str = None) -> Tuple[Path, dict]:
     repo_owner = "github"
     repo_name = "spec-kit"
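
The merge semantics described in the docstring (nested dictionaries merged, lists and scalars replaced) can be checked in isolation. A small self-contained example of that deep-merge behaviour, using illustrative settings keys rather than real spec-kit defaults:

```python
def deep_merge(base: dict, update: dict) -> dict:
    """Recursively merge update into base; lists and scalars are replaced."""
    result = base.copy()
    for key, value in update.items():
        if key in result and isinstance(result[key], dict) and isinstance(value, dict):
            result[key] = deep_merge(result[key], value)
        else:
            result[key] = value
    return result

existing = {"files.exclude": {".git": True}, "editor.rulers": [80]}
incoming = {"files.exclude": {"**/__pycache__": True}, "editor.rulers": [100, 120]}

print(deep_merge(existing, incoming))
# {'files.exclude': {'.git': True, '**/__pycache__': True}, 'editor.rulers': [100, 120]}
```

The nested `files.exclude` dict keeps the existing key and gains the new one, while the `editor.rulers` list is replaced outright, which is exactly why user-edited lists in `.vscode/settings.json` can still be overwritten by an incoming template.
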
@@ -676,7 +743,11 @@ def download_and_extract_template(project_path: Path, ai_assistant: str, script_
                 rel_path = sub_item.relative_to(item)
                 dest_file = dest_path / rel_path
                 dest_file.parent.mkdir(parents=True, exist_ok=True)
-                shutil.copy2(sub_item, dest_file)
+                # Special handling for .vscode/settings.json - merge instead of overwrite
+                if dest_file.name == "settings.json" and dest_file.parent.name == ".vscode":
+                    handle_vscode_settings(sub_item, dest_file, rel_path, verbose, tracker)
+                else:
+                    shutil.copy2(sub_item, dest_file)
             else:
                 shutil.copytree(item, dest_path)
         else:

@@ -1093,18 +1164,25 @@ def check():
 
     tracker.add("git", "Git version control")
     git_ok = check_tool("git", tracker=tracker)
 
     agent_results = {}
     for agent_key, agent_config in AGENT_CONFIG.items():
         agent_name = agent_config["name"]
+        requires_cli = agent_config["requires_cli"]
+
         tracker.add(agent_key, agent_name)
-        agent_results[agent_key] = check_tool(agent_key, tracker=tracker)
+
+        if requires_cli:
+            agent_results[agent_key] = check_tool(agent_key, tracker=tracker)
+        else:
+            # IDE-based agent - skip CLI check and mark as optional
+            tracker.skip(agent_key, "IDE-based, no CLI check")
+            agent_results[agent_key] = False  # Don't count IDE agents as "found"
 
     # Check VS Code variants (not in agent config)
     tracker.add("code", "Visual Studio Code")
     code_ok = check_tool("code", tracker=tracker)
 
     tracker.add("code-insiders", "Visual Studio Code Insiders")
     code_insiders_ok = check_tool("code-insiders", tracker=tracker)
 
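
The `requires_cli` flag only gates whether a command-line lookup happens at all; IDE-based agents are skipped rather than reported as missing. A simplified standalone sketch of that gating, with a made-up two-entry config and `shutil.which` standing in for the CLI's actual `check_tool`:

```python
import shutil

# Illustrative config, not the full AGENT_CONFIG from the CLI
AGENT_CONFIG = {
    "claude": {"name": "Claude Code", "requires_cli": True},
    "copilot": {"name": "GitHub Copilot", "requires_cli": False},  # IDE-based
}

def check_agents() -> dict:
    results = {}
    for key, cfg in AGENT_CONFIG.items():
        if cfg["requires_cli"]:
            # A CLI agent counts as found only if its executable is on PATH
            results[key] = shutil.which(key) is not None
        else:
            # IDE-based agent: nothing to look up, never counted as "found"
            results[key] = False
    return results

print(check_agents())
```
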
@@ -1123,3 +1201,4 @@ def main():
|
|||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
||||||
|
|
||||||
|
|||||||
@@ -20,4 +20,4 @@ Auto-generated from all feature plans. Last updated: [DATE]
|
|||||||
[LAST 3 FEATURES AND WHAT THEY ADDED]
|
[LAST 3 FEATURES AND WHAT THEY ADDED]
|
||||||
|
|
||||||
<!-- MANUAL ADDITIONS START -->
|
<!-- MANUAL ADDITIONS START -->
|
||||||
<!-- MANUAL ADDITIONS END -->
|
<!-- MANUAL ADDITIONS END -->
|
||||||
|
|||||||
@@ -38,3 +38,4 @@
|
|||||||
- Add comments or findings inline
|
- Add comments or findings inline
|
||||||
- Link to relevant resources or documentation
|
- Link to relevant resources or documentation
|
||||||
- Items are numbered sequentially for easy reference
|
- Items are numbered sequentially for easy reference
|
||||||
|
|
||||||
|
|||||||
@@ -15,13 +15,13 @@ You **MUST** consider the user input before proceeding (if not empty).
|
|||||||
|
|
||||||
## Goal
|
## Goal
|
||||||
|
|
||||||
Identify inconsistencies, duplications, ambiguities, and underspecified items across the three core artifacts (`spec.md`, `plan.md`, `tasks.md`) before implementation. This command MUST run only after `/tasks` has successfully produced a complete `tasks.md`.
|
Identify inconsistencies, duplications, ambiguities, and underspecified items across the three core artifacts (`spec.md`, `plan.md`, `tasks.md`) before implementation. This command MUST run only after `/speckit.tasks` has successfully produced a complete `tasks.md`.
|
||||||
|
|
||||||
## Operating Constraints
|
## Operating Constraints
|
||||||
|
|
||||||
**STRICTLY READ-ONLY**: Do **not** modify any files. Output a structured analysis report. Offer an optional remediation plan (user must explicitly approve before any follow-up editing commands would be invoked manually).
|
**STRICTLY READ-ONLY**: Do **not** modify any files. Output a structured analysis report. Offer an optional remediation plan (user must explicitly approve before any follow-up editing commands would be invoked manually).
|
||||||
|
|
||||||
**Constitution Authority**: The project constitution (`/memory/constitution.md`) is **non-negotiable** within this analysis scope. Constitution conflicts are automatically CRITICAL and require adjustment of the spec, plan, or tasks—not dilution, reinterpretation, or silent ignoring of the principle. If a principle itself needs to change, that must occur in a separate, explicit constitution update outside `/analyze`.
|
**Constitution Authority**: The project constitution (`/memory/constitution.md`) is **non-negotiable** within this analysis scope. Constitution conflicts are automatically CRITICAL and require adjustment of the spec, plan, or tasks—not dilution, reinterpretation, or silent ignoring of the principle. If a principle itself needs to change, that must occur in a separate, explicit constitution update outside `/speckit.analyze`.
|
||||||
|
|
||||||
## Execution Steps
|
## Execution Steps
|
||||||
|
|
||||||
@@ -157,9 +157,9 @@ Output a Markdown report (no file writes) with the following structure:
|
|||||||
|
|
||||||
At end of report, output a concise Next Actions block:
|
At end of report, output a concise Next Actions block:
|
||||||
|
|
||||||
- If CRITICAL issues exist: Recommend resolving before `/implement`
|
- If CRITICAL issues exist: Recommend resolving before `/speckit.implement`
|
||||||
- If only LOW/MEDIUM: User may proceed, but provide improvement suggestions
|
- If only LOW/MEDIUM: User may proceed, but provide improvement suggestions
|
||||||
- Provide explicit command suggestions: e.g., "Run /specify with refinement", "Run /plan to adjust architecture", "Manually edit tasks.md to add coverage for 'performance-metrics'"
|
- Provide explicit command suggestions: e.g., "Run /speckit.specify with refinement", "Run /speckit.plan to adjust architecture", "Manually edit tasks.md to add coverage for 'performance-metrics'"
|
||||||
|
|
||||||
### 8. Offer Remediation
|
### 8. Offer Remediation
|
||||||
|
|
||||||
@@ -185,3 +185,4 @@ Ask the user: "Would you like me to suggest concrete remediation edits for the t
|
|||||||
## Context
|
## Context
|
||||||
|
|
||||||
{ARGS}
|
{ARGS}
|
||||||
|
|
||||||
|
|||||||
@@ -288,3 +288,4 @@ Sample items:
|
|||||||
- Correct: Validation of requirement quality
|
- Correct: Validation of requirement quality
|
||||||
- Wrong: "Does it do X?"
|
- Wrong: "Does it do X?"
|
||||||
- Correct: "Is X clearly specified?"
|
- Correct: "Is X clearly specified?"
|
||||||
|
|
||||||
|
|||||||
@@ -177,3 +177,4 @@ Behavior rules:
 - If quota reached with unresolved high-impact categories remaining, explicitly flag them under Deferred with rationale.
 
 Context for prioritization: {ARGS}
+

@@ -75,3 +75,4 @@ If the user supplies partial updates (e.g., only one principle revision), still
 If critical info missing (e.g., ratification date truly unknown), insert `TODO(<FIELD_NAME>): explanation` and include in the Sync Impact Report under deferred items.
 
 Do not create a new template; always operate on the existing `/memory/constitution.md` file.
+

@@ -74,11 +74,19 @@ You **MUST** consider the user input before proceeding (if not empty).
 **If ignore file missing**: Create with full pattern set for detected technology
 
 **Common Patterns by Technology** (from plan.md tech stack):
-- **Node.js/JavaScript**: `node_modules/`, `dist/`, `build/`, `*.log`, `.env*`
+- **Node.js/JavaScript/TypeScript**: `node_modules/`, `dist/`, `build/`, `*.log`, `.env*`
 - **Python**: `__pycache__/`, `*.pyc`, `.venv/`, `venv/`, `dist/`, `*.egg-info/`
 - **Java**: `target/`, `*.class`, `*.jar`, `.gradle/`, `build/`
 - **C#/.NET**: `bin/`, `obj/`, `*.user`, `*.suo`, `packages/`
 - **Go**: `*.exe`, `*.test`, `vendor/`, `*.out`
+- **Ruby**: `.bundle/`, `log/`, `tmp/`, `*.gem`, `vendor/bundle/`
+- **PHP**: `vendor/`, `*.log`, `*.cache`, `*.env`
+- **Rust**: `target/`, `debug/`, `release/`, `*.rs.bk`, `*.rlib`, `*.prof*`, `.idea/`, `*.log`, `.env*`
+- **Kotlin**: `build/`, `out/`, `.gradle/`, `.idea/`, `*.class`, `*.jar`, `*.iml`, `*.log`, `.env*`
+- **C++**: `build/`, `bin/`, `obj/`, `out/`, `*.o`, `*.so`, `*.a`, `*.exe`, `*.dll`, `.idea/`, `*.log`, `.env*`
+- **C**: `build/`, `bin/`, `obj/`, `out/`, `*.o`, `*.a`, `*.so`, `*.exe`, `Makefile`, `config.log`, `.idea/`, `*.log`, `.env*`
+- **Swift**: `.build/`, `DerivedData/`, `*.swiftpm/`, `Packages/`
+- **R**: `.Rproj.user/`, `.Rhistory`, `.RData`, `.Ruserdata`, `*.Rproj`, `packrat/`, `renv/`
 - **Universal**: `.DS_Store`, `Thumbs.db`, `*.tmp`, `*.swp`, `.vscode/`, `.idea/`
 
 **Tool-Specific Patterns**:

@@ -86,6 +94,7 @@ You **MUST** consider the user input before proceeding (if not empty).
 - **ESLint**: `node_modules/`, `dist/`, `build/`, `coverage/`, `*.min.js`
 - **Prettier**: `node_modules/`, `dist/`, `build/`, `coverage/`, `package-lock.json`, `yarn.lock`, `pnpm-lock.yaml`
 - **Terraform**: `.terraform/`, `*.tfstate*`, `*.tfvars`, `.terraform.lock.hcl`
+- **Kubernetes/k8s**: `*.secret.yaml`, `secrets/`, `.kube/`, `kubeconfig*`, `*.key`, `*.crt`
 
 5. Parse tasks.md structure and extract:
    - **Task phases**: Setup, Tests, Core, Integration, Polish

@@ -122,4 +131,5 @@ You **MUST** consider the user input before proceeding (if not empty).
 - Confirm the implementation follows the technical plan
 - Report final status with summary of completed work
 
-Note: This command assumes a complete task breakdown exists in tasks.md. If tasks are incomplete or missing, suggest running `/tasks` first to regenerate the task list.
+Note: This command assumes a complete task breakdown exists in tasks.md. If tasks are incomplete or missing, suggest running `/speckit.tasks` first to regenerate the task list.
+

@@ -84,3 +84,4 @@ You **MUST** consider the user input before proceeding (if not empty).
 
 - Use absolute paths
 - ERROR on gate failures or unresolved clarifications
+
@@ -19,11 +19,32 @@ The text the user typed after `/speckit.specify` in the triggering message **is**
 
 Given that feature description, do this:
 
-1. Run the script `{SCRIPT}` from repo root and parse its JSON output for BRANCH_NAME and SPEC_FILE. All file paths must be absolute.
-   **IMPORTANT** You must only ever run this script once. The JSON is provided in the terminal as output - always refer to it to get the actual content you're looking for. For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
-2. Load `templates/spec-template.md` to understand required sections.
+1. **Generate a concise short name** (2-4 words) for the branch:
+   - Analyze the feature description and extract the most meaningful keywords
+   - Create a 2-4 word short name that captures the essence of the feature
+   - Use action-noun format when possible (e.g., "add-user-auth", "fix-payment-bug")
+   - Preserve technical terms and acronyms (OAuth2, API, JWT, etc.)
+   - Keep it concise but descriptive enough to understand the feature at a glance
+   - Examples:
+     - "I want to add user authentication" → "user-auth"
+     - "Implement OAuth2 integration for the API" → "oauth2-api-integration"
+     - "Create a dashboard for analytics" → "analytics-dashboard"
+     - "Fix payment processing timeout bug" → "fix-payment-timeout"
 
-3. Follow this execution flow:
+2. Run the script `{SCRIPT}` from repo root **with the short-name argument** and parse its JSON output for BRANCH_NAME and SPEC_FILE. All file paths must be absolute.
+
+   **IMPORTANT**:
+
+   - Append the short-name argument to the `{SCRIPT}` command with the 2-4 word short name you created in step 1
+   - Bash: `--short-name "your-generated-short-name"`
+   - PowerShell: `-ShortName "your-generated-short-name"`
+   - For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\''m Groot' (or double-quote if possible: "I'm Groot")
+   - You must only ever run this script once
+   - The JSON is provided in the terminal as output - always refer to it to get the actual content you're looking for
+
+3. Load `templates/spec-template.md` to understand required sections.
+
+4. Follow this execution flow:
 
     1. Parse user description from Input
        If empty: ERROR "No feature description provided"

@@ -49,9 +70,9 @@ Given that feature description, do this:
     7. Identify Key Entities (if data involved)
     8. Return: SUCCESS (spec ready for planning)
 
-4. Write the specification to SPEC_FILE using the template structure, replacing placeholders with concrete details derived from the feature description (arguments) while preserving section order and headings.
+5. Write the specification to SPEC_FILE using the template structure, replacing placeholders with concrete details derived from the feature description (arguments) while preserving section order and headings.
 
-5. **Specification Quality Validation**: After writing the initial spec, validate it against quality criteria:
+6. **Specification Quality Validation**: After writing the initial spec, validate it against quality criteria:
 
    a. **Create Spec Quality Checklist**: Generate a checklist file at `FEATURE_DIR/checklists/requirements.md` using the checklist template structure with these validation items:
 

@@ -143,7 +164,7 @@ Given that feature description, do this:
 
    d. **Update Checklist**: After each validation iteration, update the checklist file with current pass/fail status
 
-6. Report completion with branch name, spec file path, checklist results, and readiness for the next phase (`/speckit.clarify` or `/speckit.plan`).
+7. Report completion with branch name, spec file path, checklist results, and readiness for the next phase (`/speckit.clarify` or `/speckit.plan`).
 
 **NOTE:** The script creates and checks out the new branch and initializes the spec file before writing.
 

@@ -209,3 +230,4 @@ Success criteria must be:
 - "Database can handle 1000 TPS" (implementation detail, use user-facing metric)
 - "React components render efficiently" (framework-specific)
 - "Redis cache hit rate above 80%" (technology-specific)
+
@@ -22,27 +22,13 @@ You **MUST** consider the user input before proceeding (if not empty).
 - **Optional**: data-model.md (entities), contracts/ (API endpoints), research.md (decisions), quickstart.md (test scenarios)
 - Note: Not all projects have all documents. Generate tasks based on what's available.
 
-3. **Execute task generation workflow** (follow the template structure):
+3. **Execute task generation workflow**:
    - Load plan.md and extract tech stack, libraries, project structure
-   - **Load spec.md and extract user stories with their priorities (P1, P2, P3, etc.)**
-   - If data-model.md exists: Extract entities → map to user stories
-   - If contracts/ exists: Each file → map endpoints to user stories
-   - If research.md exists: Extract decisions → generate setup tasks
-   - **Generate tasks ORGANIZED BY USER STORY**:
-     - Setup tasks (shared infrastructure needed by all stories)
-     - **Foundational tasks (prerequisites that must complete before ANY user story can start)**
-     - For each user story (in priority order P1, P2, P3...):
-       - Group all tasks needed to complete JUST that story
-       - Include models, services, endpoints, UI components specific to that story
-       - Mark which tasks are [P] parallelizable
-       - If tests requested: Include tests specific to that story
-     - Polish/Integration tasks (cross-cutting concerns)
-   - **Tests are OPTIONAL**: Only generate test tasks if explicitly requested in the feature spec or user asks for TDD approach
-   - Apply task rules:
-     - Different files = mark [P] for parallel
-     - Same file = sequential (no [P])
-     - If tests requested: Tests before implementation (TDD order)
-   - Number tasks sequentially (T001, T002...)
+   - Load spec.md and extract user stories with their priorities (P1, P2, P3, etc.)
+   - If data-model.md exists: Extract entities and map to user stories
+   - If contracts/ exists: Map endpoints to user stories
+   - If research.md exists: Extract decisions for setup tasks
+   - Generate tasks organized by user story (see Task Generation Rules below)
    - Generate dependency graph showing user story completion order
    - Create parallel execution examples per user story
    - Validate task completeness (each user story has all needed tasks, independently testable)

@@ -52,12 +38,9 @@ You **MUST** consider the user input before proceeding (if not empty).
    - Phase 1: Setup tasks (project initialization)
    - Phase 2: Foundational tasks (blocking prerequisites for all user stories)
    - Phase 3+: One phase per user story (in priority order from spec.md)
   - Each phase includes: story goal, independent test criteria, tests (if requested), implementation tasks
-   - Clear [Story] labels (US1, US2, US3...) for each task
-   - [P] markers for parallelizable tasks within each story
-   - Checkpoint markers after each story phase
    - Final Phase: Polish & cross-cutting concerns
-   - Numbered tasks (T001, T002...) in execution order
+   - All tasks must follow the strict checklist format (see Task Generation Rules below)
    - Clear file paths for each task
    - Dependencies section showing story completion order
    - Parallel execution examples per story

@@ -69,6 +52,7 @@ You **MUST** consider the user input before proceeding (if not empty).
    - Parallel opportunities identified
    - Independent test criteria for each story
    - Suggested MVP scope (typically just User Story 1)
+   - Format validation: Confirm ALL tasks follow the checklist format (checkbox, ID, labels, file paths)
 
 Context for task generation: {ARGS}
 

@@ -76,10 +60,44 @@ The tasks.md should be immediately executable - each task must be specific enoug
 
 ## Task Generation Rules
 
-**IMPORTANT**: Tests are optional. Only generate test tasks if the user explicitly requested testing or TDD approach in the feature specification.
-
 **CRITICAL**: Tasks MUST be organized by user story to enable independent implementation and testing.
 
+**Tests are OPTIONAL**: Only generate test tasks if explicitly requested in the feature specification or if user requests TDD approach.
+
+### Checklist Format (REQUIRED)
+
+Every task MUST strictly follow this format:
+
+```text
+- [ ] [TaskID] [P?] [Story?] Description with file path
+```
+
+**Format Components**:
+
+1. **Checkbox**: ALWAYS start with `- [ ]` (markdown checkbox)
+2. **Task ID**: Sequential number (T001, T002, T003...) in execution order
+3. **[P] marker**: Include ONLY if task is parallelizable (different files, no dependencies on incomplete tasks)
+4. **[Story] label**: REQUIRED for user story phase tasks only
+   - Format: [US1], [US2], [US3], etc. (maps to user stories from spec.md)
+   - Setup phase: NO story label
+   - Foundational phase: NO story label
+   - User Story phases: MUST have story label
+   - Polish phase: NO story label
+5. **Description**: Clear action with exact file path
+
+**Examples**:
+
+- ✅ CORRECT: `- [ ] T001 Create project structure per implementation plan`
+- ✅ CORRECT: `- [ ] T005 [P] Implement authentication middleware in src/middleware/auth.py`
+- ✅ CORRECT: `- [ ] T012 [P] [US1] Create User model in src/models/user.py`
+- ✅ CORRECT: `- [ ] T014 [US1] Implement UserService in src/services/user_service.py`
+- ❌ WRONG: `- [ ] Create User model` (missing ID and Story label)
+- ❌ WRONG: `T001 [US1] Create model` (missing checkbox)
+- ❌ WRONG: `- [ ] [US1] Create User model` (missing Task ID)
+- ❌ WRONG: `- [ ] T001 [US1] Create model` (missing file path)
+
+### Task Organization
+
 1. **From User Stories (spec.md)** - PRIMARY ORGANIZATION:
    - Each user story (P1, P2, P3...) gets its own phase
    - Map all related components to their story:
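
The checklist format described above is strict enough to check mechanically. A rough Python validation of the `- [ ] T001 [P] [US1] ...` shape, written as a sketch of the stated rules rather than a validator shipped with spec-kit:

```python
import re

TASK_RE = re.compile(
    r"^- \[ \] "        # checkbox
    r"T\d{3} "          # task ID (T001, T002, ...)
    r"(?:\[P\] )?"      # optional parallel marker
    r"(?:\[US\d+\] )?"  # optional user-story label
    r"\S.*$"            # description (should include a file path)
)

samples = [
    "- [ ] T012 [P] [US1] Create User model in src/models/user.py",  # ok
    "T001 [US1] Create model",                                        # missing checkbox
    "- [ ] [US1] Create User model",                                  # missing task ID
]

for line in samples:
    print("OK " if TASK_RE.match(line) else "BAD", line)
```
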
@@ -94,22 +112,22 @@ The tasks.md should be immediately executable - each task must be specific enoug
|
|||||||
- If tests requested: Each contract → contract test task [P] before implementation in that story's phase
|
- If tests requested: Each contract → contract test task [P] before implementation in that story's phase
|
||||||
|
|
||||||
3. **From Data Model**:
|
3. **From Data Model**:
|
||||||
- Map each entity → to the user story(ies) that need it
|
- Map each entity to the user story(ies) that need it
|
||||||
- If entity serves multiple stories: Put in earliest story or Setup phase
|
- If entity serves multiple stories: Put in earliest story or Setup phase
|
||||||
- Relationships → service layer tasks in appropriate story phase
|
- Relationships → service layer tasks in appropriate story phase
|
||||||
|
|
||||||
4. **From Setup/Infrastructure**:
|
4. **From Setup/Infrastructure**:
|
||||||
- Shared infrastructure → Setup phase (Phase 1)
|
- Shared infrastructure → Setup phase (Phase 1)
|
||||||
- Foundational/blocking tasks → Foundational phase (Phase 2)
|
- Foundational/blocking tasks → Foundational phase (Phase 2)
|
||||||
- Examples: Database schema setup, authentication framework, core libraries, base configurations
|
|
||||||
- These MUST complete before any user story can be implemented
|
|
||||||
- Story-specific setup → within that story's phase
|
- Story-specific setup → within that story's phase
|
||||||
|
|
||||||
5. **Ordering**:
|
### Phase Structure
|
||||||
- Phase 1: Setup (project initialization)
|
|
||||||
- Phase 2: Foundational (blocking prerequisites - must complete before user stories)
|
- **Phase 1**: Setup (project initialization)
|
||||||
- Phase 3+: User Stories in priority order (P1, P2, P3...)
|
- **Phase 2**: Foundational (blocking prerequisites - MUST complete before user stories)
|
||||||
- Within each story: Tests (if requested) → Models → Services → Endpoints → Integration
|
- **Phase 3+**: User Stories in priority order (P1, P2, P3...)
|
||||||
- Final Phase: Polish & Cross-Cutting Concerns
|
- Within each story: Tests (if requested) → Models → Services → Endpoints → Integration
|
||||||
- Each user story phase should be a complete, independently testable increment
|
- Each phase should be a complete, independently testable increment
|
||||||
|
- **Final Phase**: Polish & Cross-Cutting Concerns
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -102,3 +102,4 @@ directories captured above]
|
|||||||
|-----------|------------|-------------------------------------|
|
|-----------|------------|-------------------------------------|
|
||||||
| [e.g., 4th project] | [current need] | [why 3 projects insufficient] |
|
| [e.g., 4th project] | [current need] | [why 3 projects insufficient] |
|
||||||
| [e.g., Repository pattern] | [specific problem] | [why direct DB access insufficient] |
|
| [e.g., Repository pattern] | [specific problem] | [why direct DB access insufficient] |
|
||||||
|
|
||||||
|
|||||||
@@ -113,3 +113,4 @@
|
|||||||
- **SC-002**: [Measurable metric, e.g., "System handles 1000 concurrent users without degradation"]
|
- **SC-002**: [Measurable metric, e.g., "System handles 1000 concurrent users without degradation"]
|
||||||
- **SC-003**: [User satisfaction metric, e.g., "90% of users successfully complete primary task on first attempt"]
|
- **SC-003**: [User satisfaction metric, e.g., "90% of users successfully complete primary task on first attempt"]
|
||||||
- **SC-004**: [Business metric, e.g., "Reduce support tickets related to [X] by 50%"]
|
- **SC-004**: [Business metric, e.g., "Reduce support tickets related to [X] by 50%"]
|
||||||
|
|
||||||
|
|||||||
@@ -248,3 +248,4 @@ With multiple developers:
|
|||||||
- Avoid: vague tasks, same file conflicts, cross-story dependencies that break independence
|
- Avoid: vague tasks, same file conflicts, cross-story dependencies that break independence
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -11,3 +11,4 @@
|
|||||||
".specify/scripts/powershell/": true
|
".specify/scripts/powershell/": true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||