Template cleanup and reorganization

den (work)
2025-10-03 17:08:14 -07:00
parent 23e0c5c83c
commit 5042c76558
7 changed files with 251 additions and 370 deletions

View File

@@ -42,7 +42,7 @@ generate_commands() {
mkdir -p "$output_dir"
for template in templates/commands/*.md; do
[[ -f "$template" ]] || continue
-local name description script_command body
+local name description script_command agent_script_command body
name=$(basename "$template" .md)
# Normalize line endings
@@ -57,13 +57,29 @@ generate_commands() {
script_command="(Missing script command for $script_variant)"
fi
# Extract agent_script command from YAML frontmatter if present
agent_script_command=$(printf '%s\n' "$file_content" | awk '
/^agent_scripts:$/ { in_agent_scripts=1; next }
in_agent_scripts && /^[[:space:]]*'"$script_variant"':[[:space:]]*/ {
sub(/^[[:space:]]*'"$script_variant"':[[:space:]]*/, "")
print
exit
}
in_agent_scripts && /^[a-zA-Z]/ { in_agent_scripts=0 }
')
# Replace {SCRIPT} placeholder with the script command
body=$(printf '%s\n' "$file_content" | sed "s|{SCRIPT}|${script_command}|g")
-# Remove the scripts: section from frontmatter while preserving YAML structure
+# Replace {AGENT_SCRIPT} placeholder with the agent script command if found
if [[ -n $agent_script_command ]]; then
body=$(printf '%s\n' "$body" | sed "s|{AGENT_SCRIPT}|${agent_script_command}|g")
fi
# Remove the scripts: and agent_scripts: sections from frontmatter while preserving YAML structure
body=$(printf '%s\n' "$body" | awk '
/^---$/ { print; if (++dash_count == 1) in_frontmatter=1; else in_frontmatter=0; next }
-in_frontmatter && /^scripts:$/ { skip_scripts=1; next }
+in_frontmatter && /^(scripts|agent_scripts):$/ { skip_scripts=1; next }
in_frontmatter && /^[a-zA-Z].*:/ && skip_scripts { skip_scripts=0 }
in_frontmatter && skip_scripts && /^[[:space:]]/ { next }
{ print }
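For context, here is a minimal standalone sketch of what the new `agent_scripts` handling does. The sample template below is invented for illustration; the awk extraction mirrors the code added in this hunk, run here with `script_variant=sh`.

```bash
#!/usr/bin/env bash
# Illustrative harness only -- the sample frontmatter is made up;
# the awk extraction mirrors the code added above.
script_variant="sh"
file_content=$(cat <<'EOF'
---
description: Example command
scripts:
  sh: scripts/bash/setup-plan.sh --json
agent_scripts:
  sh: scripts/bash/update-agent-context.sh __AGENT__
  ps: scripts/powershell/update-agent-context.ps1 -AgentType __AGENT__
---
Run `{AGENT_SCRIPT}` to refresh the agent context file.
EOF
)

# Pull the sh variant out of the agent_scripts: block.
agent_script_command=$(printf '%s\n' "$file_content" | awk '
/^agent_scripts:$/ { in_agent_scripts=1; next }
in_agent_scripts && /^[[:space:]]*'"$script_variant"':[[:space:]]*/ {
  sub(/^[[:space:]]*'"$script_variant"':[[:space:]]*/, "")
  print
  exit
}
in_agent_scripts && /^[a-zA-Z]/ { in_agent_scripts=0 }
')
echo "$agent_script_command"
# -> scripts/bash/update-agent-context.sh __AGENT__

# Substitute the placeholder in the body, as generate_commands now does.
printf '%s\n' "$file_content" | sed "s|{AGENT_SCRIPT}|${agent_script_command}|g" | tail -n 1
# -> Run `scripts/bash/update-agent-context.sh __AGENT__` to refresh the agent context file.
```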
@@ -113,24 +129,7 @@ build_variant() {
fi
[[ -d templates ]] && { mkdir -p "$SPEC_DIR/templates"; find templates -type f -not -path "templates/commands/*" -exec cp --parents {} "$SPEC_DIR"/ \; ; echo "Copied templates -> .specify/templates"; }
# Inject variant into plan-template.md within .specify/templates if present
local plan_tpl="$base_dir/.specify/templates/plan-template.md"
if [[ -f "$plan_tpl" ]]; then
plan_norm=$(tr -d '\r' < "$plan_tpl")
# Extract script command from YAML frontmatter
script_command=$(printf '%s\n' "$plan_norm" | awk -v sv="$script" '/^[[:space:]]*'"$script"':[[:space:]]*/ {sub(/^[[:space:]]*'"$script"':[[:space:]]*/, ""); print; exit}')
if [[ -n $script_command ]]; then
# Always prefix with .specify/ for plan usage
script_command=".specify/$script_command"
# Replace {SCRIPT} placeholder with the script command and __AGENT__ with agent name
substituted=$(sed "s|{SCRIPT}|${script_command}|g" "$plan_tpl" | tr -d '\r' | sed "s|__AGENT__|${agent}|g")
# Strip YAML frontmatter from plan template output (keep body only)
stripped=$(printf '%s\n' "$substituted" | awk 'BEGIN{fm=0;dash=0} /^---$/ {dash++; if(dash==1){fm=1; next} else if(dash==2){fm=0; next}} {if(!fm) print}')
printf '%s\n' "$stripped" > "$plan_tpl"
else
echo "Warning: no plan-template script command found for $script in YAML frontmatter" >&2
fi
fi
# NOTE: We substitute {ARGS} internally. Outward tokens differ intentionally:
# * Markdown/prompt (claude, copilot, cursor, opencode): $ARGUMENTS
# * TOML (gemini, qwen): {{args}}
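The NOTE above explains that the internal `{ARGS}` placeholder is rendered differently per output format. A hypothetical sketch of that mapping (the actual implementation is outside this hunk, so the helper below is an assumption, not the script's code):

```bash
# Hypothetical sketch of the {ARGS} rendering described in the NOTE above;
# the real build-script logic is not shown in this hunk.
render_args_token() {
  local agent="$1"
  case "$agent" in
    gemini|qwen) echo '{{args}}' ;;    # TOML command files
    *)           echo '$ARGUMENTS' ;;  # Markdown/prompt agents (claude, copilot, cursor, opencode)
  esac
}

body='User input: {ARGS}'
args_token=$(render_args_token "claude")
printf '%s\n' "$body" | sed "s|{ARGS}|${args_token}|g"
# -> User input: $ARGUMENTS
```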

View File

@@ -3,6 +3,9 @@ description: Execute the implementation planning workflow using the plan templat
scripts:
sh: scripts/bash/setup-plan.sh --json
ps: scripts/powershell/setup-plan.ps1 -Json
agent_scripts:
sh: scripts/bash/update-agent-context.sh __AGENT__
ps: scripts/powershell/update-agent-context.ps1 -AgentType __AGENT__
---
The user input to you can be provided directly by the agent or as a command argument - you **MUST** consider it before proceeding with the prompt (if not empty).
@@ -11,36 +14,72 @@ User input:
$ARGUMENTS
-Given the implementation details provided as an argument, do this:
+## Execution Steps
-1. Run `{SCRIPT}` from the repo root and parse JSON for FEATURE_SPEC, IMPL_PLAN, SPECS_DIR, BRANCH. All future file paths must be absolute.
+1. **Setup**: Run `{SCRIPT}` from repo root and parse JSON for FEATURE_SPEC, IMPL_PLAN, SPECS_DIR, BRANCH.
-- BEFORE proceeding, inspect FEATURE_SPEC for a `## Clarifications` section with at least one `Session` subheading. If missing or clearly ambiguous areas remain (vague adjectives, unresolved critical choices), PAUSE and instruct the user to run `/clarify` first to reduce rework. Only continue if: (a) Clarifications exist OR (b) an explicit user override is provided (e.g., "proceed without clarification"). Do not attempt to fabricate clarifications yourself.
+- Before proceeding: Check FEATURE_SPEC has `## Clarifications` section. If missing or ambiguous, instruct user to run `/clarify` first.
2. Read and analyze the feature specification to understand:
- The feature requirements and user stories
- Functional and non-functional requirements
- Success criteria and acceptance criteria
- Any technical constraints or dependencies mentioned
-3. Read the constitution at `/memory/constitution.md` to understand constitutional requirements.
+2. **Load context**: Read FEATURE_SPEC and `.specify/memory/constitution.md`. Load IMPL_PLAN template (already copied).
-4. Execute the implementation plan template:
+3. **Execute plan workflow**: Follow the structure in IMPL_PLAN template to:
-- Load `/templates/plan-template.md` (already copied to IMPL_PLAN path)
+- Fill Technical Context (mark unknowns as "NEEDS CLARIFICATION")
-- Set Input path to FEATURE_SPEC
+- Fill Constitution Check section from constitution
-- Run the Execution Flow (main) function steps 1-9
+- Evaluate gates (ERROR if violations unjustified)
-- The template is self-contained and executable
+- Phase 0: Generate research.md (resolve all NEEDS CLARIFICATION)
-- Follow error handling and gate checks as specified
+- Phase 1: Generate data-model.md, contracts/, quickstart.md
-- Let the template guide artifact generation in $SPECS_DIR:
+- Phase 1: Update agent context by running the agent script
-* Phase 0 generates research.md
+- Re-evaluate Constitution Check post-design
* Phase 1 generates data-model.md, contracts/, quickstart.md
* Phase 2 generates tasks.md
- Incorporate user-provided details from arguments into Technical Context: {ARGS}
- Update Progress Tracking as you complete each phase
-5. Verify execution completed:
+4. **Stop and report**: Command ends after Phase 2 planning. Report branch, IMPL_PLAN path, and generated artifacts.
- Check Progress Tracking shows all phases complete
- Ensure all required artifacts were generated
- Confirm no ERROR states in execution
-6. Report results with branch name, file paths, and generated artifacts.
+## Phases
-Use absolute paths with the repository root for all file operations to avoid path issues.
+### Phase 0: Outline & Research
1. **Extract unknowns from Technical Context** above:
- For each NEEDS CLARIFICATION → research task
- For each dependency → best practices task
- For each integration → patterns task
2. **Generate and dispatch research agents**:
```
For each unknown in Technical Context:
Task: "Research {unknown} for {feature context}"
For each technology choice:
Task: "Find best practices for {tech} in {domain}"
```
3. **Consolidate findings** in `research.md` using format:
- Decision: [what was chosen]
- Rationale: [why chosen]
- Alternatives considered: [what else evaluated]
**Output**: research.md with all NEEDS CLARIFICATION resolved
### Phase 1: Design & Contracts
**Prerequisites:** `research.md` complete
1. **Extract entities from feature spec** → `data-model.md`:
- Entity name, fields, relationships
- Validation rules from requirements
- State transitions if applicable
2. **Generate API contracts** from functional requirements:
- For each user action → endpoint
- Use standard REST/GraphQL patterns
- Output OpenAPI/GraphQL schema to `/contracts/`
3. **Agent context update**:
- Run `{AGENT_SCRIPT}`
- These scripts detect which AI agent is in use
- Update the appropriate agent-specific context file
- Add only new technology from current plan
- Preserve manual additions between markers
**Output**: data-model.md, /contracts/*, quickstart.md, agent-specific file
## Key rules
- Use absolute paths
- ERROR on gate failures or unresolved clarifications
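The Setup step above has the agent run the setup script and read its JSON output. As a rough sketch of what consuming that output could look like (assuming `jq` is available and that the keys match the names used in the step; the exact JSON shape emitted by `setup-plan.sh` is not shown in this diff):

```bash
# Illustrative only: parsing the setup script's JSON output.
# Key names follow the Setup step above; the precise JSON shape is an assumption.
json=$(scripts/bash/setup-plan.sh --json)
FEATURE_SPEC=$(jq -r '.FEATURE_SPEC' <<<"$json")
IMPL_PLAN=$(jq -r '.IMPL_PLAN' <<<"$json")
SPECS_DIR=$(jq -r '.SPECS_DIR' <<<"$json")
BRANCH=$(jq -r '.BRANCH' <<<"$json")
echo "Planning ${BRANCH}: spec=${FEATURE_SPEC} plan=${IMPL_PLAN} dir=${SPECS_DIR}"
```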

View File

@@ -18,7 +18,56 @@ Given that feature description, do this:
1. Run the script `{SCRIPT}` from repo root and parse its JSON output for BRANCH_NAME and SPEC_FILE. All file paths must be absolute.
**IMPORTANT** You must only ever run this script once. The JSON is provided in the terminal as output - always refer to it to get the actual content you're looking for.
2. Load `templates/spec-template.md` to understand required sections.
3. Write the specification to SPEC_FILE using the template structure, replacing placeholders with concrete details derived from the feature description (arguments) while preserving section order and headings.
4. Report completion with branch name, spec file path, and readiness for the next phase.
-Note: The script creates and checks out the new branch and initializes the spec file before writing.
+3. Follow this execution flow:
1. Parse user description from Input
If empty: ERROR "No feature description provided"
2. Extract key concepts from description
Identify: actors, actions, data, constraints
3. For each unclear aspect:
Mark with [NEEDS CLARIFICATION: specific question]
4. Fill User Scenarios & Testing section
If no clear user flow: ERROR "Cannot determine user scenarios"
5. Generate Functional Requirements
Each requirement must be testable
Mark ambiguous requirements
6. Identify Key Entities (if data involved)
7. Run Review Checklist
If any [NEEDS CLARIFICATION]: WARN "Spec has uncertainties"
If implementation details found: ERROR "Remove tech details"
8. Return: SUCCESS (spec ready for planning)
4. Write the specification to SPEC_FILE using the template structure, replacing placeholders with concrete details derived from the feature description (arguments) while preserving section order and headings.
5. Report completion with branch name, spec file path, and readiness for the next phase.
**NOTE:** The script creates and checks out the new branch and initializes the spec file before writing.
## General Guidelines
## Quick Guidelines
- Focus on WHAT users need and WHY
- Avoid HOW to implement (no tech stack, APIs, code structure)
- Written for business stakeholders, not developers
### Section Requirements
- **Mandatory sections**: Must be completed for every feature
- **Optional sections**: Include only when relevant to the feature
- When a section doesn't apply, remove it entirely (don't leave as "N/A")
### For AI Generation
When creating this spec from a user prompt:
1. **Mark all ambiguities**: Use [NEEDS CLARIFICATION: specific question] for any assumption you'd need to make
2. **Don't guess**: If the prompt doesn't specify something (e.g., "login system" without auth method), mark it
3. **Think like a tester**: Every vague requirement should fail the "testable and unambiguous" checklist item
4. **Common underspecified areas**:
- User types and permissions
- Data retention/deletion policies
- Performance targets and scale
- Error handling behaviors
- Integration requirements
- Security/compliance needs

View File

@@ -11,55 +11,65 @@ User input:
$ARGUMENTS
-1. Run `{SCRIPT}` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute.
+## Execution Steps
2. Load and analyze available design documents:
- Always read plan.md for tech stack and libraries
- IF EXISTS: Read data-model.md for entities
- IF EXISTS: Read contracts/ for API endpoints
- IF EXISTS: Read research.md for technical decisions
- IF EXISTS: Read quickstart.md for test scenarios
-Note: Not all projects have all documents. For example:
+1. **Setup**: Run `{SCRIPT}` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute.
- CLI tools might not have contracts/
- Simple libraries might not need data-model.md
- Generate tasks based on what's available
-3. Generate tasks following the template:
+2. **Load design documents**: Read from FEATURE_DIR:
-- Use `/templates/tasks-template.md` as the base
+- **Required**: plan.md (tech stack, libraries, structure)
-- Replace example tasks with actual tasks based on:
+- **Optional**: data-model.md (entities), contracts/ (API endpoints), research.md (decisions), quickstart.md (test scenarios)
-* **Setup tasks**: Project init, dependencies, linting
+- Note: Not all projects have all documents. Generate tasks based on what's available.
* **Test tasks [P]**: One per contract, one per integration scenario
* **Core tasks**: One per entity, service, CLI command, endpoint
* **Integration tasks**: DB connections, middleware, logging
* **Polish tasks [P]**: Unit tests, performance, docs
-4. Task generation rules:
+3. **Execute task generation workflow** (follow the template structure):
-- Each contract file → contract test task marked [P]
+- Load plan.md and extract tech stack, libraries, project structure
-- Each entity in data-model → model creation task marked [P]
+- If data-model.md exists: Extract entities → generate model tasks
-- Each endpoint → implementation task (not parallel if shared files)
+- If contracts/ exists: Each file → generate endpoint/API tasks
-- Each user story → integration test marked [P]
+- If research.md exists: Extract decisions → generate setup tasks
-- Different files = can be parallel [P]
+- Generate tasks by category: Setup, Core Implementation, Integration, Polish
-- Same file = sequential (no [P])
+- **Tests are OPTIONAL**: Only generate test tasks if explicitly requested in the feature spec or user asks for TDD approach
- Apply task rules:
* Different files = mark [P] for parallel
* Same file = sequential (no [P])
* If tests requested: Tests before implementation (TDD order)
- Number tasks sequentially (T001, T002...)
- Generate dependency graph
- Create parallel execution examples
- Validate task completeness (all entities have implementations, all endpoints covered)
-5. Order tasks by dependencies:
+4. **Generate tasks.md**: Use `.specify/templates/tasks-template.md` as structure, fill with:
-- Setup before everything
+- Correct feature name from plan.md
-- Tests before implementation (TDD)
+- Numbered tasks (T001, T002...) in dependency order
- Models before services
- Services before endpoints
- Core before integration
- Everything before polish
6. Include parallel execution examples:
- Group [P] tasks that can run together
- Show actual Task agent commands
7. Create FEATURE_DIR/tasks.md with:
- Correct feature name from implementation plan
- Numbered tasks (T001, T002, etc.)
- Clear file paths for each task
- [P] markers for parallelizable tasks
- Phase groupings based on what's needed (Setup, Core Implementation, Integration, Polish)
- If tests requested: Include separate "Tests First (TDD)" phase before Core Implementation
- Dependency notes
5. **Report**: Output path to generated tasks.md and summary of task counts by phase.
- Parallel execution guidance
Context for task generation: {ARGS}
The tasks.md should be immediately executable - each task must be specific enough that an LLM can complete it without additional context.
## Task Generation Rules
**IMPORTANT**: Tests are optional. Only generate test tasks if the user explicitly requested testing or TDD approach in the feature specification.
1. **From Contracts**:
- Each contract/endpoint → implementation task
- If tests requested: Each contract → contract test task [P] before implementation
2. **From Data Model**:
- Each entity → model creation task [P]
- Relationships → service layer tasks
3. **From User Stories**:
- Each story → implementation tasks
- If tests requested: Each story → integration test [P]
- If quickstart.md exists: Validation tasks
4. **Ordering**:
- Without tests: Setup → Models → Services → Endpoints → Integration → Polish
- With tests (TDD): Setup → Tests → Models → Services → Endpoints → Integration → Polish
- Dependencies block parallel execution
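Purely as an illustration of rule 1 above (the mapping is performed by the agent when it writes tasks.md, not by any script in this commit), a hypothetical sketch:

```bash
# Hypothetical sketch of the contracts-to-tasks mapping: one implementation
# task per contract file, preceded by a [P] contract-test task when tests
# were requested. Not part of this commit.
tests_requested=true
n=0
for contract in contracts/*; do
  [[ -f "$contract" ]] || continue
  name=$(basename "$contract")
  if [[ "$tests_requested" == true ]]; then
    printf -- '- [ ] T%03d [P] Contract test for %s in tests/contract/\n' "$((++n))" "$name"
  fi
  printf -- '- [ ] T%03d Implement endpoint(s) defined in %s\n' "$((++n))" "$name"
done
```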

View File

@@ -1,45 +1,22 @@
---
description: "Implementation plan template for feature development"
scripts:
sh: scripts/bash/update-agent-context.sh __AGENT__
ps: scripts/powershell/update-agent-context.ps1 -AgentType __AGENT__
---
# Implementation Plan: [FEATURE]
**Branch**: `[###-feature-name]` | **Date**: [DATE] | **Spec**: [link]
**Input**: Feature specification from `/specs/[###-feature-name]/spec.md`
-## Execution Flow (/plan command scope)
+**Note**: This template is filled in by the `/plan` command. See `.specify/templates/commands/plan.md` for the execution workflow.
```
1. Load feature spec from Input path
→ If not found: ERROR "No feature spec at {path}"
2. Fill Technical Context (scan for NEEDS CLARIFICATION)
→ Detect Project Type from file system structure or context (web=frontend+backend, mobile=app+api)
→ Set Structure Decision based on project type
3. Fill the Constitution Check section based on the content of the constitution document.
4. Evaluate Constitution Check section below
→ If violations exist: Document in Complexity Tracking
→ If no justification possible: ERROR "Simplify approach first"
→ Update Progress Tracking: Initial Constitution Check
5. Execute Phase 0 → research.md
→ If NEEDS CLARIFICATION remain: ERROR "Resolve unknowns"
6. Execute Phase 1 → contracts, data-model.md, quickstart.md, agent-specific template file (e.g., `CLAUDE.md` for Claude Code, `.github/copilot-instructions.md` for GitHub Copilot, `GEMINI.md` for Gemini CLI, `QWEN.md` for Qwen Code, or `AGENTS.md` for all other agents).
7. Re-evaluate Constitution Check section
→ If new violations: Refactor design, return to Phase 1
→ Update Progress Tracking: Post-Design Constitution Check
8. Plan Phase 2 → Describe task generation approach (DO NOT create tasks.md)
9. STOP - Ready for /tasks command
```
**IMPORTANT**: The /plan command STOPS at step 7. Phases 2-4 are executed by other commands:
- Phase 2: /tasks command creates tasks.md
- Phase 3-4: Implementation execution (manual or via tools)
## Summary
[Extract from feature spec: primary requirement + technical approach from research]
## Technical Context
<!--
ACTION REQUIRED: Replace the content in this section with the technical details
for the project. The structure here is presented in advisory capacity to guide
the iteration process.
-->
**Language/Version**: [e.g., Python 3.11, Swift 5.9, Rust 1.75 or NEEDS CLARIFICATION]
**Primary Dependencies**: [e.g., FastAPI, UIKit, LLVM or NEEDS CLARIFICATION]
**Storage**: [if applicable, e.g., PostgreSQL, CoreData, files or N/A]
@@ -51,6 +28,7 @@ scripts:
**Scale/Scope**: [domain-specific, e.g., 10k users, 1M LOC, 50 screens or NEEDS CLARIFICATION]
## Constitution Check
*GATE: Must pass before Phase 0 research. Re-check after Phase 1 design.*
[Gates determined based on constitution file]
@@ -58,6 +36,7 @@ scripts:
## Project Structure
### Documentation (this feature)
```
specs/[###-feature]/
├── plan.md # This file (/plan command output)
@@ -75,6 +54,7 @@ specs/[###-feature]/
real paths (e.g., apps/admin, packages/something). The delivered plan must
not include Option labels.
-->
```
# [REMOVE IF UNUSED] Option 1: Single project (DEFAULT)
src/
@@ -114,112 +94,11 @@ ios/ or android/
**Structure Decision**: [Document the selected structure and reference the real
directories captured above]
## Phase 0: Outline & Research
1. **Extract unknowns from Technical Context** above:
- For each NEEDS CLARIFICATION → research task
- For each dependency → best practices task
- For each integration → patterns task
2. **Generate and dispatch research agents**:
```
For each unknown in Technical Context:
Task: "Research {unknown} for {feature context}"
For each technology choice:
Task: "Find best practices for {tech} in {domain}"
```
3. **Consolidate findings** in `research.md` using format:
- Decision: [what was chosen]
- Rationale: [why chosen]
- Alternatives considered: [what else evaluated]
**Output**: research.md with all NEEDS CLARIFICATION resolved
## Phase 1: Design & Contracts
*Prerequisites: research.md complete*
1. **Extract entities from feature spec** → `data-model.md`:
- Entity name, fields, relationships
- Validation rules from requirements
- State transitions if applicable
2. **Generate API contracts** from functional requirements:
- For each user action → endpoint
- Use standard REST/GraphQL patterns
- Output OpenAPI/GraphQL schema to `/contracts/`
3. **Generate contract tests** from contracts:
- One test file per endpoint
- Assert request/response schemas
- Tests must fail (no implementation yet)
4. **Extract test scenarios** from user stories:
- Each story → integration test scenario
- Quickstart test = story validation steps
5. **Update agent file incrementally** (O(1) operation):
- Run `{SCRIPT}`
**IMPORTANT**: Execute it exactly as specified above. Do not add or remove any arguments.
- If exists: Add only NEW tech from current plan
- Preserve manual additions between markers
- Update recent changes (keep last 3)
- Keep under 150 lines for token efficiency
- Output to repository root
**Output**: data-model.md, /contracts/*, failing tests, quickstart.md, agent-specific file
## Phase 2: Task Planning Approach
*This section describes what the /tasks command will do - DO NOT execute during /plan*
**Task Generation Strategy**:
- Load `.specify/templates/tasks-template.md` as base
- Generate tasks from Phase 1 design docs (contracts, data model, quickstart)
- Each contract → contract test task [P]
- Each entity → model creation task [P]
- Each user story → integration test task
- Implementation tasks to make tests pass
**Ordering Strategy**:
- TDD order: Tests before implementation
- Dependency order: Models before services before UI
- Mark [P] for parallel execution (independent files)
**Estimated Output**: 25-30 numbered, ordered tasks in tasks.md
**IMPORTANT**: This phase is executed by the /tasks command, NOT by /plan
## Phase 3+: Future Implementation
*These phases are beyond the scope of the /plan command*
**Phase 3**: Task execution (/tasks command creates tasks.md)
**Phase 4**: Implementation (execute tasks.md following constitutional principles)
**Phase 5**: Validation (run tests, execute quickstart.md, performance validation)
## Complexity Tracking
*Fill ONLY if Constitution Check has violations that must be justified*
| Violation | Why Needed | Simpler Alternative Rejected Because |
|-----------|------------|-------------------------------------|
| [e.g., 4th project] | [current need] | [why 3 projects insufficient] |
| [e.g., Repository pattern] | [specific problem] | [why direct DB access insufficient] |
## Progress Tracking
*This checklist is updated during execution flow*
**Phase Status**:
- [ ] Phase 0: Research complete (/plan command)
- [ ] Phase 1: Design complete (/plan command)
- [ ] Phase 2: Task planning complete (/plan command - describe approach only)
- [ ] Phase 3: Tasks generated (/tasks command)
- [ ] Phase 4: Implementation complete
- [ ] Phase 5: Validation passed
**Gate Status**:
- [ ] Initial Constitution Check: PASS
- [ ] Post-Design Constitution Check: PASS
- [ ] All NEEDS CLARIFICATION resolved
- [ ] Complexity deviations documented
---
*Based on Constitution v2.1.1 - See `/memory/constitution.md`*

View File

@@ -5,69 +5,41 @@
**Status**: Draft
**Input**: User description: "$ARGUMENTS"
## Execution Flow (main)
```
1. Parse user description from Input
→ If empty: ERROR "No feature description provided"
2. Extract key concepts from description
→ Identify: actors, actions, data, constraints
3. For each unclear aspect:
→ Mark with [NEEDS CLARIFICATION: specific question]
4. Fill User Scenarios & Testing section
→ If no clear user flow: ERROR "Cannot determine user scenarios"
5. Generate Functional Requirements
→ Each requirement must be testable
→ Mark ambiguous requirements
6. Identify Key Entities (if data involved)
7. Run Review Checklist
→ If any [NEEDS CLARIFICATION]: WARN "Spec has uncertainties"
→ If implementation details found: ERROR "Remove tech details"
8. Return: SUCCESS (spec ready for planning)
```
---
## ⚡ Quick Guidelines
- ✅ Focus on WHAT users need and WHY
- ❌ Avoid HOW to implement (no tech stack, APIs, code structure)
- 👥 Written for business stakeholders, not developers
### Section Requirements
- **Mandatory sections**: Must be completed for every feature
- **Optional sections**: Include only when relevant to the feature
- When a section doesn't apply, remove it entirely (don't leave as "N/A")
### For AI Generation
When creating this spec from a user prompt:
1. **Mark all ambiguities**: Use [NEEDS CLARIFICATION: specific question] for any assumption you'd need to make
2. **Don't guess**: If the prompt doesn't specify something (e.g., "login system" without auth method), mark it
3. **Think like a tester**: Every vague requirement should fail the "testable and unambiguous" checklist item
4. **Common underspecified areas**:
- User types and permissions
- Data retention/deletion policies
- Performance targets and scale
- Error handling behaviors
- Integration requirements
- Security/compliance needs
---
## User Scenarios & Testing *(mandatory)*
### Primary User Story
[Describe the main user journey in plain language]
### Acceptance Scenarios
<!--
ACTION REQUIRED: The content in this section represents placeholders.
Fill them out with the right acceptance scenarios.
-->
1. **Given** [initial state], **When** [action], **Then** [expected outcome]
2. **Given** [initial state], **When** [action], **Then** [expected outcome]
### Edge Cases
<!--
ACTION REQUIRED: The content in this section represents placeholders.
Fill them out with the right edge cases.
-->
- What happens when [boundary condition]?
- How does system handle [error scenario]?
## Requirements *(mandatory)*
<!--
ACTION REQUIRED: The content in this section represents placeholders.
Fill them out with the right functional requirements.
-->
### Functional Requirements
- **FR-001**: System MUST [specific capability, e.g., "allow users to create accounts"]
- **FR-002**: System MUST [specific capability, e.g., "validate email addresses"]
- **FR-003**: Users MUST be able to [key interaction, e.g., "reset their password"]
@@ -75,42 +47,11 @@ When creating this spec from a user prompt:
- **FR-005**: System MUST [behavior, e.g., "log all security events"]
*Example of marking unclear requirements:*
- **FR-006**: System MUST authenticate users via [NEEDS CLARIFICATION: auth method not specified - email/password, SSO, OAuth?]
- **FR-007**: System MUST retain user data for [NEEDS CLARIFICATION: retention period not specified]
### Key Entities *(include if feature involves data)*
- **[Entity 1]**: [What it represents, key attributes without implementation]
- **[Entity 2]**: [What it represents, relationships to other entities]
---
## Review & Acceptance Checklist
*GATE: Automated checks run during main() execution*
### Content Quality
- [ ] No implementation details (languages, frameworks, APIs)
- [ ] Focused on user value and business needs
- [ ] Written for non-technical stakeholders
- [ ] All mandatory sections completed
### Requirement Completeness
- [ ] No [NEEDS CLARIFICATION] markers remain
- [ ] Requirements are testable and unambiguous
- [ ] Success criteria are measurable
- [ ] Scope is clearly bounded
- [ ] Dependencies and assumptions identified
---
## Execution Status
*Updated by main() during processing*
- [ ] User description parsed
- [ ] Key concepts extracted
- [ ] Ambiguities marked
- [ ] User scenarios defined
- [ ] Requirements generated
- [ ] Entities identified
- [ ] Review checklist passed
---

View File

@@ -1,36 +1,13 @@
---
description: "Task list template for feature implementation"
---
# Tasks: [FEATURE NAME]
**Input**: Design documents from `/specs/[###-feature-name]/`
**Prerequisites**: plan.md (required), research.md, data-model.md, contracts/
-## Execution Flow (main)
+**Tests**: The examples below include test tasks. Tests are OPTIONAL - only include them if explicitly requested in the feature specification.
```
1. Load plan.md from feature directory
→ If not found: ERROR "No implementation plan found"
→ Extract: tech stack, libraries, structure
2. Load optional design documents:
→ data-model.md: Extract entities → model tasks
→ contracts/: Each file → contract test task
→ research.md: Extract decisions → setup tasks
3. Generate tasks by category:
→ Setup: project init, dependencies, linting
→ Tests: contract tests, integration tests
→ Core: models, services, CLI commands
→ Integration: DB, middleware, logging
→ Polish: unit tests, performance, docs
4. Apply task rules:
→ Different files = mark [P] for parallel
→ Same file = sequential (no [P])
→ Tests before implementation (TDD)
5. Number tasks sequentially (T001, T002...)
6. Generate dependency graph
7. Create parallel execution examples
8. Validate task completeness:
→ All contracts have tests?
→ All entities have models?
→ All endpoints implemented?
9. Return: SUCCESS (tasks ready for execution)
```
## Format: `[ID] [P?] Description`
- **[P]**: Can run in parallel (different files, no dependencies)
@@ -42,19 +19,34 @@
- **Mobile**: `api/src/`, `ios/src/` or `android/src/`
- Paths shown below assume single project - adjust based on plan.md structure
<!--
============================================================================
IMPORTANT: The tasks below are SAMPLE TASKS for illustration purposes only.
The /tasks command MUST replace these with actual tasks based on:
- Feature requirements from plan.md
- Entities from data-model.md
- Endpoints from contracts/
- User stories from the feature spec
DO NOT keep these sample tasks in the generated tasks.md file.
============================================================================
-->
## Phase 3.1: Setup
- [ ] T001 Create project structure per implementation plan
- [ ] T002 Initialize [language] project with [framework] dependencies
- [ ] T003 [P] Configure linting and formatting tools
-## Phase 3.2: Tests First (TDD) ⚠️ MUST COMPLETE BEFORE 3.3
+## Phase 3.2: Tests First (TDD) - OPTIONAL ⚠️
-**CRITICAL: These tests MUST be written and MUST FAIL before ANY implementation**
+**NOTE: This phase is only included if tests are explicitly requested**
**If included: These tests MUST be written and MUST FAIL before ANY implementation**
- [ ] T004 [P] Contract test POST /api/users in tests/contract/test_users_post.py
- [ ] T005 [P] Contract test GET /api/users/{id} in tests/contract/test_users_get.py
- [ ] T006 [P] Integration test user registration in tests/integration/test_registration.py
- [ ] T007 [P] Integration test auth flow in tests/integration/test_auth.py
-## Phase 3.3: Core Implementation (ONLY after tests are failing)
+## Phase 3.3: Core Implementation
- [ ] T008 [P] User model in src/models/user.py
- [ ] T009 [P] UserService CRUD in src/services/user_service.py
- [ ] T010 [P] CLI --create-user in src/cli/user_commands.py
@@ -70,11 +62,11 @@
- [ ] T018 CORS and security headers
## Phase 3.5: Polish
-- [ ] T019 [P] Unit tests for validation in tests/unit/test_validation.py
+- [ ] T019 [P] Documentation updates in docs/
-- [ ] T020 Performance tests (<200ms)
+- [ ] T020 Code cleanup and refactoring
-- [ ] T021 [P] Update docs/api.md
+- [ ] T021 Performance optimization
-- [ ] T022 Remove duplication
+- [ ] T022 [P] Unit tests (if requested) in tests/unit/
-- [ ] T023 Run manual-testing.md
+- [ ] T023 Run quickstart.md validation
## Dependencies
- Tests (T004-T007) before implementation (T008-T014)
@@ -97,31 +89,3 @@ Task: "Integration test auth in tests/integration/test_auth.py"
- Commit after each task
- Avoid: vague tasks, same file conflicts
## Task Generation Rules
*Applied during main() execution*
1. **From Contracts**:
- Each contract file → contract test task [P]
- Each endpoint → implementation task
2. **From Data Model**:
- Each entity → model creation task [P]
- Relationships → service layer tasks
3. **From User Stories**:
- Each story → integration test [P]
- Quickstart scenarios → validation tasks
4. **Ordering**:
- Setup → Tests → Models → Services → Endpoints → Polish
- Dependencies block parallel execution
## Validation Checklist
*GATE: Checked by main() before returning*
- [ ] All contracts have corresponding tests
- [ ] All entities have model tasks
- [ ] All tests come before implementation
- [ ] Parallel tasks truly independent
- [ ] Each task specifies exact file path
- [ ] No task modifies same file as another [P] task