diff --git a/apps/docs/command-reference.mdx b/apps/docs/command-reference.mdx
new file mode 100644
index 00000000..d8fbcfd8
--- /dev/null
+++ b/apps/docs/command-reference.mdx
@@ -0,0 +1,263 @@
+---
+title: "Task Master Commands"
+description: "A comprehensive reference of all available Task Master commands"
+---
+
+## Parse PRD
+
+ ```bash
+ # Parse a PRD file and generate tasks
+ task-master parse-prd <prd-file.txt>
+
+ # Limit the number of tasks generated
+ task-master parse-prd --num-tasks=10
+ ```
+
+## List Tasks
+
+ ```bash
+ # List all tasks
+ task-master list
+
+ # List tasks with a specific status
+ task-master list --status=<status>
+
+ # List tasks with subtasks
+ task-master list --with-subtasks
+
+ # List tasks with a specific status and include subtasks
+ task-master list --status=<status> --with-subtasks
+ ```
+
+## Show Next Task
+
+ ```bash
+ # Show the next task to work on based on dependencies and status
+ task-master next
+ ```
+
+## Show Specific Task
+
+ ```bash
+ # Show details of a specific task
+ task-master show <id>
+ # or
+ task-master show --id=<id>
+
+ # View a specific subtask (e.g., subtask 2 of task 1)
+ task-master show 1.2
+ ```
+
+## Update Tasks
+
+ ```bash
+ # Update tasks from a specific ID and provide context
+ task-master update --from=<id> --prompt="<context>"
+ ```
+
+## Update a Specific Task
+
+ ```bash
+ # Update a single task by ID with new information
+ task-master update-task --id=<id> --prompt="<context>"
+
+ # Use research-backed updates with Perplexity AI
+ task-master update-task --id=<id> --prompt="<context>" --research
+ ```
+
+## Update a Subtask
+
+ ```bash
+ # Append additional information to a specific subtask
+ task-master update-subtask --id=<parentId.subtaskId> --prompt="<context>"
+
+ # Example: Add details about API rate limiting to subtask 2 of task 5
+ task-master update-subtask --id=5.2 --prompt="Add rate limiting of 100 requests per minute"
+
+ # Use research-backed updates with Perplexity AI
+ task-master update-subtask --id=<parentId.subtaskId> --prompt="<context>" --research
+ ```
+
+ Unlike the `update-task` command, which replaces task information, the `update-subtask` command _appends_ new information to the existing subtask details and marks it with a timestamp. This is useful for iteratively enhancing subtasks while preserving the original content.
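+
+ For example, the rate-limiting update above would be appended to the subtask's details with a timestamp, roughly like this (timestamp and layout illustrative):
+
+ ```text
+ [2025-01-10T12:34:56Z] Add rate limiting of 100 requests per minute
+ ```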
+
+## Generate Task Files
+
+ ```bash
+ # Generate individual task files from tasks.json
+ task-master generate
+ ```
+
+## Set Task Status
+
+ ```bash
+ # Set status of a single task
+ task-master set-status --id=<id> --status=<status>
+
+ # Set status for multiple tasks
+ task-master set-status --id=1,2,3 --status=<status>
+
+ # Set status for subtasks
+ task-master set-status --id=1.1,1.2 --status=<status>
+ ```
+
+ When a task is marked as "done", all of its subtasks are automatically marked as "done" as well.
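+
+ For example, if task 4 has subtasks 4.1 and 4.2, this single command marks all three as "done" (IDs illustrative):
+
+ ```bash
+ task-master set-status --id=4 --status=done
+ ```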
+
+## Expand Tasks
+
+ ```bash
+ # Expand a specific task with subtasks
+ task-master expand --id=<id> --num=<number>
+
+ # Expand with additional context
+ task-master expand --id=<id> --prompt="<context>"
+
+ # Expand all pending tasks
+ task-master expand --all
+
+ # Force regeneration of subtasks for tasks that already have them
+ task-master expand --all --force
+
+ # Research-backed subtask generation for a specific task
+ task-master expand --id=<id> --research
+
+ # Research-backed generation for all tasks
+ task-master expand --all --research
+ ```
+
+## Clear Subtasks
+
+ ```bash
+ # Clear subtasks from a specific task
+ task-master clear-subtasks --id=<id>
+
+ # Clear subtasks from multiple tasks
+ task-master clear-subtasks --id=1,2,3
+
+ # Clear subtasks from all tasks
+ task-master clear-subtasks --all
+ ```
+
+## Analyze Task Complexity
+
+ ```bash
+ # Analyze complexity of all tasks
+ task-master analyze-complexity
+
+ # Save report to a custom location
+ task-master analyze-complexity --output=my-report.json
+
+ # Use a specific LLM model
+ task-master analyze-complexity --model=claude-3-opus-20240229
+
+ # Set a custom complexity threshold (1-10)
+ task-master analyze-complexity --threshold=6
+
+ # Use an alternative tasks file
+ task-master analyze-complexity --file=custom-tasks.json
+
+ # Use Perplexity AI for research-backed complexity analysis
+ task-master analyze-complexity --research
+ ```
+
+## View Complexity Report
+
+ ```bash
+ # Display the task complexity analysis report
+ task-master complexity-report
+
+ # View a report at a custom location
+ task-master complexity-report --file=my-report.json
+ ```
+
+## Manage Dependencies
+
+ ```bash
+ # Add a dependency to a task
+ task-master add-dependency --id=<id> --depends-on=<id>
+
+ # Remove a dependency from a task
+ task-master remove-dependency --id=<id> --depends-on=<id>
+
+ # Validate dependencies without fixing them
+ task-master validate-dependencies
+
+ # Find and fix invalid dependencies automatically
+ task-master fix-dependencies
+ ```
+
+## Add a New Task
+
+ ```bash
+ # Add a new task using AI
+ task-master add-task --prompt="Description of the new task"
+
+ # Add a task with dependencies
+ task-master add-task --prompt="Description" --dependencies=1,2,3
+
+ # Add a task with priority
+ task-master add-task --prompt="Description" --priority=high
+ ```
+
+## Initialize a Project
+
+ ```bash
+ # Initialize a new project with Task Master structure
+ task-master init
+ ```
+
+## TDD Workflow (Autopilot)
+
+ ```bash
+ # Start autonomous TDD workflow for a task
+ task-master autopilot start <taskId>
+
+ # Get next action with context
+ task-master autopilot next
+
+ # Complete phase with test results
+ task-master autopilot complete --results '{"total":N,"passed":N,"failed":N}'
+
+ # Commit changes
+ task-master autopilot commit
+
+ # Check workflow status
+ task-master autopilot status
+
+ # Resume interrupted workflow
+ task-master autopilot resume
+
+ # Abort workflow
+ task-master autopilot abort
+ ```
+
+ The TDD workflow enforces RED → GREEN → COMMIT cycles for each subtask. See [AI Agent Integration](/tdd-workflow/ai-agent-integration) for details.
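+
+ A typical cycle for one subtask might look like this (task 7 and the test counts are illustrative):
+
+ ```bash
+ # Start the workflow for task 7 (creates a branch such as task-7)
+ task-master autopilot start 7
+
+ # RED: write a failing test, run the suite, and report the results
+ task-master autopilot complete --results '{"total":1,"passed":0,"failed":1}'
+
+ # GREEN: implement until the suite passes, then report the results
+ task-master autopilot complete --results '{"total":1,"passed":1,"failed":0}'
+
+ # COMMIT: save progress; the workflow advances to the next subtask
+ task-master autopilot commit
+ ```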
+
+
diff --git a/apps/docs/configuration.mdx b/apps/docs/configuration.mdx
new file mode 100644
index 00000000..caa51082
--- /dev/null
+++ b/apps/docs/configuration.mdx
@@ -0,0 +1,94 @@
+---
+title: "Configuration"
+description: "Configure Task Master through environment variables in a .env file"
+---
+
+## Required Configuration
+
+
+ Task Master requires an Anthropic API key to function. Add this to your `.env` file:
+
+ ```bash
+ ANTHROPIC_API_KEY=sk-ant-api03-your-api-key
+ ```
+
+ You can obtain an API key from the [Anthropic Console](https://console.anthropic.com/).
+
+
+## Optional Configuration
+
+| Variable | Default Value | Description | Example |
+| --- | --- | --- | --- |
+| `MODEL` | `"claude-3-7-sonnet-20250219"` | Claude model to use | `MODEL=claude-3-opus-20240229` |
+| `MAX_TOKENS` | `"4000"` | Maximum tokens for responses | `MAX_TOKENS=8000` |
+| `TEMPERATURE` | `"0.7"` | Temperature for model responses | `TEMPERATURE=0.5` |
+| `DEBUG` | `"false"` | Enable debug logging | `DEBUG=true` |
+| `LOG_LEVEL` | `"info"` | Console output level | `LOG_LEVEL=debug` |
+| `DEFAULT_SUBTASKS` | `"3"` | Default subtask count | `DEFAULT_SUBTASKS=5` |
+| `DEFAULT_PRIORITY` | `"medium"` | Default priority | `DEFAULT_PRIORITY=high` |
+| `PROJECT_NAME` | `"MCP SaaS MVP"` | Project name in metadata | `PROJECT_NAME=My Awesome Project` |
+| `PROJECT_VERSION` | `"1.0.0"` | Version in metadata | `PROJECT_VERSION=2.1.0` |
+| `PERPLEXITY_API_KEY` | - | For research-backed features | `PERPLEXITY_API_KEY=pplx-...` |
+| `PERPLEXITY_MODEL` | `"sonar-medium-online"` | Perplexity model | `PERPLEXITY_MODEL=sonar-large-online` |
+
+## TDD Workflow Configuration
+
+Additional options for autonomous TDD workflow:
+
+| Variable | Default | Description |
+| --- | --- | --- |
+| `TM_MAX_ATTEMPTS` | `3` | Max attempts per subtask before marking blocked |
+| `TM_AUTO_COMMIT` | `true` | Auto-commit after GREEN phase |
+| `TM_PROJECT_ROOT` | Current dir | Default project root |
+
+## Example .env File
+
+```
+# Required
+ANTHROPIC_API_KEY=sk-ant-api03-your-api-key
+
+# Optional - Claude Configuration
+MODEL=claude-3-7-sonnet-20250219
+MAX_TOKENS=4000
+TEMPERATURE=0.7
+
+# Optional - Perplexity API for Research
+PERPLEXITY_API_KEY=pplx-your-api-key
+PERPLEXITY_MODEL=sonar-medium-online
+
+# Optional - Project Info
+PROJECT_NAME=My Project
+PROJECT_VERSION=1.0.0
+
+# Optional - Application Configuration
+DEFAULT_SUBTASKS=3
+DEFAULT_PRIORITY=medium
+DEBUG=false
+LOG_LEVEL=info
+
+# TDD Workflow
+TM_MAX_ATTEMPTS=3
+TM_AUTO_COMMIT=true
+```
+
+## Troubleshooting
+
+### If `task-master init` doesn't respond:
+
+Try running it with Node directly:
+
+```bash
+node node_modules/claude-task-master/scripts/init.js
+```
+
+Or clone the repository and run:
+
+```bash
+git clone https://github.com/eyaltoledano/claude-task-master.git
+cd claude-task-master
+node scripts/init.js
+```
+
+
+For advanced configuration options and detailed customization, see our [Advanced Configuration Guide] page.
+
diff --git a/apps/docs/docs.json b/apps/docs/docs.json
index ee1f033d..25fefec1 100644
--- a/apps/docs/docs.json
+++ b/apps/docs/docs.json
@@ -57,14 +57,7 @@
"group": "TDD Workflow (Autopilot)",
"pages": [
"tdd-workflow/quickstart",
- "tdd-workflow/ai-agent-integration",
- {
- "group": "Templates & Examples",
- "pages": [
- "tdd-workflow/templates/claude-template",
- "tdd-workflow/templates/example-prompts"
- ]
- }
+ "tdd-workflow/ai-agent-integration"
]
}
]
diff --git a/apps/docs/tdd-workflow/ai-agent-integration.mdx b/apps/docs/tdd-workflow/ai-agent-integration.mdx
index 93735aa1..faebb7f9 100644
--- a/apps/docs/tdd-workflow/ai-agent-integration.mdx
+++ b/apps/docs/tdd-workflow/ai-agent-integration.mdx
@@ -930,15 +930,84 @@ tm init
---
+## Working with AI Agents
+
+The following example prompts show how AI agents (Claude Code, Cursor, etc.) can drive the TDD workflow.
+
+### Starting a Task
+
+```
+I want to implement Task 7 using TDD workflow. Please:
+1. Start the autopilot workflow
+2. Show me the first subtask to implement
+3. Begin the RED-GREEN-COMMIT cycle
+```
+
+### RED Phase - Writing Failing Tests
+
+```
+We're in RED phase for subtask "{SUBTASK_TITLE}". Please:
+1. Read the subtask requirements
+2. Write a test that validates the behavior
+3. The test MUST fail because the feature doesn't exist yet
+4. Run the tests and report results to complete the RED phase
+```
+
+### GREEN Phase - Implementing
+
+```
+We're in GREEN phase. The test is failing with: {ERROR_MESSAGE}
+
+Please:
+1. Implement the minimal code to make this test pass
+2. Don't over-engineer or add untested features
+3. Run tests and report results to complete the GREEN phase
+```
+
+### Handling Errors
+
+```
+The RED phase validation failed - no test failures detected.
+
+Please:
+1. Review the test I just wrote
+2. Identify why it's not actually testing new behavior
+3. Rewrite the test to properly fail until the feature is implemented
+```
+
+```
+GREEN phase validation failed - {N} tests still failing.
+
+Please:
+1. Review the failing test output
+2. Fix the implementation to pass all tests
+3. Try completing the GREEN phase again
+```
+
+### Checking Progress
+
+```
+What's the current state of the workflow? Please show:
+- Which subtask we're on
+- Current TDD phase (RED/GREEN/COMMIT)
+- Progress percentage
+- Next action required
+```
+
+### Resuming Work
+
+```
+I have an in-progress workflow. Please:
+1. Resume the autopilot workflow
+2. Show current status
+3. Continue from where we left off
+```
+
+---
+
## Additional Resources
-- [Command Reference](./command-reference.mdx) - Complete CLI command documentation
-- [MCP Provider Guide](./mcp-provider-guide.mdx) - MCP integration details
-- [Task Structure](./task-structure.mdx) - Understanding TaskMaster's task system
-- [Configuration](./configuration.mdx) - Project configuration options
-
-## Support
-
-For issues, questions, or contributions:
-- GitHub Issues: https://github.com/eyaltoledano/claude-task-master/issues
-- Documentation: https://docs.task-master.dev
+- [Command Reference](/command-reference) - Complete CLI command documentation
+- [MCP Provider Guide](/capabilities/mcp) - MCP integration details
+- [Task Structure](/capabilities/task-structure) - Understanding TaskMaster's task system
+- [Configuration](/configuration) - Project configuration options
diff --git a/apps/docs/tdd-workflow/quickstart.mdx b/apps/docs/tdd-workflow/quickstart.mdx
index f627eaca..75fc210c 100644
--- a/apps/docs/tdd-workflow/quickstart.mdx
+++ b/apps/docs/tdd-workflow/quickstart.mdx
@@ -297,9 +297,7 @@ tm autopilot commit # Save progress
## Next Steps
- Read [AI Agent Integration Guide](./ai-agent-integration.mdx) for complete documentation
-- See [Example Prompts](./templates/example-prompts.mdx) for AI agent patterns
-- Check [Command Reference](./command-reference.mdx) for all options
-- Review [CLAUDE.md Template](./templates/CLAUDE.md.template) for AI integration
+- Check [Command Reference](/command-reference) for all options
## Tips
diff --git a/apps/docs/tdd-workflow/templates/claude-template.mdx b/apps/docs/tdd-workflow/templates/claude-template.mdx
deleted file mode 100644
index c7a59fb1..00000000
--- a/apps/docs/tdd-workflow/templates/claude-template.mdx
+++ /dev/null
@@ -1,388 +0,0 @@
----
-title: "CLAUDE.md Template"
-description: "Ready-to-use CLAUDE.md template for AI agent integration with TDD workflow"
----
-
-This file provides integration instructions for AI agents (like Claude Code) to work with TaskMaster's autonomous TDD workflow system.
-
-## Quick Reference
-
-```bash
-# Start workflow
-tm autopilot start
-
-# Get next action
-tm autopilot next --json
-
-# Complete phase with test results
-tm autopilot complete --results '{"total":N,"passed":N,"failed":N,"skipped":N}'
-
-# Commit changes
-tm autopilot commit
-
-# Check status
-tm autopilot status --json
-
-# Abort workflow
-tm autopilot abort
-```
-
-## Integration Pattern
-
-### 1. Start Task
-
-Before implementing a task:
-
-```bash
-tm autopilot start {TASK_ID}
-```
-
-This creates a workflow branch and initializes the TDD state machine.
-
-### 2. Follow TDD Cycle
-
-For each subtask, repeat this cycle:
-
-#### RED Phase - Write Failing Test
-
-1. Check next action:
-```bash
-tm autopilot next --json
-```
-
-2. Write a test that **fails** because the feature doesn't exist yet
-
-3. Run tests and report results:
-```bash
-npm test # or appropriate test command
-tm autopilot complete --results '{TEST_RESULTS_JSON}'
-```
-
-**Important:** RED phase MUST have at least one failing test.
-
-#### GREEN Phase - Implement Feature
-
-1. Check next action confirms GREEN phase
-
-2. Write minimal implementation to make tests pass
-
-3. Run tests and report results:
-```bash
-npm test
-tm autopilot complete --results '{TEST_RESULTS_JSON}'
-```
-
-**Important:** GREEN phase MUST have all tests passing (failed === 0).
-
-#### COMMIT Phase - Save Progress
-
-1. Review changes:
-```bash
-git status
-git diff
-```
-
-2. Commit (auto-generates message with metadata):
-```bash
-tm autopilot commit
-```
-
-3. Workflow automatically advances to next subtask
-
-### 3. Monitor Progress
-
-```bash
-# Check overall progress
-tm autopilot status --json
-
-# See what's next
-tm autopilot next --json
-```
-
-### 4. Handle Completion
-
-When all subtasks are done:
-- Workflow enters COMPLETE phase
-- Branch remains for review/merge
-- State can be cleaned up
-
-## Example Session
-
-```bash
-# Start task with 3 subtasks
-$ tm autopilot start 7
-✓ Workflow started for task 7
-✓ Created branch: task-7
-✓ Phase: RED
-✓ Next: generate_test for subtask 7.1
-
-# Write failing test for subtask 7.1
-$ cat > tests/feature.test.ts
-# ... write test ...
-
-$ npm test
-# 1 test, 0 passed, 1 failed
-
-$ tm autopilot complete --results '{"total":1,"passed":0,"failed":1,"skipped":0}'
-✓ RED phase complete
-✓ Phase: GREEN
-✓ Next: implement_code
-
-# Write implementation
-$ cat > src/feature.ts
-# ... write code ...
-
-$ npm test
-# 1 test, 1 passed, 0 failed
-
-$ tm autopilot complete --results '{"total":1,"passed":1,"failed":0,"skipped":0}'
-✓ GREEN phase complete
-✓ Phase: COMMIT
-✓ Next: commit_changes
-
-$ tm autopilot commit
-✓ Created commit: abc123
-✓ Message: feat(feature): implement feature (Task 7.1)
-✓ Advanced to subtask 7.2
-✓ Phase: RED
-✓ Next: generate_test
-
-# Repeat for subtasks 7.2 and 7.3...
-```
-
-## Test Result Format
-
-Always provide test results in this JSON format:
-
-```json
-{
- "total": 10, // Total number of tests
- "passed": 8, // Number of passing tests
- "failed": 2, // Number of failing tests
- "skipped": 0 // Number of skipped tests (optional)
-}
-```
-
-### Parsing Test Output
-
-Common test frameworks output that needs parsing:
-
-**Vitest:**
-```
-Test Files 1 passed (1)
- Tests 10 passed | 2 failed (12)
-```
-→ `{"total":12,"passed":10,"failed":2,"skipped":0}`
-
-**Jest:**
-```
-Tests: 2 failed, 10 passed, 12 total
-```
-→ `{"total":12,"passed":10,"failed":2,"skipped":0}`
-
-**Mocha:**
-```
- 12 passing
- 2 failing
-```
-→ `{"total":14,"passed":12,"failed":2,"skipped":0}`
-
-## Error Handling
-
-### Common Issues
-
-**1. RED Phase Won't Complete**
-- Error: "RED phase validation failed: no test failures"
-- Solution: Your test isn't actually testing new behavior. Write a test that fails.
-
-**2. GREEN Phase Won't Complete**
-- Error: "GREEN phase validation failed: tests still failing"
-- Solution: Implementation incomplete. Debug and fix failing tests.
-
-**3. Workflow Already Exists**
-- Error: "Workflow already in progress"
-- Solution: Run `tm autopilot resume` or `tm autopilot abort --force` then restart
-
-**4. No Staged Changes**
-- Error: "No staged changes to commit"
-- Solution: Ensure you've actually created/modified files
-
-### Recovery
-
-If workflow gets stuck:
-
-```bash
-# Check current state
-tm autopilot status --json
-
-# If corrupted, abort and restart
-tm autopilot abort --force
-tm autopilot start {TASK_ID}
-```
-
-## Best Practices
-
-### 1. One Feature Per Test Cycle
-
-Each RED-GREEN-COMMIT cycle should implement exactly one small feature or behavior.
-
-**Good:**
-- RED: Test that `getUser()` returns user object
-- GREEN: Implement `getUser()` to return user
-- COMMIT: One commit for getUser feature
-
-**Bad:**
-- RED: Test multiple features at once
-- GREEN: Implement entire module
-- COMMIT: Massive commit with unrelated changes
-
-### 2. Meaningful Test Names
-
-Tests should clearly describe what they're validating:
-
-```typescript
-// Good
-it('should return 404 when user not found', async () => {
- const result = await getUser('nonexistent');
- expect(result.status).toBe(404);
-});
-
-// Bad
-it('test 1', () => {
- // what does this test?
-});
-```
-
-### 3. Minimal Implementation
-
-In GREEN phase, write just enough code to pass the test:
-
-```typescript
-// Good - minimal implementation
-function getUser(id: string) {
- if (id === 'nonexistent') {
- return { status: 404 };
- }
- return { status: 200, data: users[id] };
-}
-
-// Bad - over-engineering
-function getUser(id: string) {
- // Adds caching, validation, logging, etc. that isn't tested
-}
-```
-
-### 4. Keep Tests Fast
-
-Fast tests mean fast feedback:
-- Avoid network calls (use mocks)
-- Avoid file system operations (use in-memory)
-- Avoid waiting/sleeping
-
-### 5. Commit Message Quality
-
-Let TaskMaster generate commit messages - they include:
-- Conventional commit format (feat, fix, refactor, etc.)
-- Subtask context and ID
-- Workflow metadata
-- Co-authorship attribution
-
-## MCP Integration (Advanced)
-
-For programmatic integration, use MCP tools instead of CLI:
-
-```typescript
-import { MCPClient } from '@modelcontextprotocol/sdk';
-
-const client = new MCPClient();
-
-// Start workflow
-const start = await client.call('autopilot_start', {
- taskId: '7',
- projectRoot: '/path/to/project'
-});
-
-// Get next action
-const next = await client.call('autopilot_next', {
- projectRoot: '/path/to/project'
-});
-
-// Complete phase
-const complete = await client.call('autopilot_complete_phase', {
- projectRoot: '/path/to/project',
- testResults: { total: 1, passed: 0, failed: 1, skipped: 0 }
-});
-
-// Commit
-const commit = await client.call('autopilot_commit', {
- projectRoot: '/path/to/project'
-});
-```
-
-See [AI Agent Integration Guide](../ai-agent-integration.mdx) for complete MCP documentation.
-
-## Workflow State Files
-
-TaskMaster persists workflow state to `.taskmaster/workflow-state.json`:
-
-```json
-{
- "phase": "SUBTASK_LOOP",
- "context": {
- "taskId": "7",
- "subtasks": [...],
- "currentSubtaskIndex": 0,
- "currentTDDPhase": "RED",
- "branchName": "task-7",
- "errors": [],
- "metadata": {
- "startedAt": "2025-01-10T..."
- }
- }
-}
-```
-
-**Important:** Never manually edit this file. Use CLI/MCP tools only.
-
-## Project Structure
-
-```
-project/
-├── .taskmaster/
-│ ├── workflow-state.json # Current workflow state
-│ ├── tasks/
-│ │ └── tasks.json # Task definitions
-│ └── docs/
-│ └── prd.txt # Product requirements
-├── src/ # Implementation files
-├── tests/ # Test files
-└── package.json
-```
-
-## Additional Resources
-
-- [AI Agent Integration Guide](../ai-agent-integration.mdx) - Complete integration documentation
-- [Command Reference](../command-reference.mdx) - All CLI commands
-- [Task Structure](../task-structure.mdx) - Understanding tasks and subtasks
-- [MCP Provider Guide](../mcp-provider-guide.mdx) - MCP integration details
-
-## Troubleshooting
-
-**Q: Workflow won't start**
-A: Check that task has subtasks (`tm show `) and git working tree is clean
-
-**Q: Can't complete RED phase**
-A: Verify at least one test is actually failing (not skipped, not passing)
-
-**Q: Can't complete GREEN phase**
-A: Verify ALL tests pass (zero failures)
-
-**Q: Commit fails**
-A: Check that you've made changes and they're staged (or stageable)
-
-**Q: State seems wrong**
-A: Check `.taskmaster/workflow-state.json` or run `tm autopilot status`
-
----
-
-**For detailed documentation, see:** [AI Agent Integration Guide](../ai-agent-integration.mdx)
diff --git a/apps/docs/tdd-workflow/templates/example-prompts.mdx b/apps/docs/tdd-workflow/templates/example-prompts.mdx
deleted file mode 100644
index 6d65eaa4..00000000
--- a/apps/docs/tdd-workflow/templates/example-prompts.mdx
+++ /dev/null
@@ -1,478 +0,0 @@
----
-title: "Example Prompts"
-description: "Collection of effective prompts for AI agents working with TaskMaster's TDD workflow system"
----
-
-Collection of effective prompts for AI agents working with TaskMaster's TDD workflow system.
-
-## Getting Started Prompts
-
-### Start a Task
-
-```
-I want to implement Task 7 using TDD workflow. Please:
-1. Start the autopilot workflow
-2. Show me the first subtask to implement
-3. Begin the RED-GREEN-COMMIT cycle
-```
-
-### Resume Work
-
-```
-I have an in-progress workflow. Please:
-1. Resume the autopilot workflow
-2. Show current status and progress
-3. Continue from where we left off
-```
-
-### Understanding Current State
-
-```
-What's the current state of the workflow? Please show:
-- Which subtask we're on
-- Current TDD phase (RED/GREEN/COMMIT)
-- Progress percentage
-- Next action required
-```
-
-## Test Generation Prompts
-
-### Basic Test Generation
-
-```
-We're in RED phase for subtask "{SUBTASK_TITLE}". Please:
-1. Read the subtask requirements
-2. Write a comprehensive test that validates the behavior
-3. The test MUST fail because the feature doesn't exist yet
-4. Use the project's testing framework (vitest/jest/etc)
-5. Follow the project's test file conventions
-```
-
-### Test for Specific Feature
-
-```
-For subtask: "Implement user authentication endpoint"
-
-Write a failing test that:
-1. Tests POST /api/auth/login
-2. Validates request body (email, password)
-3. Checks response format and status codes
-4. Uses proper mocking for database calls
-5. Follows security best practices
-```
-
-### Edge Case Testing
-
-```
-The basic happy path test is passing. Now write additional tests for:
-1. Error cases (invalid input, missing fields)
-2. Edge cases (empty strings, null values, etc.)
-3. Security concerns (SQL injection, XSS)
-4. Performance expectations (timeout, rate limits)
-
-Each test should initially fail.
-```
-
-### Test Refactoring
-
-```
-Our tests are passing but could be improved. Please:
-1. Review existing tests for duplication
-2. Extract common setup into beforeEach/fixtures
-3. Improve test descriptions for clarity
-4. Add missing edge cases
-5. Ensure all new tests fail first (RED phase)
-```
-
-## Implementation Prompts
-
-### Basic Implementation
-
-```
-We're in GREEN phase. The test is failing with: {ERROR_MESSAGE}
-
-Please:
-1. Implement the minimal code to make this test pass
-2. Don't over-engineer or add features not tested
-3. Follow the project's code style and patterns
-4. Ensure the implementation is clean and readable
-```
-
-### Implementation with Constraints
-
-```
-Implement the feature to pass the test, but:
-- Use TypeScript with strict type checking
-- Follow SOLID principles
-- Keep functions under 20 lines
-- Use dependency injection where appropriate
-- Add JSDoc comments for public APIs
-```
-
-### Fix Failing Tests
-
-```
-GREEN phase validation failed - {N} tests still failing.
-
-Please:
-1. Review the failing test output
-2. Identify what's not working
-3. Fix the implementation to pass all tests
-4. Don't modify tests to make them pass
-5. Explain what was wrong
-```
-
-### Refactor Implementation
-
-```
-Tests are passing but code quality needs improvement:
-1. Extract repeated logic into functions
-2. Improve variable names
-3. Add error handling
-4. Optimize performance if needed
-5. Ensure tests still pass after refactoring
-```
-
-## Debugging Prompts
-
-### Test Output Parsing
-
-```
-Here's the test output:
-{PASTE_TEST_OUTPUT}
-
-Please parse this into the required JSON format:
-{
- "total": N,
- "passed": N,
- "failed": N,
- "skipped": N
-}
-
-Then complete the current phase.
-```
-
-### Workflow Stuck
-
-```
-The workflow seems stuck. Please:
-1. Check the current workflow status
-2. Identify the issue
-3. If corrupted, abort and restart
-4. Explain what went wrong and how to prevent it
-```
-
-### Phase Validation Failing
-
-```
-I'm getting: "RED phase validation failed: no test failures"
-
-Please:
-1. Review the test I just wrote
-2. Identify why it's not actually testing new behavior
-3. Rewrite the test to properly fail
-4. Explain what makes a good failing test
-```
-
-### Git Issues
-
-```
-Getting git errors when trying to start workflow:
-{PASTE_ERROR}
-
-Please:
-1. Diagnose the git issue
-2. Provide commands to fix it
-3. Restart the workflow once fixed
-```
-
-## Advanced Patterns
-
-### Parallel Test Generation
-
-```
-We have 3 subtasks to implement. For efficiency:
-1. Read all 3 subtask descriptions
-2. Plan the test structure for each
-3. Identify shared test utilities needed
-4. Generate tests for subtask 1 (they should fail)
-5. Once we complete 1, move to 2, then 3
-```
-
-### Integration Test Strategy
-
-```
-This subtask requires integration testing. Please:
-1. Set up test database/environment
-2. Write integration tests that exercise the full stack
-3. Use proper cleanup in afterEach
-4. Mock external services (APIs, third-party)
-5. Ensure tests are deterministic and fast
-```
-
-### Test-Driven Refactoring
-
-```
-We need to refactor {MODULE_NAME} but keep behavior unchanged:
-1. First, write comprehensive tests for current behavior
-2. Ensure all tests pass (document current state)
-3. Refactor the implementation
-4. Verify all tests still pass
-5. Commit the refactoring
-```
-
-### Complex Feature Implementation
-
-```
-Subtask: "{COMPLEX_SUBTASK}"
-
-This is complex. Let's break it down:
-1. Identify the core functionality to test
-2. Write tests for the simplest version
-3. Implement minimal working code
-4. Commit that cycle
-5. Then iteratively add more tests for additional features
-6. Each iteration is a RED-GREEN-COMMIT cycle
-```
-
-### Performance Testing
-
-```
-Write performance tests for {FEATURE}:
-1. Measure baseline performance (current state)
-2. Write test that fails if operation takes > {N}ms
-3. Implement optimizations to pass the test
-4. Document performance improvements
-5. Consider edge cases (large inputs, concurrent requests)
-```
-
-### Security Testing
-
-```
-Write security-focused tests for {FEATURE}:
-1. Test input validation (injection attacks)
-2. Test authentication/authorization
-3. Test data sanitization
-4. Test rate limiting
-5. Document security assumptions
-
-Each test should initially fail and demonstrate the vulnerability.
-```
-
-## Workflow Automation Patterns
-
-### Full Autonomous Mode
-
-```
-Implement Task {TASK_ID} completely autonomously:
-1. Start the workflow
-2. For each subtask:
- a. Read requirements
- b. Write failing tests
- c. Implement to pass tests
- d. Commit changes
-3. Continue until all subtasks complete
-4. Report final status
-
-Rules:
-- Never skip the RED phase
-- Always verify tests fail first
-- Implement minimal working code
-- Commit after each subtask
-- Handle errors gracefully with retries
-```
-
-### Supervised Mode
-
-```
-Work on Task {TASK_ID} with human oversight:
-1. Start workflow and show plan
-2. For each subtask:
- a. Show test plan, wait for approval
- b. Write and run tests, show results
- c. Show implementation plan, wait for approval
- d. Implement and verify
- e. Show commit message, wait for approval
-3. Request feedback between subtasks
-```
-
-### Review Mode
-
-```
-Review the current workflow state:
-1. Show all completed subtasks and their commits
-2. Identify remaining subtasks
-3. Check test coverage
-4. Verify git history is clean
-5. Recommend next steps
-```
-
-## Error Recovery Patterns
-
-### Retry with Learning
-
-```
-The {PHASE} phase failed {N} times. Please:
-1. Review all previous attempts
-2. Identify the pattern of failures
-3. Propose a different approach
-4. Explain why this approach should work
-5. Implement with the new approach
-```
-
-### Escalate to Human
-
-```
-After {MAX_ATTEMPTS} attempts, unable to complete {SUBTASK}.
-
-Please:
-1. Document what was tried
-2. Explain what's not working
-3. Provide relevant code and test output
-4. Suggest where human expertise is needed
-5. Save current state for manual intervention
-```
-
-### Reset and Restart
-
-```
-Workflow is in an inconsistent state. Please:
-1. Save any valuable work
-2. Abort the current workflow
-3. Explain what went wrong
-4. Propose a better approach
-5. Restart with improved strategy
-```
-
-## Example Complete Session
-
-```
-I need to implement Task 7 which has 5 subtasks. Please work autonomously with these preferences:
-
-1. Testing Framework: vitest
-2. Code Style: TypeScript strict mode, functional style preferred
-3. Commit Style: Conventional commits with detailed messages
-4. Review: Show me status after each subtask completion
-
-Workflow:
-1. Start autopilot for task 7
-2. For each subtask (7.1 through 7.5):
- - RED: Write comprehensive failing tests
- - GREEN: Implement minimal code to pass
- - COMMIT: Auto-generate commit and advance
-3. Final: Show summary of all commits and changes
-
-Error Handling:
-- If phase validation fails, explain why and retry
-- If tests are flaky, identify and fix
-- If stuck after 3 attempts, ask for help
-
-Let's begin!
-```
-
-## Tips for Effective Prompts
-
-### 1. Be Specific About Context
-
-**Good:**
-```
-For the UserAuthenticationService in src/services/auth.ts,
-write tests for the login method using vitest.
-```
-
-**Bad:**
-```
-Write tests for authentication.
-```
-
-### 2. Specify Success Criteria
-
-**Good:**
-```
-Tests should cover:
-1. Successful login with valid credentials
-2. Failed login with invalid password
-3. Account lockout after 5 failures
-4. Rate limiting (max 10 attempts/minute)
-```
-
-**Bad:**
-```
-Test login functionality.
-```
-
-### 3. Request Explanations
-
-**Good:**
-```
-Implement the feature and explain:
-1. Why this approach was chosen
-2. What edge cases are handled
-3. What assumptions were made
-```
-
-**Bad:**
-```
-Just implement it.
-```
-
-### 4. Include Project Context
-
-**Good:**
-```
-Following the existing pattern in src/models/,
-create a User model that:
-- Extends BaseModel
-- Uses Zod for validation
-- Includes proper TypeScript types
-```
-
-**Bad:**
-```
-Create a user model.
-```
-
-## Troubleshooting Prompts
-
-### When Tests Won't Fail
-
-```
-My test is passing when it should fail. Please:
-1. Review the test code
-2. Identify why it's passing
-3. Check if implementation already exists
-4. Rewrite test to actually test new behavior
-5. Verify it fails this time
-```
-
-### When Implementation is Incomplete
-
-```
-Tests are still failing after implementation. Please:
-1. Show me the failing test output
-2. Review the implementation
-3. Identify what's missing
-4. Fix the implementation
-5. Verify all tests pass
-```
-
-### When Workflow Won't Advance
-
-```
-Can't complete the phase. Getting error: {ERROR}
-
-Please:
-1. Check workflow status
-2. Verify test results format is correct
-3. Check if phase validation requirements are met
-4. If needed, show me how to manually fix state
-```
-
----
-
-## Additional Resources
-
-- [AI Agent Integration Guide](../ai-agent-integration.mdx)
-- [CLAUDE.md Template](./CLAUDE.md.template)
-- [Command Reference](../command-reference.mdx)
-- [Testing Best Practices](./testing-best-practices.mdx)
diff --git a/packages/tm-core/src/auth/auth-manager.ts b/packages/tm-core/src/auth/auth-manager.ts
index 7610a95b..f525b36f 100644
--- a/packages/tm-core/src/auth/auth-manager.ts
+++ b/packages/tm-core/src/auth/auth-manager.ts
@@ -31,7 +31,6 @@ export class AuthManager {
private supabaseClient: SupabaseAuthClient;
private organizationService?: OrganizationService;
private readonly logger = getLogger('AuthManager');
- private refreshPromise: Promise | null = null;
private constructor(config?: Partial) {
this.credentialStore = CredentialStore.getInstance(config);